]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/misc/sgi-xp/xpc_channel.c
2eb3abff0e3a82d5035bd7f8c2830f1cf9ec945d
[net-next-2.6.git] / drivers / misc / sgi-xp / xpc_channel.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
7  */
8
9 /*
10  * Cross Partition Communication (XPC) channel support.
11  *
12  *      This is the part of XPC that manages the channels and
13  *      sends/receives messages across them to/from other partitions.
14  *
15  */
16
17 #include <linux/device.h>
18 #include "xpc.h"
19
20 /*
21  * Process a connect message from a remote partition.
22  *
23  * Note: xpc_process_connect() is expecting to be called with the
24  * spin_lock_irqsave held and will leave it locked upon return.
25  */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	/* both the local and remote open requests must be in before the
	   connect sequence can advance */
	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		/* allocating the message queues may block, so drop the
		   channel lock around the call and retake it after */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		else
			ch->flags |= XPC_C_SETUP;

		/* the channel state may have changed while unlocked */
		if (ch->flags & XPC_C_DISCONNECTING)
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	/* can't continue until the remote side's openreply arrives */
	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
		ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
		xpc_send_chctl_opencomplete(ch, irq_flags);
	}

	/* fully connected only once the remote side also completes */
	if (!(ch->flags & XPC_C_ROPENCOMPLETE))
		return;

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
}
75
76 /*
77  * spin_lock_irqsave() is expected to be held on entry.
78  */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		/* the registerer's callout may block; drop the lock for it */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	/* reset per-connection attributes so the channel can be reused */
	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
	 * not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
177
178 /*
179  * Process a change in the channel's remote connection state.
180  */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;
	int create_kthread = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		goto out;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			/* reprocess remaining flags against the new state */
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				/* defer the closerequest if an openrequest
				   is still pending for this channel */
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				goto out;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		/* the close supersedes any open-related flags in this batch */
		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
		    XPC_CHCTL_OPENCOMPLETE);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			/* sanitize the remote-supplied reason code */
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			goto out;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			/* defer the closereply until the matching remote
			   closerequest has been processed */
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			goto out;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			goto out;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			/* hold the openrequest until the disconnect settles */
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			goto out;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			goto out;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			/* both sides must agree on the message entry size */
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				goto out;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			/* an openreply with no local openrequest is a
			   protocol violation */
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			goto out;
		}
		ch->flags |= XPC_C_ROPENREPLY;

		/* both sides settle on the smaller of the two queue sizes */
		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST) ||
		    !(ch->flags & XPC_C_OPENREPLY)) {
			/* out-of-sequence opencomplete: tear the channel down */
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));

		ch->flags |= XPC_C_ROPENCOMPLETE;

		xpc_process_connect(ch, &irq_flags);
		/* kthread creation can block; defer it until after unlock */
		create_kthread = 1;
	}

out:
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (create_kthread)
		xpc_create_kthreads(ch, 1, 0);
}
458
459 /*
460  * Attempt to establish a channel connection to a remote partition.
461  */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	/* don't block on the registration mutex; caller will retry later */
	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
542
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	/* atomically fetch-and-clear the pending chctl flags for this part */
	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		/* no new work for a partition that is going away */
		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}
604
605 /*
606  * XPC's heartbeat code calls this function to inform XPC that a partition is
607  * going down.  XPC responds by tearing down the XPartition Communication
608  * infrastructure used for the just downed partition.
609  *
610  * XPC's heartbeat code will never call this function and xpc_partition_up()
611  * at the same time. Nor will it ever make multiple calls to either function
612  * at the same time.
613  */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/* hold a reference so the channel can't be torn down under us */
		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	/* let the channel manager thread finish the disconnects */
	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
647
648 /*
649  * Called by XP at the time of channel connection registration to cause
650  * XPC to establish connections to all currently active partitions.
651  */
652 void
653 xpc_initiate_connect(int ch_number)
654 {
655         short partid;
656         struct xpc_partition *part;
657         struct xpc_channel *ch;
658
659         DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
660
661         for (partid = 0; partid < xp_max_npartitions; partid++) {
662                 part = &xpc_partitions[partid];
663
664                 if (xpc_part_ref(part)) {
665                         ch = &part->channels[ch_number];
666
667                         /*
668                          * Initiate the establishment of a connection on the
669                          * newly registered channel to the remote partition.
670                          */
671                         xpc_wakeup_channel_mgr(part);
672                         xpc_part_deref(part);
673                 }
674         }
675 }
676
677 void
678 xpc_connected_callout(struct xpc_channel *ch)
679 {
680         /* let the registerer know that a connection has been established */
681
682         if (ch->func != NULL) {
683                 dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
684                         "partid=%d, channel=%d\n", ch->partid, ch->number);
685
686                 ch->func(xpConnected, ch->partid, ch->number,
687                          (void *)(u64)ch->local_nentries, ch->key);
688
689                 dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
690                         "partid=%d, channel=%d\n", ch->partid, ch->number);
691         }
692 }
693
694 /*
695  * Called by XP at the time of channel connection unregistration to cause
696  * XPC to teardown all current connections for the specified channel.
697  *
698  * Before returning xpc_initiate_disconnect() will wait until all connections
699  * on the specified channel have been closed/torndown. So the caller can be
700  * assured that they will not be receiving any more callouts from XPC to the
701  * function they registered via xpc_connect().
702  *
703  * Arguments:
704  *
705  *      ch_number - channel # to unregister.
706  */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			/* keep the channel's msg queues around while we work */
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				/* mark that someone is waiting on the
				   disconnect to fully complete */
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	/* block until every partition's disconnect has completed */
	xpc_disconnect_wait(ch_number);
}
743
744 /*
745  * To disconnect a channel, and reflect it back to all who may be waiting.
746  *
747  * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
748  * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
749  * xpc_disconnect_wait().
750  *
751  * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
752  */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	/* a disconnect is already underway (or done); nothing to do */
	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	/* record the reason and the source line that initiated it */
	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	/* the wakeups below can block, so drop the lock around them */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	/* NOTE(review): this tests XPC_C_DISCONNECTINGCALLOUT (callout
	   started), not _MADE (callout finished) — presumably so a second
	   kthread isn't spawned once the callout is underway; confirm
	   against xpc.h flag definitions */
	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	/* per the contract, return with the channel lock held */
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
800
801 void
802 xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
803 {
804         /*
805          * Let the channel's registerer know that the channel is being
806          * disconnected. We don't want to do this if the registerer was never
807          * informed of a connection being made.
808          */
809
810         if (ch->func != NULL) {
811                 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
812                         "channel=%d\n", reason, ch->partid, ch->number);
813
814                 ch->func(reason, ch->partid, ch->number, NULL, ch->key);
815
816                 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
817                         "channel=%d\n", reason, ch->partid, ch->number);
818         }
819 }
820
821 /*
822  * Wait for a message entry to become available for the specified channel,
823  * but don't wait any longer than 1 jiffy.
824  */
825 enum xp_retval
826 xpc_allocate_msg_wait(struct xpc_channel *ch)
827 {
828         enum xp_retval ret;
829
830         if (ch->flags & XPC_C_DISCONNECTING) {
831                 DBUG_ON(ch->reason == xpInterrupted);
832                 return ch->reason;
833         }
834
835         atomic_inc(&ch->n_on_msg_allocate_wq);
836         ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
837         atomic_dec(&ch->n_on_msg_allocate_wq);
838
839         if (ch->flags & XPC_C_DISCONNECTING) {
840                 ret = ch->reason;
841                 DBUG_ON(ch->reason == xpInterrupted);
842         } else if (ret == 0) {
843                 ret = xpTimeout;
844         } else {
845                 ret = xpInterrupted;
846         }
847
848         return ret;
849 }
850
851 /*
852  * Send a message that contains the user's payload on the specified channel
853  * connected to the specified partition.
854  *
855  * NOTE that this routine can sleep waiting for a message entry to become
856  * available. To not sleep, pass in the XPC_NOWAIT flag.
857  *
858  * Once sent, this routine will not wait for the message to be received, nor
859  * will notification be given when it does happen.
860  *
861  * Arguments:
862  *
863  *      partid - ID of partition to which the channel is connected.
864  *      ch_number - channel # to send message on.
865  *      flags - see xp.h for valid flags.
866  *      payload - pointer to the payload which is to be sent.
867  *      payload_size - size of the payload in bytes.
868  */
869 enum xp_retval
870 xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
871                   u16 payload_size)
872 {
873         struct xpc_partition *part = &xpc_partitions[partid];
874         enum xp_retval ret = xpUnknownReason;
875
876         dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
877                 partid, ch_number);
878
879         DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
880         DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
881         DBUG_ON(payload == NULL);
882
883         if (xpc_part_ref(part)) {
884                 ret = xpc_send_payload(&part->channels[ch_number], flags,
885                                        payload, payload_size, 0, NULL, NULL);
886                 xpc_part_deref(part);
887         }
888
889         return ret;
890 }
891
892 /*
893  * Send a message that contains the user's payload on the specified channel
894  * connected to the specified partition.
895  *
896  * NOTE that this routine can sleep waiting for a message entry to become
897  * available. To not sleep, pass in the XPC_NOWAIT flag.
898  *
899  * This routine will not wait for the message to be sent or received.
900  *
901  * Once the remote end of the channel has received the message, the function
902  * passed as an argument to xpc_initiate_send_notify() will be called. This
903  * allows the sender to free up or re-use any buffers referenced by the
904  * message, but does NOT mean the message has been processed at the remote
905  * end by a receiver.
906  *
907  * If this routine returns an error, the caller's function will NOT be called.
908  *
909  * Arguments:
910  *
911  *      partid - ID of partition to which the channel is connected.
912  *      ch_number - channel # to send message on.
913  *      flags - see xp.h for valid flags.
914  *      payload - pointer to the payload which is to be sent.
915  *      payload_size - size of the payload in bytes.
916  *      func - function to call with asynchronous notification of message
917  *                receipt. THIS FUNCTION MUST BE NON-BLOCKING.
918  *      key - user-defined key to be passed to the function when it's called.
919  */
920 enum xp_retval
921 xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
922                          u16 payload_size, xpc_notify_func func, void *key)
923 {
924         struct xpc_partition *part = &xpc_partitions[partid];
925         enum xp_retval ret = xpUnknownReason;
926
927         dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
928                 partid, ch_number);
929
930         DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
931         DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
932         DBUG_ON(payload == NULL);
933         DBUG_ON(func == NULL);
934
935         if (xpc_part_ref(part)) {
936                 ret = xpc_send_payload(&part->channels[ch_number], flags,
937                                        payload, payload_size, XPC_N_CALL, func,
938                                        key);
939                 xpc_part_deref(part);
940         }
941         return ret;
942 }
943
944 /*
945  * Deliver a message's payload to its intended recipient.
946  */
947 void
948 xpc_deliver_payload(struct xpc_channel *ch)
949 {
950         void *payload;
951
952         payload = xpc_get_deliverable_payload(ch);
953         if (payload != NULL) {
954
955                 /*
956                  * This ref is taken to protect the payload itself from being
957                  * freed before the user is finished with it, which the user
958                  * indicates by calling xpc_initiate_received().
959                  */
960                 xpc_msgqueue_ref(ch);
961
962                 atomic_inc(&ch->kthreads_active);
963
964                 if (ch->func != NULL) {
965                         dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
966                                 "partid=%d channel=%d\n", payload, ch->partid,
967                                 ch->number);
968
969                         /* deliver the message to its intended recipient */
970                         ch->func(xpMsgReceived, ch->partid, ch->number, payload,
971                                  ch->key);
972
973                         dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
974                                 "partid=%d channel=%d\n", payload, ch->partid,
975                                 ch->number);
976                 }
977
978                 atomic_dec(&ch->kthreads_active);
979         }
980 }
981
982 /*
983  * Acknowledge receipt of a delivered message's payload.
984  *
985  * This function, although called by users, does not call xpc_part_ref() to
986  * ensure that the partition infrastructure is in place. It relies on the
987  * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
988  *
989  * Arguments:
990  *
991  *      partid - ID of partition to which the channel is connected.
992  *      ch_number - channel # message received on.
993  *      payload - pointer to the payload area allocated via
994  *                      xpc_initiate_send() or xpc_initiate_send_notify().
995  */
996 void
997 xpc_initiate_received(short partid, int ch_number, void *payload)
998 {
999         struct xpc_partition *part = &xpc_partitions[partid];
1000         struct xpc_channel *ch;
1001
1002         DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
1003         DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1004
1005         ch = &part->channels[ch_number];
1006         xpc_received_payload(ch, payload);
1007
1008         /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload()  */
1009         xpc_msgqueue_deref(ch);
1010 }