]>
Commit | Line | Data |
---|---|---|
89eb8eb9 DN |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
45d9ca49 | 6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. |
89eb8eb9 DN |
7 | */ |
8 | ||
89eb8eb9 DN |
9 | /* |
10 | * Cross Partition Communication (XPC) channel support. | |
11 | * | |
12 | * This is the part of XPC that manages the channels and | |
13 | * sends/receives messages across them to/from other partitions. | |
14 | * | |
15 | */ | |
16 | ||
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include "xpc.h"
89eb8eb9 | 19 | |
/*
 * Process a connect message from a remote partition.
 *
 * Drives the channel toward the CONNECTED state once both the local
 * OPENREQUEST and the remote ROPENREQUEST have been seen: sets up the
 * message structures, sends our OPENREPLY, and — once the remote's
 * ROPENREPLY has also arrived — marks the channel connected and starts
 * a kthread to make the connected callout.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.  The lock
 * IS temporarily dropped around xpc_setup_msg_structures() and
 * xpc_create_kthreads(), both of which may block.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		/* msg-structure setup can block, so drop the channel lock */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		/*
		 * SETUP is recorded even on failure so the teardown done by
		 * xpc_process_disconnect() knows structures were allocated.
		 */
		ch->flags |= XPC_C_SETUP;

		/* flags may have changed while the lock was dropped */
		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	/* can't finish connecting until the remote's OPENREPLY arrives */
	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	/* kthread creation can block; drop the lock around it */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
71 | ||
/*
 * Complete the teardown of a disconnecting channel once all activity on it
 * has quiesced: run the close handshake with the remote side (or skip it if
 * the partition is deactivating), notify waiting senders, make the
 * disconnect callout, free the message structures, and reset channel state.
 *
 * spin_lock_irqsave() is expected to be held on entry.  The lock is
 * temporarily dropped around xpc_disconnect_callout(), which may block.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		/* the disconnect callout may block; drop the channel lock */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	/* reset per-connection state for a possible future reconnect */
	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
	 * not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
173 | ||
/*
 * Process a change in the channel's remote connection state.
 *
 * @part:        partition the channel belongs to.
 * @ch_number:   index of the channel within the partition.
 * @chctl_flags: the open/close control flags (XPC_CHCTL_*) just received
 *               from the remote partition for this channel.
 *
 * Handles, in order, any CLOSEREQUEST, CLOSEREPLY, OPENREQUEST and OPENREPLY
 * flags packed into chctl_flags, advancing the channel's open/close state
 * machine accordingly.  Acquires and releases ch->lock internally; note the
 * many early returns that each release the lock before returning.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			/* reprocess remaining flags against the new state */
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				/*
				 * A lone CLOSEREQUEST while disconnected is
				 * stale unless an OPENREQUEST is still pending
				 * delivery; requeue it to follow that open.
				 */
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			/* CLOSEREQUEST arrived packed with an OPENREQUEST */
			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		/* the close supersedes any open flags received with it */
		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			/* sanitize the remote-supplied reason code */
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			/*
			 * CLOSEREPLY arrived before the matching remote
			 * CLOSEREQUEST was processed; requeue it if that
			 * request is still pending delivery.
			 */
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			/* defer the open until the disconnect completes */
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			/* both sides registered; message sizes must agree */
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			/* an OPENREPLY without our OPENREQUEST is a protocol
			 * violation by the remote side */
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

		/* both sides converge on the smaller of the two queue sizes */
		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
429 | ||
/*
 * Attempt to establish a channel connection to a remote partition.
 *
 * Copies the connect-registration info (callback, limits, nentries) into the
 * channel, then sends an OPENREQUEST and runs xpc_process_connect().
 *
 * Returns xpSuccess when the connect was initiated, xpRetry if the
 * registration mutex could not be taken without blocking, xpUnregistered if
 * the channel is no longer registered, ch->reason if the channel is already
 * disconnecting, or xpUnequalMsgSizes on a local/remote size mismatch.
 *
 * Lock ordering: registration->mutex is taken before ch->lock; the mutex is
 * always released before this function returns.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	/* trylock only — caller (channel mgr) must not block here */
	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		/* remote already requested open; sizes must match ours */
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
513 | ||
/*
 * Process all channel control flags sent to us by the remote partition:
 * first the open/close related flags per channel, then connect/disconnect
 * processing as the channel state requires, and finally any message-related
 * flags (which may activate kthreads to deliver pending messages).
 *
 * Called from the partition's channel manager; must not block for long.
 */
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	/* atomically fetch-and-clear the flags posted by the remote side */
	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				/* return value intentionally ignored; a
				 * failed attempt will be retried later */
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}
579 | ||
89eb8eb9 | 580 | /* |
a607c389 DN |
581 | * XPC's heartbeat code calls this function to inform XPC that a partition is |
582 | * going down. XPC responds by tearing down the XPartition Communication | |
89eb8eb9 DN |
583 | * infrastructure used for the just downed partition. |
584 | * | |
585 | * XPC's heartbeat code will never call this function and xpc_partition_up() | |
586 | * at the same time. Nor will it ever make multiple calls to either function | |
587 | * at the same time. | |
588 | */ | |
589 | void | |
65c17b80 | 590 | xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) |
89eb8eb9 DN |
591 | { |
592 | unsigned long irq_flags; | |
593 | int ch_number; | |
594 | struct xpc_channel *ch; | |
595 | ||
89eb8eb9 DN |
596 | dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", |
597 | XPC_PARTID(part), reason); | |
598 | ||
599 | if (!xpc_part_ref(part)) { | |
600 | /* infrastructure for this partition isn't currently set up */ | |
601 | return; | |
602 | } | |
603 | ||
a607c389 | 604 | /* disconnect channels associated with the partition going down */ |
89eb8eb9 DN |
605 | |
606 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | |
607 | ch = &part->channels[ch_number]; | |
608 | ||
89eb8eb9 DN |
609 | xpc_msgqueue_ref(ch); |
610 | spin_lock_irqsave(&ch->lock, irq_flags); | |
611 | ||
612 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | |
613 | ||
614 | spin_unlock_irqrestore(&ch->lock, irq_flags); | |
615 | xpc_msgqueue_deref(ch); | |
616 | } | |
617 | ||
618 | xpc_wakeup_channel_mgr(part); | |
619 | ||
620 | xpc_part_deref(part); | |
621 | } | |
622 | ||
89eb8eb9 DN |
623 | /* |
624 | * Called by XP at the time of channel connection registration to cause | |
625 | * XPC to establish connections to all currently active partitions. | |
626 | */ | |
627 | void | |
628 | xpc_initiate_connect(int ch_number) | |
629 | { | |
64d032ba | 630 | short partid; |
89eb8eb9 DN |
631 | struct xpc_partition *part; |
632 | struct xpc_channel *ch; | |
633 | ||
bc63d387 | 634 | DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); |
89eb8eb9 | 635 | |
bc63d387 | 636 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
89eb8eb9 DN |
637 | part = &xpc_partitions[partid]; |
638 | ||
639 | if (xpc_part_ref(part)) { | |
640 | ch = &part->channels[ch_number]; | |
641 | ||
e54af724 DN |
642 | /* |
643 | * Initiate the establishment of a connection on the | |
644 | * newly registered channel to the remote partition. | |
645 | */ | |
646 | xpc_wakeup_channel_mgr(part); | |
89eb8eb9 DN |
647 | xpc_part_deref(part); |
648 | } | |
649 | } | |
650 | } | |
651 | ||
89eb8eb9 DN |
652 | void |
653 | xpc_connected_callout(struct xpc_channel *ch) | |
654 | { | |
89eb8eb9 DN |
655 | /* let the registerer know that a connection has been established */ |
656 | ||
657 | if (ch->func != NULL) { | |
65c17b80 | 658 | dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " |
89eb8eb9 DN |
659 | "partid=%d, channel=%d\n", ch->partid, ch->number); |
660 | ||
65c17b80 | 661 | ch->func(xpConnected, ch->partid, ch->number, |
35190506 | 662 | (void *)(u64)ch->local_nentries, ch->key); |
89eb8eb9 | 663 | |
65c17b80 | 664 | dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " |
89eb8eb9 DN |
665 | "partid=%d, channel=%d\n", ch->partid, ch->number); |
666 | } | |
89eb8eb9 DN |
667 | } |
668 | ||
89eb8eb9 DN |
669 | /* |
670 | * Called by XP at the time of channel connection unregistration to cause | |
671 | * XPC to teardown all current connections for the specified channel. | |
672 | * | |
673 | * Before returning xpc_initiate_disconnect() will wait until all connections | |
674 | * on the specified channel have been closed/torndown. So the caller can be | |
675 | * assured that they will not be receiving any more callouts from XPC to the | |
676 | * function they registered via xpc_connect(). | |
677 | * | |
678 | * Arguments: | |
679 | * | |
680 | * ch_number - channel # to unregister. | |
681 | */ | |
682 | void | |
683 | xpc_initiate_disconnect(int ch_number) | |
684 | { | |
685 | unsigned long irq_flags; | |
64d032ba | 686 | short partid; |
89eb8eb9 DN |
687 | struct xpc_partition *part; |
688 | struct xpc_channel *ch; | |
689 | ||
bc63d387 | 690 | DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); |
89eb8eb9 DN |
691 | |
692 | /* initiate the channel disconnect for every active partition */ | |
bc63d387 | 693 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
89eb8eb9 DN |
694 | part = &xpc_partitions[partid]; |
695 | ||
696 | if (xpc_part_ref(part)) { | |
697 | ch = &part->channels[ch_number]; | |
698 | xpc_msgqueue_ref(ch); | |
699 | ||
700 | spin_lock_irqsave(&ch->lock, irq_flags); | |
701 | ||
a607c389 DN |
702 | if (!(ch->flags & XPC_C_DISCONNECTED)) { |
703 | ch->flags |= XPC_C_WDISCONNECT; | |
704 | ||
65c17b80 | 705 | XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, |
35190506 | 706 | &irq_flags); |
a607c389 | 707 | } |
89eb8eb9 DN |
708 | |
709 | spin_unlock_irqrestore(&ch->lock, irq_flags); | |
710 | ||
711 | xpc_msgqueue_deref(ch); | |
712 | xpc_part_deref(part); | |
713 | } | |
714 | } | |
715 | ||
716 | xpc_disconnect_wait(ch_number); | |
717 | } | |
718 | ||
/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * @line:      caller's source line (recorded via XPC_SET_REASON for debug).
 * @ch:        channel to disconnect.
 * @reason:    why the channel is being disconnected.
 * @irq_flags: caller's saved irq flags for ch->lock.
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 * The lock is temporarily dropped around the kthread wakeups/creation below.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	/* a disconnect is already in progress or complete; nothing to do */
	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	/* the wakeups below may sleep/schedule; drop the channel lock */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
775 | ||
89eb8eb9 | 776 | void |
65c17b80 | 777 | xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) |
89eb8eb9 DN |
778 | { |
779 | /* | |
a607c389 | 780 | * Let the channel's registerer know that the channel is being |
89eb8eb9 | 781 | * disconnected. We don't want to do this if the registerer was never |
a607c389 | 782 | * informed of a connection being made. |
89eb8eb9 DN |
783 | */ |
784 | ||
785 | if (ch->func != NULL) { | |
246c7e33 DN |
786 | dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " |
787 | "channel=%d\n", reason, ch->partid, ch->number); | |
89eb8eb9 | 788 | |
246c7e33 | 789 | ch->func(reason, ch->partid, ch->number, NULL, ch->key); |
89eb8eb9 | 790 | |
246c7e33 DN |
791 | dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " |
792 | "channel=%d\n", reason, ch->partid, ch->number); | |
89eb8eb9 DN |
793 | } |
794 | } | |
795 | ||
89eb8eb9 DN |
796 | /* |
797 | * Wait for a message entry to become available for the specified channel, | |
798 | * but don't wait any longer than 1 jiffy. | |
799 | */ | |
33ba3c77 | 800 | enum xp_retval |
89eb8eb9 DN |
801 | xpc_allocate_msg_wait(struct xpc_channel *ch) |
802 | { | |
65c17b80 | 803 | enum xp_retval ret; |
89eb8eb9 | 804 | |
89eb8eb9 | 805 | if (ch->flags & XPC_C_DISCONNECTING) { |
65c17b80 | 806 | DBUG_ON(ch->reason == xpInterrupted); |
89eb8eb9 DN |
807 | return ch->reason; |
808 | } | |
809 | ||
810 | atomic_inc(&ch->n_on_msg_allocate_wq); | |
811 | ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); | |
812 | atomic_dec(&ch->n_on_msg_allocate_wq); | |
813 | ||
814 | if (ch->flags & XPC_C_DISCONNECTING) { | |
815 | ret = ch->reason; | |
65c17b80 | 816 | DBUG_ON(ch->reason == xpInterrupted); |
89eb8eb9 | 817 | } else if (ret == 0) { |
65c17b80 | 818 | ret = xpTimeout; |
89eb8eb9 | 819 | } else { |
65c17b80 | 820 | ret = xpInterrupted; |
89eb8eb9 DN |
821 | } |
822 | ||
823 | return ret; | |
824 | } | |
825 | ||
89eb8eb9 | 826 | /* |
97bf1aa1 DN |
827 | * Send a message that contains the user's payload on the specified channel |
828 | * connected to the specified partition. | |
89eb8eb9 | 829 | * |
97bf1aa1 DN |
830 | * NOTE that this routine can sleep waiting for a message entry to become |
831 | * available. To not sleep, pass in the XPC_NOWAIT flag. | |
89eb8eb9 | 832 | * |
97bf1aa1 DN |
833 | * Once sent, this routine will not wait for the message to be received, nor |
834 | * will notification be given when it does happen. | |
89eb8eb9 DN |
835 | * |
836 | * Arguments: | |
837 | * | |
838 | * partid - ID of partition to which the channel is connected. | |
839 | * ch_number - channel # to send message on. | |
97bf1aa1 DN |
840 | * flags - see xp.h for valid flags. |
841 | * payload - pointer to the payload which is to be sent. | |
842 | * payload_size - size of the payload in bytes. | |
89eb8eb9 | 843 | */ |
65c17b80 | 844 | enum xp_retval |
97bf1aa1 DN |
845 | xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, |
846 | u16 payload_size) | |
89eb8eb9 DN |
847 | { |
848 | struct xpc_partition *part = &xpc_partitions[partid]; | |
97bf1aa1 | 849 | enum xp_retval ret = xpUnknownReason; |
89eb8eb9 | 850 | |
97bf1aa1 | 851 | dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, |
89eb8eb9 DN |
852 | partid, ch_number); |
853 | ||
bc63d387 | 854 | DBUG_ON(partid < 0 || partid >= xp_max_npartitions); |
89eb8eb9 | 855 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); |
97bf1aa1 | 856 | DBUG_ON(payload == NULL); |
89eb8eb9 | 857 | |
97bf1aa1 | 858 | if (xpc_part_ref(part)) { |
bd3e64c1 DN |
859 | ret = xpc_send_payload(&part->channels[ch_number], flags, |
860 | payload, payload_size, 0, NULL, NULL); | |
97bf1aa1 DN |
861 | xpc_part_deref(part); |
862 | } | |
89eb8eb9 DN |
863 | |
864 | return ret; | |
865 | } | |
866 | ||
89eb8eb9 | 867 | /* |
97bf1aa1 DN |
868 | * Send a message that contains the user's payload on the specified channel |
869 | * connected to the specified partition. | |
89eb8eb9 | 870 | * |
97bf1aa1 DN |
871 | * NOTE that this routine can sleep waiting for a message entry to become |
872 | * available. To not sleep, pass in the XPC_NOWAIT flag. | |
873 | * | |
874 | * This routine will not wait for the message to be sent or received. | |
89eb8eb9 DN |
875 | * |
876 | * Once the remote end of the channel has received the message, the function | |
877 | * passed as an argument to xpc_initiate_send_notify() will be called. This | |
878 | * allows the sender to free up or re-use any buffers referenced by the | |
879 | * message, but does NOT mean the message has been processed at the remote | |
880 | * end by a receiver. | |
881 | * | |
882 | * If this routine returns an error, the caller's function will NOT be called. | |
883 | * | |
89eb8eb9 DN |
884 | * Arguments: |
885 | * | |
886 | * partid - ID of partition to which the channel is connected. | |
887 | * ch_number - channel # to send message on. | |
97bf1aa1 DN |
888 | * flags - see xp.h for valid flags. |
889 | * payload - pointer to the payload which is to be sent. | |
890 | * payload_size - size of the payload in bytes. | |
89eb8eb9 DN |
891 | * func - function to call with asynchronous notification of message |
892 | * receipt. THIS FUNCTION MUST BE NON-BLOCKING. | |
893 | * key - user-defined key to be passed to the function when it's called. | |
894 | */ | |
65c17b80 | 895 | enum xp_retval |
97bf1aa1 DN |
896 | xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, |
897 | u16 payload_size, xpc_notify_func func, void *key) | |
89eb8eb9 DN |
898 | { |
899 | struct xpc_partition *part = &xpc_partitions[partid]; | |
97bf1aa1 | 900 | enum xp_retval ret = xpUnknownReason; |
89eb8eb9 | 901 | |
97bf1aa1 | 902 | dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, |
89eb8eb9 DN |
903 | partid, ch_number); |
904 | ||
bc63d387 | 905 | DBUG_ON(partid < 0 || partid >= xp_max_npartitions); |
89eb8eb9 | 906 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); |
97bf1aa1 | 907 | DBUG_ON(payload == NULL); |
89eb8eb9 DN |
908 | DBUG_ON(func == NULL); |
909 | ||
97bf1aa1 | 910 | if (xpc_part_ref(part)) { |
bd3e64c1 DN |
911 | ret = xpc_send_payload(&part->channels[ch_number], flags, |
912 | payload, payload_size, XPC_N_CALL, func, | |
913 | key); | |
97bf1aa1 DN |
914 | xpc_part_deref(part); |
915 | } | |
89eb8eb9 DN |
916 | return ret; |
917 | } | |
918 | ||
89eb8eb9 | 919 | /* |
bd3e64c1 | 920 | * Deliver a message's payload to its intended recipient. |
89eb8eb9 DN |
921 | */ |
922 | void | |
bd3e64c1 | 923 | xpc_deliver_payload(struct xpc_channel *ch) |
89eb8eb9 | 924 | { |
bd3e64c1 | 925 | void *payload; |
89eb8eb9 | 926 | |
bd3e64c1 DN |
927 | payload = xpc_get_deliverable_payload(ch); |
928 | if (payload != NULL) { | |
89eb8eb9 DN |
929 | |
930 | /* | |
931 | * This ref is taken to protect the payload itself from being | |
932 | * freed before the user is finished with it, which the user | |
933 | * indicates by calling xpc_initiate_received(). | |
934 | */ | |
935 | xpc_msgqueue_ref(ch); | |
936 | ||
937 | atomic_inc(&ch->kthreads_active); | |
938 | ||
939 | if (ch->func != NULL) { | |
bd3e64c1 DN |
940 | dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " |
941 | "partid=%d channel=%d\n", payload, ch->partid, | |
89eb8eb9 DN |
942 | ch->number); |
943 | ||
944 | /* deliver the message to its intended recipient */ | |
bd3e64c1 DN |
945 | ch->func(xpMsgReceived, ch->partid, ch->number, payload, |
946 | ch->key); | |
89eb8eb9 | 947 | |
bd3e64c1 DN |
948 | dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " |
949 | "partid=%d channel=%d\n", payload, ch->partid, | |
89eb8eb9 DN |
950 | ch->number); |
951 | } | |
952 | ||
953 | atomic_dec(&ch->kthreads_active); | |
954 | } | |
955 | } | |
956 | ||
89eb8eb9 | 957 | /* |
bd3e64c1 | 958 | * Acknowledge receipt of a delivered message's payload. |
89eb8eb9 DN |
959 | * |
960 | * This function, although called by users, does not call xpc_part_ref() to | |
961 | * ensure that the partition infrastructure is in place. It relies on the | |
bd3e64c1 | 962 | * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload(). |
89eb8eb9 DN |
963 | * |
964 | * Arguments: | |
965 | * | |
966 | * partid - ID of partition to which the channel is connected. | |
967 | * ch_number - channel # message received on. | |
968 | * payload - pointer to the payload area allocated via | |
97bf1aa1 | 969 | * xpc_initiate_send() or xpc_initiate_send_notify(). |
89eb8eb9 DN |
970 | */ |
971 | void | |
64d032ba | 972 | xpc_initiate_received(short partid, int ch_number, void *payload) |
89eb8eb9 DN |
973 | { |
974 | struct xpc_partition *part = &xpc_partitions[partid]; | |
975 | struct xpc_channel *ch; | |
89eb8eb9 | 976 | |
bc63d387 | 977 | DBUG_ON(partid < 0 || partid >= xp_max_npartitions); |
89eb8eb9 DN |
978 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); |
979 | ||
980 | ch = &part->channels[ch_number]; | |
bd3e64c1 | 981 | xpc_received_payload(ch, payload); |
89eb8eb9 | 982 | |
bd3e64c1 | 983 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ |
89eb8eb9 DN |
984 | xpc_msgqueue_deref(ch); |
985 | } |