/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
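/*
 * The ia64 build has no <asm/uv/uv_irq.h>, so the layout of an IO-APIC
 * route entry (which is how the GRU mq interrupt MMR value is encoded)
 * is defined locally below; the field widths mirror the x86 definition.
 */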
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
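/*
 * Each message queue is sized to hold four messages from every possible
 * partition at once: activate messages are one GRU cacheline each and
 * notify messages two, so the mq sizes below are 4 * XP_MAX_NPARTITIONS_UV
 * times the respective message size.
 */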
#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}
static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;	/* set the route entry's mask bit */

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq->gru_mq_desc);
	kfree(mq);
}
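/*
 * Send a message to another partition's message queue via the GRU,
 * retrying as needed: a full queue (MQE_QUEUE_FULL) is retried after a
 * short sleep, congestion (MQE_CONGESTION) is retried immediately, and
 * any other error aborts the send with xpGruSendMqError.
 */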
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
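/*
 * Drain the act_state_req's recorded by the activate IRQ handler.  The
 * xpc_activate_IRQ_rcvd_lock is dropped around each (de)activation call,
 * since those calls may sleep, and is reacquired before the next
 * partition is examined.
 */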
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
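/*
 * Dispatch one message received on the activate message queue.  State
 * changes (activate/deactivate requests) are only recorded here and the
 * heartbeat checker is woken to act on them; channel control messages
 * instead set the appropriate chctl flag and wake the channel manager.
 */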
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? deactivating?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}
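/*
 * Copy a remote partition's GRU message queue descriptor into a local
 * buffer.  The copied descriptor's mq pointer refers to memory in the
 * remote partition, so it is NULLed out; sends through this cached
 * descriptor then go by global physical address (the presumed reason
 * for clearing it).
 */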
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
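/*
 * Send a message to a partition's activate message queue.  The remote
 * queue's descriptor is fetched and cached on first use; if a send fails
 * and another thread has since invalidated the cached copy (cleared
 * XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV), the descriptor is re-fetched
 * and the send retried.
 */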
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
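/*
 * The uv code tracks free send slots and deliverable received messages
 * with a minimal FIFO: a singly linked list of xpc_fifo_entry_uv's
 * (embedded in the message slots themselves) guarded by a spinlock,
 * plus an entry count for quick queries.
 */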
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}
/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
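/*
 * Allocate the array of send message slots, backing off to a smaller
 * nentries if kzalloc() fails, and thread every slot onto the channel's
 * free list.  If fewer slots were allocated than requested, the
 * channel's local_nentries is reduced to match under ch->lock.
 */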
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}
/*
 * Free up msg_slots and clear other stuff that was set up for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}
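/*
 * An ACK identifies the send slot it releases by msg_slot_number.  Each
 * time a slot is acked its number is advanced by local_nentries, keeping
 * the sequence monotonically increasing so the BUG_ON below catches a
 * duplicate or stale ACK.
 */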
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and getting an
		 * error returned here would only confuse them. Additionally,
		 * since in this case the channel is being disconnected we
		 * don't need to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}
/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}
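/*
 * Acknowledge receipt of a delivered payload so the sender can reuse the
 * message slot.  The ACK is just the message header sent back with size
 * set to zero, which is how xpc_handle_notify_mq_msg_uv() on the other
 * side distinguishes an ACK from a normal message.
 */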
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}