/* arch/arm/mach-msm/smd_rpcrouter.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2009 QUALCOMM Incorporated.
 * Author: San Mehat <san@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* TODO: handle cases where smd_write() will tempfail due to full fifo */
/* TODO: thread priority? schedule a work to bump it? */
/* TODO: maybe make server_list_lock a mutex */
/* TODO: pool fragments to avoid kmalloc/kfree churn */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>

#include <mach/msm_smd.h>
#include "smd_rpcrouter.h"

#define TRACE_R2R_MSG 0
#define TRACE_R2R_RAW 0
#define TRACE_RPC_MSG 0
#define TRACE_NOTIFY_MSG 0

#define MSM_RPCROUTER_DEBUG 0
#define MSM_RPCROUTER_DEBUG_PKT 0
#define MSM_RPCROUTER_R2R_DEBUG 0
#define DUMP_ALL_RECEIVED_HEADERS 0

#define DIAG(x...) printk("[RR] ERROR " x)

#if MSM_RPCROUTER_DEBUG
#define D(x...) printk(x)
#else
#define D(x...) do {} while (0)
#endif

#if TRACE_R2R_MSG
#define RR(x...) printk("[RR] "x)
#else
#define RR(x...) do {} while (0)
#endif

#if TRACE_RPC_MSG
#define IO(x...) printk("[RPC] "x)
#else
#define IO(x...) do {} while (0)
#endif

#if TRACE_NOTIFY_MSG
#define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x)
#else
#define NTFY(x...) do {} while (0)
#endif

static LIST_HEAD(local_endpoints);
static LIST_HEAD(remote_endpoints);

static LIST_HEAD(server_list);

static smd_channel_t *smd_channel;
static int initialized;
static wait_queue_head_t newserver_wait;
static wait_queue_head_t smd_wait;

static DEFINE_SPINLOCK(local_endpoints_lock);
static DEFINE_SPINLOCK(remote_endpoints_lock);
static DEFINE_SPINLOCK(server_list_lock);
static DEFINE_SPINLOCK(smd_lock);

static struct workqueue_struct *rpcrouter_workqueue;
static int rpcrouter_need_len;

static atomic_t next_xid = ATOMIC_INIT(1);
static uint8_t next_pacmarkid;

static void do_read_data(struct work_struct *work);
static void do_create_pdevs(struct work_struct *work);
static void do_create_rpcrouter_pdev(struct work_struct *work);

static DECLARE_WORK(work_read_data, do_read_data);
static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);

#define RR_STATE_IDLE 0
#define RR_STATE_HEADER 1
#define RR_STATE_BODY 2
#define RR_STATE_ERROR 3

struct rr_context {
	struct rr_packet *pkt;
	uint8_t *ptr;
	uint32_t state; /* current assembly state */
	uint32_t count; /* bytes needed in this state */
};

static struct rr_context the_rr_context;

static struct platform_device rpcrouter_pdev = {
	.name = "oncrpc_router",
	.id = -1,
};

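/*
 * Control messages to the remote router are framed as a struct rr_header
 * addressed to RPCROUTER_ROUTER_ADDRESS, immediately followed by the
 * union rr_control_msg payload.  Only HELLO may be sent before the
 * handshake has completed; everything else requires 'initialized'.
 */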
static int rpcrouter_send_control_msg(union rr_control_msg *msg)
{
	struct rr_header hdr;
	unsigned long flags;
	int need;

	if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
		printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
		       "router not initialized\n");
		return -EINVAL;
	}

	hdr.version = RPCROUTER_VERSION;
	hdr.type = msg->cmd;
	hdr.src_pid = RPCROUTER_PID_LOCAL;
	hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
	hdr.confirm_rx = 0;
	hdr.size = sizeof(*msg);
	hdr.dst_pid = 0;
	hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;

	/* TODO: what if channel is full? */

	need = sizeof(hdr) + hdr.size;
	spin_lock_irqsave(&smd_lock, flags);
	while (smd_write_avail(smd_channel) < need) {
		spin_unlock_irqrestore(&smd_lock, flags);
		msleep(250);
		spin_lock_irqsave(&smd_lock, flags);
	}
	smd_write(smd_channel, &hdr, sizeof(hdr));
	smd_write(smd_channel, msg, hdr.size);
	spin_unlock_irqrestore(&smd_lock, flags);
	return 0;
}

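/*
 * A struct rr_server is created either for a local endpoint registering a
 * program/version (msm_rpc_register_server) or in response to a NEW_SERVER
 * control message from the remote processor.  Remote servers additionally
 * get a character device so userspace can bind to them by program/version.
 */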
static struct rr_server *rpcrouter_create_server(uint32_t pid,
						 uint32_t cid,
						 uint32_t prog,
						 uint32_t ver)
{
	struct rr_server *server;
	unsigned long flags;
	int rc;

	server = kzalloc(sizeof(struct rr_server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);

	server->pid = pid;
	server->cid = cid;
	server->prog = prog;
	server->vers = ver;

	spin_lock_irqsave(&server_list_lock, flags);
	list_add_tail(&server->list, &server_list);
	spin_unlock_irqrestore(&server_list_lock, flags);

	if (pid == RPCROUTER_PID_REMOTE) {
		rc = msm_rpcrouter_create_server_cdev(server);
		if (rc < 0)
			goto out_fail;
	}
	return server;
out_fail:
	spin_lock_irqsave(&server_list_lock, flags);
	list_del(&server->list);
	spin_unlock_irqrestore(&server_list_lock, flags);
	kfree(server);
	return ERR_PTR(rc);
}

static void rpcrouter_destroy_server(struct rr_server *server)
{
	unsigned long flags;

	spin_lock_irqsave(&server_list_lock, flags);
	list_del(&server->list);
	spin_unlock_irqrestore(&server_list_lock, flags);
	device_destroy(msm_rpcrouter_class, server->device_number);
	kfree(server);
}

static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
{
	struct rr_server *server;
	unsigned long flags;

	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->prog == prog
		 && server->vers == ver) {
			spin_unlock_irqrestore(&server_list_lock, flags);
			return server;
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
	return NULL;
}

static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
{
	struct rr_server *server;
	unsigned long flags;

	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->device_number == dev) {
			spin_unlock_irqrestore(&server_list_lock, flags);
			return server;
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
	return NULL;
}

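/*
 * Local endpoints are keyed by a cid derived from the endpoint pointer
 * itself.  When 'dev' names a program/version device node (i.e. it is not
 * the router device and not MKDEV(0, 0)), the new endpoint is bound to the
 * matching remote server; otherwise it starts out unconnected.
 */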
struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
{
	struct msm_rpc_endpoint *ept;
	unsigned long flags;

	ept = kzalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
	if (!ept)
		return NULL;

	/* mark no reply outstanding */
	ept->reply_pid = 0xffffffff;

	ept->cid = (uint32_t) ept;
	ept->pid = RPCROUTER_PID_LOCAL;
	ept->dev = dev;

	if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
		struct rr_server *srv;
		/*
		 * This is a userspace client which opened
		 * a program/ver devicenode. Bind the client
		 * to that destination
		 */
		srv = rpcrouter_lookup_server_by_dev(dev);
		/* TODO: bug? really? */
		BUG_ON(!srv);

		ept->dst_pid = srv->pid;
		ept->dst_cid = srv->cid;
		ept->dst_prog = cpu_to_be32(srv->prog);
		ept->dst_vers = cpu_to_be32(srv->vers);

		D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers);
	} else {
		/* mark not connected */
		ept->dst_pid = 0xffffffff;
		D("Creating a master local ept %p\n", ept);
	}

	init_waitqueue_head(&ept->wait_q);
	INIT_LIST_HEAD(&ept->read_q);
	spin_lock_init(&ept->read_q_lock);
	INIT_LIST_HEAD(&ept->incomplete);

	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_add_tail(&ept->list, &local_endpoints);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	return ept;
}

int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
{
	int rc;
	union rr_control_msg msg;

	msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
	msg.cli.pid = ept->pid;
	msg.cli.cid = ept->cid;

	RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
	rc = rpcrouter_send_control_msg(&msg);
	if (rc < 0)
		return rc;

	list_del(&ept->list);
	kfree(ept);
	return 0;
}

static int rpcrouter_create_remote_endpoint(uint32_t cid)
{
	struct rr_remote_endpoint *new_c;
	unsigned long flags;

	new_c = kzalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
	if (!new_c)
		return -ENOMEM;

	new_c->cid = cid;
	new_c->pid = RPCROUTER_PID_REMOTE;
	init_waitqueue_head(&new_c->quota_wait);
	spin_lock_init(&new_c->quota_lock);

	spin_lock_irqsave(&remote_endpoints_lock, flags);
	list_add_tail(&new_c->list, &remote_endpoints);
	spin_unlock_irqrestore(&remote_endpoints_lock, flags);
	return 0;
}

static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
{
	struct msm_rpc_endpoint *ept;
	unsigned long flags;

	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		if (ept->cid == cid) {
			spin_unlock_irqrestore(&local_endpoints_lock, flags);
			return ept;
		}
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	return NULL;
}

static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
{
	struct rr_remote_endpoint *ept;
	unsigned long flags;

	spin_lock_irqsave(&remote_endpoints_lock, flags);
	list_for_each_entry(ept, &remote_endpoints, list) {
		if (ept->cid == cid) {
			spin_unlock_irqrestore(&remote_endpoints_lock, flags);
			return ept;
		}
	}
	spin_unlock_irqrestore(&remote_endpoints_lock, flags);
	return NULL;
}

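/*
 * Router-to-router control message dispatch.  HELLO completes the handshake
 * (we reply with our own HELLO and advertise every local server), RESUME_TX
 * resets a remote endpoint's tx quota, NEW_SERVER/REMOVE_SERVER maintain the
 * server list, and REMOVE_CLIENT tears down the matching remote endpoint.
 */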
static int process_control_msg(union rr_control_msg *msg, int len)
{
	union rr_control_msg ctl;
	struct rr_server *server;
	struct rr_remote_endpoint *r_ept;
	int rc = 0;
	unsigned long flags;

	if (len != sizeof(*msg)) {
		printk(KERN_ERR "rpcrouter: r2r msg size %d != %zu\n",
		       len, sizeof(*msg));
		return -EINVAL;
	}

	switch (msg->cmd) {
	case RPCROUTER_CTRL_CMD_HELLO:
		RR("o HELLO\n");

		RR("x HELLO\n");
		memset(&ctl, 0, sizeof(ctl));
		ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
		rpcrouter_send_control_msg(&ctl);

		initialized = 1;

		/* Send list of servers one at a time */
		ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;

		/* TODO: long time to hold a spinlock... */
		spin_lock_irqsave(&server_list_lock, flags);
		list_for_each_entry(server, &server_list, list) {
			ctl.srv.pid = server->pid;
			ctl.srv.cid = server->cid;
			ctl.srv.prog = server->prog;
			ctl.srv.vers = server->vers;

			RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
			   server->pid, server->cid,
			   server->prog, server->vers);

			rpcrouter_send_control_msg(&ctl);
		}
		spin_unlock_irqrestore(&server_list_lock, flags);

		queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev);
		break;

	case RPCROUTER_CTRL_CMD_RESUME_TX:
		RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);

		r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
		if (!r_ept) {
			printk(KERN_ERR
			       "rpcrouter: Unable to resume client\n");
			break;
		}
		spin_lock_irqsave(&r_ept->quota_lock, flags);
		r_ept->tx_quota_cntr = 0;
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		wake_up(&r_ept->quota_wait);
		break;

	case RPCROUTER_CTRL_CMD_NEW_SERVER:
		RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
		   msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);

		server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);

		if (!server) {
			server = rpcrouter_create_server(
				msg->srv.pid, msg->srv.cid,
				msg->srv.prog, msg->srv.vers);
			if (IS_ERR(server))
				return PTR_ERR(server);
			/*
			 * XXX: Verify that its okay to add the
			 * client to our remote client list
			 * if we get a NEW_SERVER notification
			 */
			if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
				rc = rpcrouter_create_remote_endpoint(
					msg->srv.cid);
				if (rc < 0)
					printk(KERN_ERR
					       "rpcrouter: Client create "
					       "error (%d)\n", rc);
			}
			schedule_work(&work_create_pdevs);
			wake_up(&newserver_wait);
		} else {
			if ((server->pid == msg->srv.pid) &&
			    (server->cid == msg->srv.cid)) {
				printk(KERN_ERR "rpcrouter: Duplicate svr\n");
			} else {
				server->pid = msg->srv.pid;
				server->cid = msg->srv.cid;
			}
		}
		break;

	case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
		RR("o REMOVE_SERVER prog=%08x:%d\n",
		   msg->srv.prog, msg->srv.vers);
		server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
		if (server)
			rpcrouter_destroy_server(server);
		break;

	case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
		RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
		if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
			printk(KERN_ERR
			       "rpcrouter: Denying remote removal of "
			       "local client\n");
			break;
		}
		r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
		if (r_ept) {
			spin_lock_irqsave(&remote_endpoints_lock, flags);
			list_del(&r_ept->list);
			spin_unlock_irqrestore(&remote_endpoints_lock, flags);
			kfree(r_ept);
		}

		/* Notify local clients of this event */
		printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
		rc = -ENOSYS;

		break;
	default:
		RR("o UNKNOWN(%08x)\n", msg->cmd);
		rc = -ENOSYS;
	}

	return rc;
}

static void do_create_rpcrouter_pdev(struct work_struct *work)
{
	platform_device_register(&rpcrouter_pdev);
}

static void do_create_pdevs(struct work_struct *work)
{
	unsigned long flags;
	struct rr_server *server;

	/* TODO: race if destroyed while being registered */
	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->pid == RPCROUTER_PID_REMOTE) {
			if (server->pdev_name[0] == 0) {
				spin_unlock_irqrestore(&server_list_lock,
						       flags);
				msm_rpcrouter_create_server_pdev(server);
				schedule_work(&work_create_pdevs);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
}

static void rpcrouter_smdnotify(void *_dev, unsigned event)
{
	if (event != SMD_EVENT_DATA)
		return;

	wake_up(&smd_wait);
}

static void *rr_malloc(unsigned sz)
{
	void *ptr = kmalloc(sz, GFP_KERNEL);
	if (ptr)
		return ptr;

	printk(KERN_ERR "rpcrouter: kmalloc of %u failed, retrying...\n", sz);
	do {
		ptr = kmalloc(sz, GFP_KERNEL);
	} while (!ptr);

	return ptr;
}

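/*
 * Blocking read of exactly 'len' bytes from the shared memory channel.
 * Sleeps on smd_wait (woken from rpcrouter_smdnotify) until enough data
 * has accumulated, then drains it under smd_lock.
 */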
/* TODO: deal with channel teardown / restore */
static int rr_read(void *data, int len)
{
	int rc;
	unsigned long flags;
/*	printk("rr_read() %d\n", len); */
	for (;;) {
		spin_lock_irqsave(&smd_lock, flags);
		if (smd_read_avail(smd_channel) >= len) {
			rc = smd_read(smd_channel, data, len);
			spin_unlock_irqrestore(&smd_lock, flags);
			if (rc == len)
				return 0;
			else
				return -EIO;
		}
		rpcrouter_need_len = len;
		spin_unlock_irqrestore(&smd_lock, flags);

/*		printk("rr_read: waiting (%d)\n", len); */
		wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
	}
	return 0;
}

static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];

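/*
 * Main receive work.  Each pass pulls one rr_header plus payload off the
 * SMD channel: traffic addressed to RPCROUTER_ROUTER_ADDRESS is handed to
 * process_control_msg(), everything else carries a 32-bit pacmark word and
 * a fragment that is either appended to a matching incomplete packet (by
 * mid) or starts a new one.  Completed packets land on the destination
 * endpoint's read_q; confirm_rx triggers a RESUME_TX back to the sender.
 * The work re-queues itself after every message.
 */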
static void do_read_data(struct work_struct *work)
{
	struct rr_header hdr;
	struct rr_packet *pkt;
	struct rr_fragment *frag;
	struct msm_rpc_endpoint *ept;
	uint32_t pm, mid;
	unsigned long flags;

	if (rr_read(&hdr, sizeof(hdr)))
		goto fail_io;

#if TRACE_R2R_RAW
	RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
	   hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
	   hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
#endif

	if (hdr.version != RPCROUTER_VERSION) {
		DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
		goto fail_data;
	}
	if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
		DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
		goto fail_data;
	}

	if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
		if (rr_read(r2r_buf, hdr.size))
			goto fail_io;
		process_control_msg((void *) r2r_buf, hdr.size);
		goto done;
	}

	if (hdr.size < sizeof(pm)) {
		DIAG("runt packet (no pacmark)\n");
		goto fail_data;
	}
	if (rr_read(&pm, sizeof(pm)))
		goto fail_io;

	hdr.size -= sizeof(pm);

	frag = rr_malloc(hdr.size + sizeof(*frag));
	frag->next = NULL;
	frag->length = hdr.size;
	if (rr_read(frag->data, hdr.size))
		goto fail_io;

	ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
	if (!ept) {
		DIAG("no local ept for cid %08x\n", hdr.dst_cid);
		kfree(frag);
		goto done;
	}

	/* See if there is already a partial packet that matches our mid
	 * and if so, append this fragment to that packet.
	 */
	mid = PACMARK_MID(pm);
	list_for_each_entry(pkt, &ept->incomplete, list) {
		if (pkt->mid == mid) {
			pkt->last->next = frag;
			pkt->last = frag;
			pkt->length += frag->length;
			if (PACMARK_LAST(pm)) {
				list_del(&pkt->list);
				goto packet_complete;
			}
			goto done;
		}
	}
	/* This mid is new -- create a packet for it, and put it on
	 * the incomplete list if this fragment is not a last fragment,
	 * otherwise put it on the read queue.
	 */
	pkt = rr_malloc(sizeof(struct rr_packet));
	pkt->first = frag;
	pkt->last = frag;
	memcpy(&pkt->hdr, &hdr, sizeof(hdr));
	pkt->mid = mid;
	pkt->length = frag->length;
	if (!PACMARK_LAST(pm)) {
		list_add_tail(&pkt->list, &ept->incomplete);
		goto done;
	}

packet_complete:
	spin_lock_irqsave(&ept->read_q_lock, flags);
	list_add_tail(&pkt->list, &ept->read_q);
	wake_up(&ept->wait_q);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);
done:

	if (hdr.confirm_rx) {
		union rr_control_msg msg;

		msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
		msg.cli.pid = hdr.dst_pid;
		msg.cli.cid = hdr.dst_cid;

		RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
		rpcrouter_send_control_msg(&msg);
	}

	queue_work(rpcrouter_workqueue, &work_read_data);
	return;

fail_io:
fail_data:
	printk(KERN_ERR "rpc_router has died\n");
}

void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
		       uint32_t vers, uint32_t proc)
{
	memset(hdr, 0, sizeof(struct rpc_request_hdr));
	hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
	hdr->rpc_vers = cpu_to_be32(2);
	hdr->prog = cpu_to_be32(prog);
	hdr->vers = cpu_to_be32(vers);
	hdr->procedure = cpu_to_be32(proc);
}

struct msm_rpc_endpoint *msm_rpc_open(void)
{
	struct msm_rpc_endpoint *ept;

	ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
	if (ept == NULL)
		return ERR_PTR(-ENOMEM);

	return ept;
}

int msm_rpc_close(struct msm_rpc_endpoint *ept)
{
	return msm_rpcrouter_destroy_local_endpoint(ept);
}
EXPORT_SYMBOL(msm_rpc_close);

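/*
 * Outbound writes are currently limited to a single fragment (fragmentation
 * of large packets is still a TODO).  Each destination has an rx quota: once
 * RPCROUTER_DEFAULT_RX_QUOTA headers have been sent we set confirm_rx and
 * block further writes until the peer answers with RESUME_TX.
 */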
int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
{
	struct rr_header hdr;
	uint32_t pacmark;
	struct rpc_request_hdr *rq = buffer;
	struct rr_remote_endpoint *r_ept;
	unsigned long flags;
	int needed;
	DEFINE_WAIT(__wait);

	/* TODO: fragmentation for large outbound packets */
	if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
		return -EINVAL;

	/* snoop the RPC packet and enforce permissions */

	/* has to have at least the xid and type fields */
	if (count < (sizeof(uint32_t) * 2)) {
		printk(KERN_ERR "rr_write: rejecting runt packet\n");
		return -EINVAL;
	}

	if (rq->type == 0) {
		/* RPC CALL */
		if (count < (sizeof(uint32_t) * 6)) {
			printk(KERN_ERR
			       "rr_write: rejecting runt call packet\n");
			return -EINVAL;
		}
		if (ept->dst_pid == 0xffffffff) {
			printk(KERN_ERR "rr_write: not connected\n");
			return -ENOTCONN;
		}

#if CONFIG_MSM_AMSS_VERSION >= 6350
		if ((ept->dst_prog != rq->prog) ||
		    !msm_rpc_is_compatible_version(
					be32_to_cpu(ept->dst_vers),
					be32_to_cpu(rq->vers))) {
#else
		if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) {
#endif
			printk(KERN_ERR
			       "rr_write: cannot write to %08x:%d "
			       "(bound to %08x:%d)\n",
			       be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
			       be32_to_cpu(ept->dst_prog),
			       be32_to_cpu(ept->dst_vers));
			return -EINVAL;
		}
		hdr.dst_pid = ept->dst_pid;
		hdr.dst_cid = ept->dst_cid;
		IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n",
		   ept,
		   be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
		   ept->dst_pid, ept->dst_cid, count,
		   be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure));
	} else {
		/* RPC REPLY */
		/* TODO: locking */
		if (ept->reply_pid == 0xffffffff) {
			printk(KERN_ERR
			       "rr_write: rejecting unexpected reply\n");
			return -EINVAL;
		}
		if (ept->reply_xid != rq->xid) {
			printk(KERN_ERR
			       "rr_write: rejecting packet w/ bad xid\n");
			return -EINVAL;
		}

		hdr.dst_pid = ept->reply_pid;
		hdr.dst_cid = ept->reply_cid;

		/* consume this reply */
		ept->reply_pid = 0xffffffff;

		IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
		   ept,
		   be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
	}

	r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);

	if (!r_ept) {
		printk(KERN_ERR
		       "msm_rpc_write(): No route to ept "
		       "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
		return -EHOSTUNREACH;
	}

	/* Create routing header */
	hdr.type = RPCROUTER_CTRL_CMD_DATA;
	hdr.version = RPCROUTER_VERSION;
	hdr.src_pid = ept->pid;
	hdr.src_cid = ept->cid;
	hdr.confirm_rx = 0;
	hdr.size = count + sizeof(uint32_t);

	for (;;) {
		prepare_to_wait(&r_ept->quota_wait, &__wait,
				TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&r_ept->quota_lock, flags);
		if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
			break;
		if (signal_pending(current) &&
		    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
			break;
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		schedule();
	}
	finish_wait(&r_ept->quota_wait, &__wait);

	if (signal_pending(current) &&
	    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		return -ERESTARTSYS;
	}
	r_ept->tx_quota_cntr++;
	if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
		hdr.confirm_rx = 1;

	/* bump pacmark while interrupts disabled to avoid race
	 * probably should be atomic op instead
	 */
	pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);

	spin_unlock_irqrestore(&r_ept->quota_lock, flags);

	spin_lock_irqsave(&smd_lock, flags);

	needed = sizeof(hdr) + hdr.size;
	while (smd_write_avail(smd_channel) < needed) {
		spin_unlock_irqrestore(&smd_lock, flags);
		msleep(250);
		spin_lock_irqsave(&smd_lock, flags);
	}

	/* TODO: deal with full fifo */
	smd_write(smd_channel, &hdr, sizeof(hdr));
	smd_write(smd_channel, &pacmark, sizeof(pacmark));
	smd_write(smd_channel, buffer, count);

	spin_unlock_irqrestore(&smd_lock, flags);

	return count;
}
EXPORT_SYMBOL(msm_rpc_write);

/*
 * NOTE: It is the responsibility of the caller to kfree buffer
 */
int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
		 unsigned user_len, long timeout)
{
	struct rr_fragment *frag, *next;
	char *buf;
	int rc;

	rc = __msm_rpc_read(ept, &frag, user_len, timeout);
	if (rc <= 0)
		return rc;

	/* single-fragment messages conveniently can be
	 * returned as-is (the buffer is at the front)
	 */
	if (frag->next == 0) {
		*buffer = (void *) frag;
		return rc;
	}

	/* multi-fragment messages, we have to do it the
	 * hard way, which is rather disgusting right now
	 */
	buf = rr_malloc(rc);
	*buffer = buf;

	while (frag != NULL) {
		memcpy(buf, frag->data, frag->length);
		next = frag->next;
		buf += frag->length;
		kfree(frag);
		frag = next;
	}

	return rc;
}

int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
		 void *_request, int request_size,
		 long timeout)
{
	return msm_rpc_call_reply(ept, proc,
				  _request, request_size,
				  NULL, 0, timeout);
}
EXPORT_SYMBOL(msm_rpc_call);

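/*
 * Issue a call and wait for the matching reply.  The caller's request
 * buffer must begin with a struct rpc_request_hdr, which is filled in here
 * (xid, rpc_vers, prog/vers from the endpoint, procedure).  Replies are
 * matched by xid; stale or CALL-type packets are discarded.  A non-zero
 * reply_stat maps to -EPERM and a non-zero accept_stat to -EINVAL; the
 * reply is copied to _reply only if it fits in reply_size (else -ENOMEM).
 */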
int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
		       void *_request, int request_size,
		       void *_reply, int reply_size,
		       long timeout)
{
	struct rpc_request_hdr *req = _request;
	struct rpc_reply_hdr *reply;
	int rc;

	if (request_size < sizeof(*req))
		return -ETOOSMALL;

	if (ept->dst_pid == 0xffffffff)
		return -ENOTCONN;

	/* We can't use msm_rpc_setup_req() here, because dst_prog and
	 * dst_vers here are already in BE.
	 */
	memset(req, 0, sizeof(*req));
	req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
	req->rpc_vers = cpu_to_be32(2);
	req->prog = ept->dst_prog;
	req->vers = ept->dst_vers;
	req->procedure = cpu_to_be32(proc);

	rc = msm_rpc_write(ept, req, request_size);
	if (rc < 0)
		return rc;

	for (;;) {
		rc = msm_rpc_read(ept, (void *) &reply, -1, timeout);
		if (rc < 0)
			return rc;
		if (rc < (3 * sizeof(uint32_t))) {
			rc = -EIO;
			break;
		}
		/* we should not get CALL packets -- ignore them */
		if (reply->type == 0) {
			kfree(reply);
			continue;
		}
		/* If an earlier call timed out, we could get the (no
		 * longer wanted) reply for it.  Ignore replies that
		 * we don't expect.
		 */
		if (reply->xid != req->xid) {
			kfree(reply);
			continue;
		}
		if (reply->reply_stat != 0) {
			rc = -EPERM;
			break;
		}
		if (reply->data.acc_hdr.accept_stat != 0) {
			rc = -EINVAL;
			break;
		}
		if (_reply == NULL) {
			rc = 0;
			break;
		}
		if (rc > reply_size) {
			rc = -ENOMEM;
		} else {
			memcpy(_reply, reply, rc);
		}
		break;
	}
	kfree(reply);
	return rc;
}
EXPORT_SYMBOL(msm_rpc_call_reply);


static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
{
	unsigned long flags;
	int ret;
	spin_lock_irqsave(&ept->read_q_lock, flags);
	ret = !list_empty(&ept->read_q);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);
	return ret;
}

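/*
 * Core read path.  A negative timeout means wait indefinitely; waits are
 * interruptible unless MSM_RPC_UNINTERRUPTIBLE is set on the endpoint.
 * The oldest packet on read_q is returned as a fragment chain; if it is an
 * RPC CALL, the reply routing info (source pid/cid, xid) is latched on the
 * endpoint so a subsequent reply write can be addressed.  Returns
 * -ETOOSMALL if the caller's length cannot hold the packet.
 */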
int __msm_rpc_read(struct msm_rpc_endpoint *ept,
		   struct rr_fragment **frag_ret,
		   unsigned len, long timeout)
{
	struct rr_packet *pkt;
	struct rpc_request_hdr *rq;
	DEFINE_WAIT(__wait);
	unsigned long flags;
	int rc;

	IO("READ on ept %p\n", ept);

	if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
		if (timeout < 0) {
			wait_event(ept->wait_q, ept_packet_available(ept));
		} else {
			rc = wait_event_timeout(
				ept->wait_q, ept_packet_available(ept),
				timeout);
			if (rc == 0)
				return -ETIMEDOUT;
		}
	} else {
		if (timeout < 0) {
			rc = wait_event_interruptible(
				ept->wait_q, ept_packet_available(ept));
			if (rc < 0)
				return rc;
		} else {
			rc = wait_event_interruptible_timeout(
				ept->wait_q, ept_packet_available(ept),
				timeout);
			if (rc == 0)
				return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -EAGAIN;
	}
	pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
	if (pkt->length > len) {
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -ETOOSMALL;
	}
	list_del(&pkt->list);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	rc = pkt->length;

	*frag_ret = pkt->first;
	rq = (void *) pkt->first->data;
	if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
		IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
		   ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
		   be32_to_cpu(rq->procedure),
		   be32_to_cpu(rq->xid));
		/* RPC CALL */
		if (ept->reply_pid != 0xffffffff) {
			printk(KERN_WARNING
			       "rr_read: lost previous reply xid...\n");
		}
		/* TODO: locking? */
		ept->reply_pid = pkt->hdr.src_pid;
		ept->reply_cid = pkt->hdr.src_cid;
		ept->reply_xid = rq->xid;
	}
#if TRACE_RPC_MSG
	else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))
		IO("READ on ept %p is a REPLY\n", ept);
	else IO("READ on ept %p (%d bytes)\n", ept, rc);
#endif

	kfree(pkt);
	return rc;
}

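/*
 * Versioning scheme (AMSS >= 6350): the mode bits of the server and client
 * versions must match exactly.  In "versioned" mode the whole word must
 * match; otherwise the major fields must be equal and the server's minor
 * must be at least the client's.
 */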
#if CONFIG_MSM_AMSS_VERSION >= 6350
int msm_rpc_is_compatible_version(uint32_t server_version,
				  uint32_t client_version)
{
	if ((server_version & RPC_VERSION_MODE_MASK) !=
	    (client_version & RPC_VERSION_MODE_MASK))
		return 0;

	if (server_version & RPC_VERSION_MODE_MASK)
		return server_version == client_version;

	return ((server_version & RPC_VERSION_MAJOR_MASK) ==
		(client_version & RPC_VERSION_MAJOR_MASK)) &&
	       ((server_version & RPC_VERSION_MINOR_MASK) >=
		(client_version & RPC_VERSION_MINOR_MASK));
}
EXPORT_SYMBOL(msm_rpc_is_compatible_version);

static int msm_rpc_get_compatible_server(uint32_t prog,
					 uint32_t ver,
					 uint32_t *found_vers)
{
	struct rr_server *server;
	unsigned long flags;
	if (found_vers == NULL)
		return 0;

	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if ((server->prog == prog) &&
		    msm_rpc_is_compatible_version(server->vers, ver)) {
			*found_vers = server->vers;
			spin_unlock_irqrestore(&server_list_lock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
	return -1;
}
#endif

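/*
 * Typical client usage (illustrative sketch only, not part of this driver;
 * MY_PROG/MY_VERS/MY_PROC and the request/reply structs are hypothetical
 * and must match the remote program's XDR layout):
 *
 *	struct my_req { struct rpc_request_hdr hdr; uint32_t arg; } req;
 *	struct my_rep { struct rpc_reply_hdr hdr; uint32_t result; } rep;
 *	struct msm_rpc_endpoint *ep;
 *	int rc;
 *
 *	ep = msm_rpc_connect(MY_PROG, MY_VERS, 0);
 *	if (IS_ERR(ep))
 *		return PTR_ERR(ep);
 *	req.arg = cpu_to_be32(42);
 *	rc = msm_rpc_call_reply(ep, MY_PROC, &req, sizeof(req),
 *				&rep, sizeof(rep), 5 * HZ);
 *	msm_rpc_close(ep);
 */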
struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
{
	struct msm_rpc_endpoint *ept;
	struct rr_server *server;

#if CONFIG_MSM_AMSS_VERSION >= 6350
	if (!(vers & RPC_VERSION_MODE_MASK)) {
		uint32_t found_vers;
		if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0)
			return ERR_PTR(-EHOSTUNREACH);
		if (found_vers != vers) {
			D("RPC using new version %08x:{%08x --> %08x}\n",
			  prog, vers, found_vers);
			vers = found_vers;
		}
	}
#endif

	server = rpcrouter_lookup_server(prog, vers);
	if (!server)
		return ERR_PTR(-EHOSTUNREACH);

	ept = msm_rpc_open();
	if (IS_ERR(ept))
		return ept;

	ept->flags = flags;
	ept->dst_pid = server->pid;
	ept->dst_cid = server->cid;
	ept->dst_prog = cpu_to_be32(prog);
	ept->dst_vers = cpu_to_be32(vers);

	return ept;
}
EXPORT_SYMBOL(msm_rpc_connect);

uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept)
{
	return be32_to_cpu(ept->dst_vers);
}
EXPORT_SYMBOL(msm_rpc_get_vers);

/* TODO: permission check? */
int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
			    uint32_t prog, uint32_t vers)
{
	int rc;
	union rr_control_msg msg;
	struct rr_server *server;

	server = rpcrouter_create_server(ept->pid, ept->cid,
					 prog, vers);
	if (IS_ERR(server))
		return PTR_ERR(server);

	msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
	msg.srv.pid = ept->pid;
	msg.srv.cid = ept->cid;
	msg.srv.prog = prog;
	msg.srv.vers = vers;

	RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
	   ept->pid, ept->cid, prog, vers);

	rc = rpcrouter_send_control_msg(&msg);
	if (rc < 0)
		return rc;

	return 0;
}

/* TODO: permission check -- disallow unreg of somebody else's server */
int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
			      uint32_t prog, uint32_t vers)
{
	struct rr_server *server;
	server = rpcrouter_lookup_server(prog, vers);

	if (!server)
		return -ENOENT;
	rpcrouter_destroy_server(server);
	return 0;
}

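/*
 * Probe runs when SMD reports the "SMD_RPCCALL" channel: set up the local
 * state and worker, create the router device nodes, open the channel and
 * kick off the first read.  The HELLO exchange in process_control_msg()
 * finishes initialization and registers the "oncrpc_router" platform
 * device for RPC client drivers to bind against.
 */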
static int msm_rpcrouter_probe(struct platform_device *pdev)
{
	int rc;

	/* Initialize what we need to start processing */
	INIT_LIST_HEAD(&local_endpoints);
	INIT_LIST_HEAD(&remote_endpoints);

	init_waitqueue_head(&newserver_wait);
	init_waitqueue_head(&smd_wait);

	rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
	if (!rpcrouter_workqueue)
		return -ENOMEM;

	rc = msm_rpcrouter_init_devices();
	if (rc < 0)
		goto fail_destroy_workqueue;

	/* Open up SMD channel 2 */
	initialized = 0;
	rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
	if (rc < 0)
		goto fail_remove_devices;

	queue_work(rpcrouter_workqueue, &work_read_data);
	return 0;

 fail_remove_devices:
	msm_rpcrouter_exit_devices();
 fail_destroy_workqueue:
	destroy_workqueue(rpcrouter_workqueue);
	return rc;
}

static struct platform_driver msm_smd_channel2_driver = {
	.probe = msm_rpcrouter_probe,
	.driver = {
		.name = "SMD_RPCCALL",
		.owner = THIS_MODULE,
	},
};

static int __init rpcrouter_init(void)
{
	return platform_driver_register(&msm_smd_channel2_driver);
}

module_init(rpcrouter_init);
MODULE_DESCRIPTION("MSM RPC Router");
MODULE_AUTHOR("San Mehat <san@android.com>");
MODULE_LICENSE("GPL");