]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/staging/dream/smd/smd_rpcrouter.c
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[net-next-2.6.git] / drivers / staging / dream / smd / smd_rpcrouter.c
CommitLineData
7bf06dac
BS
1/* arch/arm/mach-msm/smd_rpcrouter.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2009 QUALCOMM Incorporated.
5 * Author: San Mehat <san@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* TODO: handle cases where smd_write() will tempfail due to full fifo */
19/* TODO: thread priority? schedule a work to bump it? */
20/* TODO: maybe make server_list_lock a mutex */
21/* TODO: pool fragments to avoid kmalloc/kfree churn */
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/string.h>
26#include <linux/errno.h>
27#include <linux/cdev.h>
28#include <linux/init.h>
29#include <linux/device.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/fs.h>
33#include <linux/err.h>
34#include <linux/sched.h>
35#include <linux/poll.h>
5a0e3ad6 36#include <linux/slab.h>
7bf06dac
BS
37#include <asm/uaccess.h>
38#include <asm/byteorder.h>
39#include <linux/platform_device.h>
40#include <linux/uaccess.h>
41
7bf06dac
BS
42#include <mach/msm_smd.h>
43#include "smd_rpcrouter.h"
44
/* Compile-time trace/debug switches; all disabled by default. */
45#define TRACE_R2R_MSG 0
46#define TRACE_R2R_RAW 0
47#define TRACE_RPC_MSG 0
48#define TRACE_NOTIFY_MSG 0
49
50#define MSM_RPCROUTER_DEBUG 0
51#define MSM_RPCROUTER_DEBUG_PKT 0
52#define MSM_RPCROUTER_R2R_DEBUG 0
53#define DUMP_ALL_RECEIVED_HEADERS 0
54
/* DIAG is always on; D/RR/IO/NTFY compile away unless their switch is set. */
55#define DIAG(x...) printk("[RR] ERROR " x)
56
57#if MSM_RPCROUTER_DEBUG
58#define D(x...) printk(x)
59#else
60#define D(x...) do {} while (0)
61#endif
62
63#if TRACE_R2R_MSG
64#define RR(x...) printk("[RR] "x)
65#else
66#define RR(x...) do {} while (0)
67#endif
68
69#if TRACE_RPC_MSG
70#define IO(x...) printk("[RPC] "x)
71#else
72#define IO(x...) do {} while (0)
73#endif
74
75#if TRACE_NOTIFY_MSG
76#define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x)
77#else
78#define NTFY(x...) do {} while (0)
79#endif
80
/* Registries of endpoints and servers; each list is guarded by the
 * correspondingly named spinlock declared below.
 */
81static LIST_HEAD(local_endpoints);
82static LIST_HEAD(remote_endpoints);
83
84static LIST_HEAD(server_list);
85
/* Single shared SMD channel to the remote processor; "initialized" is set
 * once the HELLO handshake with the remote router completes.
 */
86static smd_channel_t *smd_channel;
87static int initialized;
88static wait_queue_head_t newserver_wait;
89static wait_queue_head_t smd_wait;
90
91static DEFINE_SPINLOCK(local_endpoints_lock);
92static DEFINE_SPINLOCK(remote_endpoints_lock);
93static DEFINE_SPINLOCK(server_list_lock);
94static DEFINE_SPINLOCK(smd_lock);
95
96static struct workqueue_struct *rpcrouter_workqueue;
7bf06dac
BS
97static int rpcrouter_need_len;
98
/* next_xid seeds RPC transaction ids; next_pacmarkid tags outbound
 * fragment streams (bumped under r_ept->quota_lock in msm_rpc_write).
 */
99static atomic_t next_xid = ATOMIC_INIT(1);
100static uint8_t next_pacmarkid;
101
102static void do_read_data(struct work_struct *work);
103static void do_create_pdevs(struct work_struct *work);
104static void do_create_rpcrouter_pdev(struct work_struct *work);
105
106static DECLARE_WORK(work_read_data, do_read_data);
107static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
108static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
109
/* NOTE(review): RR_STATE_* and the_rr_context appear unused in this file --
 * likely leftovers from an earlier incremental-assembly design.
 */
110#define RR_STATE_IDLE 0
111#define RR_STATE_HEADER 1
112#define RR_STATE_BODY 2
113#define RR_STATE_ERROR 3
114
115struct rr_context {
116 struct rr_packet *pkt;
117 uint8_t *ptr;
118 uint32_t state; /* current assembly state */
119 uint32_t count; /* bytes needed in this state */
120};
121
a5ca2dfc 122static struct rr_context the_rr_context;
7bf06dac
BS
123
/* Registered once the HELLO handshake completes (see process_control_msg). */
124static struct platform_device rpcrouter_pdev = {
125 .name = "oncrpc_router",
126 .id = -1,
127};
128
129
/*
 * rpcrouter_send_control_msg() - send a router-to-router control message
 * over the shared SMD channel, preceded by an rr_header addressed to the
 * remote router (dst_cid == RPCROUTER_ROUTER_ADDRESS).
 *
 * Returns 0 on success, or -EINVAL if the router has not finished its
 * HELLO handshake (only HELLO itself may be sent before that).
 *
 * NOTE(review): polls in 250ms msleep() steps while the channel is full,
 * so this must only be called from process context.
 */
130static int rpcrouter_send_control_msg(union rr_control_msg *msg)
131{
132 struct rr_header hdr;
133 unsigned long flags;
134 int need;
135
136 if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
137 printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
138 "router not initialized\n");
139 return -EINVAL;
140 }
141
142 hdr.version = RPCROUTER_VERSION;
143 hdr.type = msg->cmd;
144 hdr.src_pid = RPCROUTER_PID_LOCAL;
145 hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
146 hdr.confirm_rx = 0;
147 hdr.size = sizeof(*msg);
148 hdr.dst_pid = 0;
149 hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
150
151 /* TODO: what if channel is full? */
152
/* Drop the lock while sleeping so the reader can drain the channel,
 * then recheck available space with the lock held again.
 */
153 need = sizeof(hdr) + hdr.size;
154 spin_lock_irqsave(&smd_lock, flags);
155 while (smd_write_avail(smd_channel) < need) {
156 spin_unlock_irqrestore(&smd_lock, flags);
157 msleep(250);
158 spin_lock_irqsave(&smd_lock, flags);
159 }
/* Header and body are written back-to-back under smd_lock so they cannot
 * interleave with a concurrent msm_rpc_write().
 */
160 smd_write(smd_channel, &hdr, sizeof(hdr));
161 smd_write(smd_channel, msg, hdr.size);
162 spin_unlock_irqrestore(&smd_lock, flags);
163 return 0;
164}
165
166static struct rr_server *rpcrouter_create_server(uint32_t pid,
167 uint32_t cid,
168 uint32_t prog,
169 uint32_t ver)
170{
171 struct rr_server *server;
172 unsigned long flags;
173 int rc;
174
175 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
176 if (!server)
177 return ERR_PTR(-ENOMEM);
178
179 memset(server, 0, sizeof(struct rr_server));
180 server->pid = pid;
181 server->cid = cid;
182 server->prog = prog;
183 server->vers = ver;
184
185 spin_lock_irqsave(&server_list_lock, flags);
186 list_add_tail(&server->list, &server_list);
187 spin_unlock_irqrestore(&server_list_lock, flags);
188
189 if (pid == RPCROUTER_PID_REMOTE) {
190 rc = msm_rpcrouter_create_server_cdev(server);
191 if (rc < 0)
192 goto out_fail;
193 }
194 return server;
195out_fail:
196 spin_lock_irqsave(&server_list_lock, flags);
197 list_del(&server->list);
198 spin_unlock_irqrestore(&server_list_lock, flags);
199 kfree(server);
200 return ERR_PTR(rc);
201}
202
203static void rpcrouter_destroy_server(struct rr_server *server)
204{
205 unsigned long flags;
206
207 spin_lock_irqsave(&server_list_lock, flags);
208 list_del(&server->list);
209 spin_unlock_irqrestore(&server_list_lock, flags);
210 device_destroy(msm_rpcrouter_class, server->device_number);
211 kfree(server);
212}
213
214static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
215{
216 struct rr_server *server;
217 unsigned long flags;
218
219 spin_lock_irqsave(&server_list_lock, flags);
220 list_for_each_entry(server, &server_list, list) {
221 if (server->prog == prog
222 && server->vers == ver) {
223 spin_unlock_irqrestore(&server_list_lock, flags);
224 return server;
225 }
226 }
227 spin_unlock_irqrestore(&server_list_lock, flags);
228 return NULL;
229}
230
231static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
232{
233 struct rr_server *server;
234 unsigned long flags;
235
236 spin_lock_irqsave(&server_list_lock, flags);
237 list_for_each_entry(server, &server_list, list) {
238 if (server->device_number == dev) {
239 spin_unlock_irqrestore(&server_list_lock, flags);
240 return server;
241 }
242 }
243 spin_unlock_irqrestore(&server_list_lock, flags);
244 return NULL;
245}
246
247struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
248{
249 struct msm_rpc_endpoint *ept;
250 unsigned long flags;
251
252 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
253 if (!ept)
254 return NULL;
255 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
256
257 /* mark no reply outstanding */
258 ept->reply_pid = 0xffffffff;
259
260 ept->cid = (uint32_t) ept;
261 ept->pid = RPCROUTER_PID_LOCAL;
262 ept->dev = dev;
263
264 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
265 struct rr_server *srv;
266 /*
267 * This is a userspace client which opened
268 * a program/ver devicenode. Bind the client
269 * to that destination
270 */
271 srv = rpcrouter_lookup_server_by_dev(dev);
272 /* TODO: bug? really? */
273 BUG_ON(!srv);
274
275 ept->dst_pid = srv->pid;
276 ept->dst_cid = srv->cid;
277 ept->dst_prog = cpu_to_be32(srv->prog);
278 ept->dst_vers = cpu_to_be32(srv->vers);
279
280 D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers);
281 } else {
282 /* mark not connected */
283 ept->dst_pid = 0xffffffff;
284 D("Creating a master local ept %p\n", ept);
285 }
286
287 init_waitqueue_head(&ept->wait_q);
288 INIT_LIST_HEAD(&ept->read_q);
289 spin_lock_init(&ept->read_q_lock);
7bf06dac
BS
290 INIT_LIST_HEAD(&ept->incomplete);
291
292 spin_lock_irqsave(&local_endpoints_lock, flags);
293 list_add_tail(&ept->list, &local_endpoints);
294 spin_unlock_irqrestore(&local_endpoints_lock, flags);
295 return ept;
296}
297
298int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
299{
300 int rc;
301 union rr_control_msg msg;
302
303 msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
304 msg.cli.pid = ept->pid;
305 msg.cli.cid = ept->cid;
306
307 RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
308 rc = rpcrouter_send_control_msg(&msg);
309 if (rc < 0)
310 return rc;
311
7bf06dac
BS
312 list_del(&ept->list);
313 kfree(ept);
314 return 0;
315}
316
317static int rpcrouter_create_remote_endpoint(uint32_t cid)
318{
319 struct rr_remote_endpoint *new_c;
320 unsigned long flags;
321
322 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
323 if (!new_c)
324 return -ENOMEM;
325 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
326
327 new_c->cid = cid;
328 new_c->pid = RPCROUTER_PID_REMOTE;
329 init_waitqueue_head(&new_c->quota_wait);
330 spin_lock_init(&new_c->quota_lock);
331
332 spin_lock_irqsave(&remote_endpoints_lock, flags);
333 list_add_tail(&new_c->list, &remote_endpoints);
334 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
335 return 0;
336}
337
338static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
339{
340 struct msm_rpc_endpoint *ept;
341 unsigned long flags;
342
343 spin_lock_irqsave(&local_endpoints_lock, flags);
344 list_for_each_entry(ept, &local_endpoints, list) {
345 if (ept->cid == cid) {
346 spin_unlock_irqrestore(&local_endpoints_lock, flags);
347 return ept;
348 }
349 }
350 spin_unlock_irqrestore(&local_endpoints_lock, flags);
351 return NULL;
352}
353
354static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
355{
356 struct rr_remote_endpoint *ept;
357 unsigned long flags;
358
359 spin_lock_irqsave(&remote_endpoints_lock, flags);
360 list_for_each_entry(ept, &remote_endpoints, list) {
361 if (ept->cid == cid) {
362 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
363 return ept;
364 }
365 }
366 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
367 return NULL;
368}
369
370static int process_control_msg(union rr_control_msg *msg, int len)
371{
372 union rr_control_msg ctl;
373 struct rr_server *server;
374 struct rr_remote_endpoint *r_ept;
375 int rc = 0;
376 unsigned long flags;
377
378 if (len != sizeof(*msg)) {
379 printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
380 len, sizeof(*msg));
381 return -EINVAL;
382 }
383
384 switch (msg->cmd) {
385 case RPCROUTER_CTRL_CMD_HELLO:
386 RR("o HELLO\n");
387
388 RR("x HELLO\n");
389 memset(&ctl, 0, sizeof(ctl));
390 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
391 rpcrouter_send_control_msg(&ctl);
392
393 initialized = 1;
394
395 /* Send list of servers one at a time */
396 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
397
398 /* TODO: long time to hold a spinlock... */
399 spin_lock_irqsave(&server_list_lock, flags);
400 list_for_each_entry(server, &server_list, list) {
401 ctl.srv.pid = server->pid;
402 ctl.srv.cid = server->cid;
403 ctl.srv.prog = server->prog;
404 ctl.srv.vers = server->vers;
405
406 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
407 server->pid, server->cid,
408 server->prog, server->vers);
409
410 rpcrouter_send_control_msg(&ctl);
411 }
412 spin_unlock_irqrestore(&server_list_lock, flags);
413
414 queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev);
415 break;
416
417 case RPCROUTER_CTRL_CMD_RESUME_TX:
418 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
419
420 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
421 if (!r_ept) {
422 printk(KERN_ERR
423 "rpcrouter: Unable to resume client\n");
424 break;
425 }
426 spin_lock_irqsave(&r_ept->quota_lock, flags);
427 r_ept->tx_quota_cntr = 0;
428 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
429 wake_up(&r_ept->quota_wait);
430 break;
431
432 case RPCROUTER_CTRL_CMD_NEW_SERVER:
433 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
434 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
435
436 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
437
438 if (!server) {
439 server = rpcrouter_create_server(
440 msg->srv.pid, msg->srv.cid,
441 msg->srv.prog, msg->srv.vers);
442 if (!server)
443 return -ENOMEM;
444 /*
445 * XXX: Verify that its okay to add the
446 * client to our remote client list
447 * if we get a NEW_SERVER notification
448 */
449 if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
450 rc = rpcrouter_create_remote_endpoint(
451 msg->srv.cid);
452 if (rc < 0)
453 printk(KERN_ERR
454 "rpcrouter:Client create"
455 "error (%d)\n", rc);
456 }
457 schedule_work(&work_create_pdevs);
458 wake_up(&newserver_wait);
459 } else {
460 if ((server->pid == msg->srv.pid) &&
461 (server->cid == msg->srv.cid)) {
462 printk(KERN_ERR "rpcrouter: Duplicate svr\n");
463 } else {
464 server->pid = msg->srv.pid;
465 server->cid = msg->srv.cid;
466 }
467 }
468 break;
469
470 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
471 RR("o REMOVE_SERVER prog=%08x:%d\n",
472 msg->srv.prog, msg->srv.vers);
473 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
474 if (server)
475 rpcrouter_destroy_server(server);
476 break;
477
478 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
479 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
480 if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
481 printk(KERN_ERR
482 "rpcrouter: Denying remote removal of "
483 "local client\n");
484 break;
485 }
486 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
487 if (r_ept) {
488 spin_lock_irqsave(&remote_endpoints_lock, flags);
489 list_del(&r_ept->list);
490 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
491 kfree(r_ept);
492 }
493
494 /* Notify local clients of this event */
495 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
496 rc = -ENOSYS;
497
498 break;
499 default:
500 RR("o UNKNOWN(%08x)\n", msg->cmd);
501 rc = -ENOSYS;
502 }
503
504 return rc;
505}
506
/* Work item: register the "oncrpc_router" platform device once the HELLO
 * handshake has completed (queued from process_control_msg).
 */
507static void do_create_rpcrouter_pdev(struct work_struct *work)
508{
509 platform_device_register(&rpcrouter_pdev);
510}
511
/* Work item: create a platform device for each remote server that does not
 * have one yet.  Because msm_rpcrouter_create_server_pdev() cannot be
 * called under server_list_lock, the loop drops the lock, registers ONE
 * device, and re-queues itself to look for more.
 */
512static void do_create_pdevs(struct work_struct *work)
513{
514 unsigned long flags;
515 struct rr_server *server;
516
517 /* TODO: race if destroyed while being registered */
518 spin_lock_irqsave(&server_list_lock, flags);
519 list_for_each_entry(server, &server_list, list) {
520 if (server->pid == RPCROUTER_PID_REMOTE) {
/* empty pdev_name marks a server without a device yet */
521 if (server->pdev_name[0] == 0) {
522 spin_unlock_irqrestore(&server_list_lock,
523 flags);
524 msm_rpcrouter_create_server_pdev(server);
525 schedule_work(&work_create_pdevs);
526 return;
527 }
528 }
529 }
530 spin_unlock_irqrestore(&server_list_lock, flags);
531}
532
533static void rpcrouter_smdnotify(void *_dev, unsigned event)
534{
535 if (event != SMD_EVENT_DATA)
536 return;
537
7bf06dac
BS
538 wake_up(&smd_wait);
539}
540
541static void *rr_malloc(unsigned sz)
542{
543 void *ptr = kmalloc(sz, GFP_KERNEL);
544 if (ptr)
545 return ptr;
546
547 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
548 do {
549 ptr = kmalloc(sz, GFP_KERNEL);
550 } while (!ptr);
551
552 return ptr;
553}
554
555/* TODO: deal with channel teardown / restore */
/*
 * rr_read() - read exactly @len bytes from the shared SMD channel,
 * sleeping until enough bytes are available.  Returns 0 on success or
 * -EIO if smd_read() returns a short count.  Process context only.
 */
556static int rr_read(void *data, int len)
557{
558 int rc;
559 unsigned long flags;
560// printk("rr_read() %d\n", len);
561 for(;;) {
562 spin_lock_irqsave(&smd_lock, flags);
563 if (smd_read_avail(smd_channel) >= len) {
564 rc = smd_read(smd_channel, data, len);
565 spin_unlock_irqrestore(&smd_lock, flags);
566 if (rc == len)
567 return 0;
568 else
569 return -EIO;
570 }
/* record what we're waiting for, then sleep until rpcrouter_smdnotify()
 * wakes smd_wait with enough data buffered
 */
571 rpcrouter_need_len = len;
7bf06dac
BS
572 spin_unlock_irqrestore(&smd_lock, flags);
573
574// printk("rr_read: waiting (%d)\n", len);
575 wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
576 }
577 return 0;
578}
579
/* Scratch buffer for router-to-router control payloads; only touched from
 * the single-threaded rpcrouter workqueue, so no locking is needed.
 */
580static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];
581
/*
 * do_read_data() - workqueue handler that pulls ONE message off the SMD
 * channel per invocation: reads the rr_header, dispatches control traffic
 * to process_control_msg(), and reassembles pacmark-framed data fragments
 * into rr_packets on the destination endpoint's read queue.  Re-queues
 * itself at the end; on I/O or framing errors it simply stops, killing
 * the router.
 */
582static void do_read_data(struct work_struct *work)
583{
584 struct rr_header hdr;
585 struct rr_packet *pkt;
586 struct rr_fragment *frag;
587 struct msm_rpc_endpoint *ept;
588 uint32_t pm, mid;
589 unsigned long flags;
590
591 if (rr_read(&hdr, sizeof(hdr)))
592 goto fail_io;
593
594#if TRACE_R2R_RAW
595 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
596 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
597 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
598#endif
599
600 if (hdr.version != RPCROUTER_VERSION) {
601 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
602 goto fail_data;
603 }
604 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
605 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
606 goto fail_data;
607 }
608
/* messages addressed to the router itself are control traffic */
609 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
610 if (rr_read(r2r_buf, hdr.size))
611 goto fail_io;
612 process_control_msg((void*) r2r_buf, hdr.size);
613 goto done;
614 }
615
/* data messages carry a 32-bit pacmark word before the payload */
616 if (hdr.size < sizeof(pm)) {
617 DIAG("runt packet (no pacmark)\n");
618 goto fail_data;
619 }
620 if (rr_read(&pm, sizeof(pm)))
621 goto fail_io;
622
623 hdr.size -= sizeof(pm);
624
/* NOTE(review): if the rr_read() below fails, frag is leaked on the
 * fail_io path -- tolerated because the router is dead at that point.
 */
625 frag = rr_malloc(hdr.size + sizeof(*frag));
626 frag->next = NULL;
627 frag->length = hdr.size;
628 if (rr_read(frag->data, hdr.size))
629 goto fail_io;
630
631 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
632 if (!ept) {
633 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
634 kfree(frag);
635 goto done;
636 }
637
638 /* See if there is already a partial packet that matches our mid
639 * and if so, append this fragment to that packet.
640 */
641 mid = PACMARK_MID(pm);
642 list_for_each_entry(pkt, &ept->incomplete, list) {
643 if (pkt->mid == mid) {
644 pkt->last->next = frag;
645 pkt->last = frag;
646 pkt->length += frag->length;
647 if (PACMARK_LAST(pm)) {
648 list_del(&pkt->list);
649 goto packet_complete;
650 }
651 goto done;
652 }
653 }
654 /* This mid is new -- create a packet for it, and put it on
655 * the incomplete list if this fragment is not a last fragment,
656 * otherwise put it on the read queue.
657 */
658 pkt = rr_malloc(sizeof(struct rr_packet));
659 pkt->first = frag;
660 pkt->last = frag;
661 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
662 pkt->mid = mid;
663 pkt->length = frag->length;
664 if (!PACMARK_LAST(pm)) {
665 list_add_tail(&pkt->list, &ept->incomplete);
666 goto done;
667 }
668
669packet_complete:
670 spin_lock_irqsave(&ept->read_q_lock, flags);
7bf06dac
BS
671 list_add_tail(&pkt->list, &ept->read_q);
672 wake_up(&ept->wait_q);
673 spin_unlock_irqrestore(&ept->read_q_lock, flags);
674done:
675
/* sender asked for a flow-control credit back -- grant it */
676 if (hdr.confirm_rx) {
677 union rr_control_msg msg;
678
679 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
680 msg.cli.pid = hdr.dst_pid;
681 msg.cli.cid = hdr.dst_cid;
682
683 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
684 rpcrouter_send_control_msg(&msg);
685 }
686
/* re-arm ourselves for the next inbound message */
687 queue_work(rpcrouter_workqueue, &work_read_data);
688 return;
689
690fail_io:
691fail_data:
692 printk(KERN_ERR "rpc_router has died\n");
7bf06dac
BS
693}
694
695void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
696 uint32_t vers, uint32_t proc)
697{
698 memset(hdr, 0, sizeof(struct rpc_request_hdr));
699 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
700 hdr->rpc_vers = cpu_to_be32(2);
701 hdr->prog = cpu_to_be32(prog);
702 hdr->vers = cpu_to_be32(vers);
703 hdr->procedure = cpu_to_be32(proc);
704}
705
706struct msm_rpc_endpoint *msm_rpc_open(void)
707{
708 struct msm_rpc_endpoint *ept;
709
710 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
711 if (ept == NULL)
712 return ERR_PTR(-ENOMEM);
713
714 return ept;
715}
716
/* Close an endpoint opened with msm_rpc_open()/msm_rpc_connect().
 * Returns 0 or a negative errno from the REMOVE_CLIENT handshake.
 */
717int msm_rpc_close(struct msm_rpc_endpoint *ept)
718{
719 return msm_rpcrouter_destroy_local_endpoint(ept);
720}
721EXPORT_SYMBOL(msm_rpc_close);
722
/*
 * msm_rpc_write() - send one RPC message (call or reply) from @ept.
 *
 * Validates the packet, resolves the destination, waits for tx quota from
 * the remote endpoint, then writes header + pacmark + payload atomically
 * under smd_lock.  Returns @count on success or a negative errno.
 * Sleeps; process context only.
 *
 * Locking is subtle: the quota wait loop below deliberately BREAKS OUT
 * with r_ept->quota_lock still held, and the lock is released either on
 * the signal path or after the pacmark is generated.
 */
723int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
724{
725 struct rr_header hdr;
726 uint32_t pacmark;
727 struct rpc_request_hdr *rq = buffer;
728 struct rr_remote_endpoint *r_ept;
729 unsigned long flags;
730 int needed;
731 DEFINE_WAIT(__wait);
732
733 /* TODO: fragmentation for large outbound packets */
734 if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
735 return -EINVAL;
736
737 /* snoop the RPC packet and enforce permissions */
738
739 /* has to have at least the xid and type fields */
740 if (count < (sizeof(uint32_t) * 2)) {
741 printk(KERN_ERR "rr_write: rejecting runt packet\n");
742 return -EINVAL;
743 }
744
745 if (rq->type == 0) {
746 /* RPC CALL */
747 if (count < (sizeof(uint32_t) * 6)) {
748 printk(KERN_ERR
749 "rr_write: rejecting runt call packet\n");
750 return -EINVAL;
751 }
752 if (ept->dst_pid == 0xffffffff) {
753 printk(KERN_ERR "rr_write: not connected\n");
754 return -ENOTCONN;
755 }
756
/* calls may only go to the prog/vers this endpoint is bound to */
757#if CONFIG_MSM_AMSS_VERSION >= 6350
758 if ((ept->dst_prog != rq->prog) ||
759 !msm_rpc_is_compatible_version(
760 be32_to_cpu(ept->dst_vers),
761 be32_to_cpu(rq->vers))) {
762#else
763 if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) {
764#endif
765 printk(KERN_ERR
766 "rr_write: cannot write to %08x:%d "
767 "(bound to %08x:%d)\n",
768 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
769 be32_to_cpu(ept->dst_prog),
770 be32_to_cpu(ept->dst_vers));
771 return -EINVAL;
772 }
773 hdr.dst_pid = ept->dst_pid;
774 hdr.dst_cid = ept->dst_cid;
775 IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n",
776 ept,
777 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
778 ept->dst_pid, ept->dst_cid, count,
779 be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure));
780 } else {
781 /* RPC REPLY */
782 /* TODO: locking */
/* replies must match the pending call recorded by __msm_rpc_read() */
783 if (ept->reply_pid == 0xffffffff) {
784 printk(KERN_ERR
785 "rr_write: rejecting unexpected reply\n");
786 return -EINVAL;
787 }
788 if (ept->reply_xid != rq->xid) {
789 printk(KERN_ERR
790 "rr_write: rejecting packet w/ bad xid\n");
791 return -EINVAL;
792 }
793
794 hdr.dst_pid = ept->reply_pid;
795 hdr.dst_cid = ept->reply_cid;
796
797 /* consume this reply */
798 ept->reply_pid = 0xffffffff;
799
800 IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
801 ept,
802 be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
803 }
804
805 r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);
806
807 if (!r_ept) {
808 printk(KERN_ERR
809 "msm_rpc_write(): No route to ept "
810 "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
811 return -EHOSTUNREACH;
812 }
813
814 /* Create routing header */
815 hdr.type = RPCROUTER_CTRL_CMD_DATA;
816 hdr.version = RPCROUTER_VERSION;
817 hdr.src_pid = ept->pid;
818 hdr.src_cid = ept->cid;
819 hdr.confirm_rx = 0;
820 hdr.size = count + sizeof(uint32_t);
821
/* wait for tx quota; both break paths leave quota_lock HELD */
822 for (;;) {
823 prepare_to_wait(&r_ept->quota_wait, &__wait,
824 TASK_INTERRUPTIBLE);
825 spin_lock_irqsave(&r_ept->quota_lock, flags);
826 if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
827 break;
828 if (signal_pending(current) &&
829 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
830 break;
831 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
832 schedule();
833 }
834 finish_wait(&r_ept->quota_wait, &__wait);
835
836 if (signal_pending(current) &&
837 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
838 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
839 return -ERESTARTSYS;
840 }
/* on the last credit, ask the peer to send RESUME_TX back */
841 r_ept->tx_quota_cntr++;
842 if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
843 hdr.confirm_rx = 1;
844
845 /* bump pacmark while interrupts disabled to avoid race
846 * probably should be atomic op instead
847 */
848 pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);
849
850 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
851
852 spin_lock_irqsave(&smd_lock, flags);
853
/* drop the lock while sleeping so the channel can drain */
854 needed = sizeof(hdr) + hdr.size;
855 while (smd_write_avail(smd_channel) < needed) {
856 spin_unlock_irqrestore(&smd_lock, flags);
857 msleep(250);
858 spin_lock_irqsave(&smd_lock, flags);
859 }
860
861 /* TODO: deal with full fifo */
862 smd_write(smd_channel, &hdr, sizeof(hdr));
863 smd_write(smd_channel, &pacmark, sizeof(pacmark));
864 smd_write(smd_channel, buffer, count);
865
866 spin_unlock_irqrestore(&smd_lock, flags);
867
868 return count;
869}
870EXPORT_SYMBOL(msm_rpc_write);
871
872/*
873 * NOTE: It is the responsibility of the caller to kfree buffer
874 */
875int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
876 unsigned user_len, long timeout)
877{
878 struct rr_fragment *frag, *next;
879 char *buf;
880 int rc;
881
882 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
883 if (rc <= 0)
884 return rc;
885
886 /* single-fragment messages conveniently can be
887 * returned as-is (the buffer is at the front)
888 */
889 if (frag->next == 0) {
890 *buffer = (void*) frag;
891 return rc;
892 }
893
894 /* multi-fragment messages, we have to do it the
895 * hard way, which is rather disgusting right now
896 */
897 buf = rr_malloc(rc);
898 *buffer = buf;
899
900 while (frag != NULL) {
901 memcpy(buf, frag->data, frag->length);
902 next = frag->next;
903 buf += frag->length;
904 kfree(frag);
905 frag = next;
906 }
907
908 return rc;
909}
910
911int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
912 void *_request, int request_size,
913 long timeout)
914{
915 return msm_rpc_call_reply(ept, proc,
916 _request, request_size,
917 NULL, 0, timeout);
918}
919EXPORT_SYMBOL(msm_rpc_call);
920
/*
 * msm_rpc_call_reply() - issue an RPC call on @ept and wait for the
 * matching reply.
 *
 * The caller's @_request buffer is used in place: its rpc_request_hdr is
 * overwritten with a fresh xid and the endpoint's bound prog/vers.  On
 * success the reply (up to @reply_size bytes) is copied into @_reply if
 * non-NULL.  Returns 0 on success or a negative errno; stray CALL
 * packets and stale replies (wrong xid) are silently discarded.
 */
921int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
922 void *_request, int request_size,
923 void *_reply, int reply_size,
924 long timeout)
925{
926 struct rpc_request_hdr *req = _request;
927 struct rpc_reply_hdr *reply;
928 int rc;
929
930 if (request_size < sizeof(*req))
931 return -ETOOSMALL;
932
933 if (ept->dst_pid == 0xffffffff)
934 return -ENOTCONN;
935
936 /* We can't use msm_rpc_setup_req() here, because dst_prog and
937 * dst_vers here are already in BE.
938 */
939 memset(req, 0, sizeof(*req));
940 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
941 req->rpc_vers = cpu_to_be32(2);
942 req->prog = ept->dst_prog;
943 req->vers = ept->dst_vers;
944 req->procedure = cpu_to_be32(proc);
945
946 rc = msm_rpc_write(ept, req, request_size);
947 if (rc < 0)
948 return rc;
949
950 for (;;) {
951 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
952 if (rc < 0)
953 return rc;
/* need at least xid, type and reply_stat to interpret the reply */
954 if (rc < (3 * sizeof(uint32_t))) {
955 rc = -EIO;
956 break;
957 }
958 /* we should not get CALL packets -- ignore them */
959 if (reply->type == 0) {
960 kfree(reply);
961 continue;
962 }
963 /* If an earlier call timed out, we could get the (no
964 * longer wanted) reply for it. Ignore replies that
965 * we don't expect.
966 */
967 if (reply->xid != req->xid) {
968 kfree(reply);
969 continue;
970 }
971 if (reply->reply_stat != 0) {
972 rc = -EPERM;
973 break;
974 }
975 if (reply->data.acc_hdr.accept_stat != 0) {
976 rc = -EINVAL;
977 break;
978 }
979 if (_reply == NULL) {
980 rc = 0;
981 break;
982 }
/* caller's buffer too small: report -ENOMEM but still free the reply */
983 if (rc > reply_size) {
984 rc = -ENOMEM;
985 } else {
986 memcpy(_reply, reply, rc);
987 }
988 break;
989 }
/* every break path above still owns the reply buffer */
990 kfree(reply);
991 return rc;
992}
993EXPORT_SYMBOL(msm_rpc_call_reply);
994
995
996static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
997{
998 unsigned long flags;
999 int ret;
1000 spin_lock_irqsave(&ept->read_q_lock, flags);
1001 ret = !list_empty(&ept->read_q);
1002 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1003 return ret;
1004}
1005
/*
 * __msm_rpc_read() - dequeue one complete packet from @ept as a fragment
 * chain (*frag_ret; caller frees).
 *
 * @len:     maximum acceptable packet length (-ETOOSMALL if exceeded;
 *           the packet stays queued).
 * @timeout: negative = wait forever; otherwise jiffies before -ETIMEDOUT.
 *           Interruptible unless the endpoint is MSM_RPC_UNINTERRUPTIBLE.
 *
 * If the packet is an RPC CALL, records the caller's pid/cid/xid on the
 * endpoint so a later msm_rpc_write() can route the reply.
 * Returns the packet length, or a negative errno.
 */
1006int __msm_rpc_read(struct msm_rpc_endpoint *ept,
1007 struct rr_fragment **frag_ret,
1008 unsigned len, long timeout)
1009{
1010 struct rr_packet *pkt;
1011 struct rpc_request_hdr *rq;
1012 DEFINE_WAIT(__wait);
1013 unsigned long flags;
1014 int rc;
1015
1016 IO("READ on ept %p\n", ept);
1017
/* four wait variants: {uninterruptible, interruptible} x {no timeout, timeout} */
1018 if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
1019 if (timeout < 0) {
1020 wait_event(ept->wait_q, ept_packet_available(ept));
1021 } else {
1022 rc = wait_event_timeout(
1023 ept->wait_q, ept_packet_available(ept),
1024 timeout);
1025 if (rc == 0)
1026 return -ETIMEDOUT;
1027 }
1028 } else {
1029 if (timeout < 0) {
1030 rc = wait_event_interruptible(
1031 ept->wait_q, ept_packet_available(ept));
1032 if (rc < 0)
1033 return rc;
1034 } else {
1035 rc = wait_event_interruptible_timeout(
1036 ept->wait_q, ept_packet_available(ept),
1037 timeout);
1038 if (rc == 0)
1039 return -ETIMEDOUT;
1040 }
1041 }
1042
/* a concurrent reader may have raced us to the packet: -EAGAIN */
1043 spin_lock_irqsave(&ept->read_q_lock, flags);
1044 if (list_empty(&ept->read_q)) {
1045 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1046 return -EAGAIN;
1047 }
1048 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
1049 if (pkt->length > len) {
1050 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1051 return -ETOOSMALL;
1052 }
1053 list_del(&pkt->list);
7bf06dac
BS
1054 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1055
1056 rc = pkt->length;
1057
1058 *frag_ret = pkt->first;
1059 rq = (void*) pkt->first->data;
1060 if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
1061 IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
1062 ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
1063 be32_to_cpu(rq->procedure),
1064 be32_to_cpu(rq->xid));
1065 /* RPC CALL */
1066 if (ept->reply_pid != 0xffffffff) {
1067 printk(KERN_WARNING
1068 "rr_read: lost previous reply xid...\n");
1069 }
1070 /* TODO: locking? */
/* remember where the reply for this call must be sent */
1071 ept->reply_pid = pkt->hdr.src_pid;
1072 ept->reply_cid = pkt->hdr.src_cid;
1073 ept->reply_xid = rq->xid;
1074 }
1075#if TRACE_RPC_MSG
1076 else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))
1077 IO("READ on ept %p is a REPLY\n", ept);
1078 else IO("READ on ept %p (%d bytes)\n", ept, rc);
1079#endif
1080
/* only the rr_packet wrapper is freed; fragments belong to the caller */
1081 kfree(pkt);
1082 return rc;
1083}
1084
1085#if CONFIG_MSM_AMSS_VERSION >= 6350
1086int msm_rpc_is_compatible_version(uint32_t server_version,
1087 uint32_t client_version)
1088{
1089 if ((server_version & RPC_VERSION_MODE_MASK) !=
1090 (client_version & RPC_VERSION_MODE_MASK))
1091 return 0;
1092
1093 if (server_version & RPC_VERSION_MODE_MASK)
1094 return server_version == client_version;
1095
1096 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1097 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1098 ((server_version & RPC_VERSION_MINOR_MASK) >=
1099 (client_version & RPC_VERSION_MINOR_MASK));
1100}
1101EXPORT_SYMBOL(msm_rpc_is_compatible_version);
1102
1103static int msm_rpc_get_compatible_server(uint32_t prog,
1104 uint32_t ver,
1105 uint32_t *found_vers)
1106{
1107 struct rr_server *server;
1108 unsigned long flags;
1109 if (found_vers == NULL)
1110 return 0;
1111
1112 spin_lock_irqsave(&server_list_lock, flags);
1113 list_for_each_entry(server, &server_list, list) {
1114 if ((server->prog == prog) &&
1115 msm_rpc_is_compatible_version(server->vers, ver)) {
1116 *found_vers = server->vers;
1117 spin_unlock_irqrestore(&server_list_lock, flags);
1118 return 0;
1119 }
1120 }
1121 spin_unlock_irqrestore(&server_list_lock, flags);
1122 return -1;
1123}
1124#endif
1125
1126struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
1127{
1128 struct msm_rpc_endpoint *ept;
1129 struct rr_server *server;
1130
1131#if CONFIG_MSM_AMSS_VERSION >= 6350
1132 if (!(vers & RPC_VERSION_MODE_MASK)) {
1133 uint32_t found_vers;
1134 if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0)
1135 return ERR_PTR(-EHOSTUNREACH);
1136 if (found_vers != vers) {
1137 D("RPC using new version %08x:{%08x --> %08x}\n",
1138 prog, vers, found_vers);
1139 vers = found_vers;
1140 }
1141 }
1142#endif
1143
1144 server = rpcrouter_lookup_server(prog, vers);
1145 if (!server)
1146 return ERR_PTR(-EHOSTUNREACH);
1147
1148 ept = msm_rpc_open();
1149 if (IS_ERR(ept))
1150 return ept;
1151
1152 ept->flags = flags;
1153 ept->dst_pid = server->pid;
1154 ept->dst_cid = server->cid;
1155 ept->dst_prog = cpu_to_be32(prog);
1156 ept->dst_vers = cpu_to_be32(vers);
1157
1158 return ept;
1159}
1160EXPORT_SYMBOL(msm_rpc_connect);
1161
/* Return the server version this endpoint is bound to, in host byte
 * order (dst_vers is stored big-endian).
 */
1162uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept)
1163{
1164 return be32_to_cpu(ept->dst_vers);
1165}
1166EXPORT_SYMBOL(msm_rpc_get_vers);
1167
1168/* TODO: permission check? */
1169int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
1170 uint32_t prog, uint32_t vers)
1171{
1172 int rc;
1173 union rr_control_msg msg;
1174 struct rr_server *server;
1175
1176 server = rpcrouter_create_server(ept->pid, ept->cid,
1177 prog, vers);
1178 if (!server)
1179 return -ENODEV;
1180
1181 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
1182 msg.srv.pid = ept->pid;
1183 msg.srv.cid = ept->cid;
1184 msg.srv.prog = prog;
1185 msg.srv.vers = vers;
1186
1187 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
1188 ept->pid, ept->cid, prog, vers);
1189
1190 rc = rpcrouter_send_control_msg(&msg);
1191 if (rc < 0)
1192 return rc;
1193
1194 return 0;
1195}
1196
1197/* TODO: permission check -- disallow unreg of somebody else's server */
1198int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
1199 uint32_t prog, uint32_t vers)
1200{
1201 struct rr_server *server;
1202 server = rpcrouter_lookup_server(prog, vers);
1203
1204 if (!server)
1205 return -ENOENT;
1206 rpcrouter_destroy_server(server);
1207 return 0;
1208}
1209
/*
 * msm_rpcrouter_probe() - bring the router up when the SMD_RPCCALL
 * platform device appears: init the registries, start the single-threaded
 * workqueue, create the device nodes, open the SMD channel, and kick the
 * first do_read_data pass.  Uses goto-based unwind on failure.
 */
1210static int msm_rpcrouter_probe(struct platform_device *pdev)
1211{
1212 int rc;
1213
1214 /* Initialize what we need to start processing */
1215 INIT_LIST_HEAD(&local_endpoints);
1216 INIT_LIST_HEAD(&remote_endpoints);
1217
1218 init_waitqueue_head(&newserver_wait);
1219 init_waitqueue_head(&smd_wait);
7bf06dac
BS
1220
1221 rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
1222 if (!rpcrouter_workqueue)
1223 return -ENOMEM;
1224
1225 rc = msm_rpcrouter_init_devices();
1226 if (rc < 0)
1227 goto fail_destroy_workqueue;
1228
1229 /* Open up SMD channel 2 */
1230 initialized = 0;
1231 rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
1232 if (rc < 0)
1233 goto fail_remove_devices;
1234
/* start the self-requeueing reader on the workqueue */
1235 queue_work(rpcrouter_workqueue, &work_read_data);
1236 return 0;
1237
1238 fail_remove_devices:
1239 msm_rpcrouter_exit_devices();
1240 fail_destroy_workqueue:
1241 destroy_workqueue(rpcrouter_workqueue);
1242 return rc;
1243}
1244
/* Driver matched against the "SMD_RPCCALL" platform device; probe brings
 * the whole router up.
 */
1245static struct platform_driver msm_smd_channel2_driver = {
1246 .probe = msm_rpcrouter_probe,
1247 .driver = {
1248 .name = "SMD_RPCCALL",
1249 .owner = THIS_MODULE,
1250 },
1251};
1252
1253static int __init rpcrouter_init(void)
1254{
1255 return platform_driver_register(&msm_smd_channel2_driver);
1256}
1257
1258module_init(rpcrouter_init);
1259MODULE_DESCRIPTION("MSM RPC Router");
1260MODULE_AUTHOR("San Mehat <san@android.com>");
1261MODULE_LICENSE("GPL");