/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION		4
#define FW_CDEV_VERSION_EVENT_REQUEST2	4
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};
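
/*
 * Each open() of the character device gets its own struct client.  The
 * client is reference counted because in-flight transactions, allocated
 * resources, and entries in resource_idr may still need it after release().
 */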
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};
struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};
struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};
static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return nonseekable_open(inode, file);
}
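
/*
 * Events are appended to client->event_list by queue_event() and handed to
 * userspace by dequeue_event() via read(); each event is kfree()'d after it
 * has been copied out.
 */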
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;
 out:
	kfree(event);

	return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure       = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}
static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(u64_to_uptr(a->bus_reset),
				 &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}
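
/*
 * Resources (address handlers, pending transactions, descriptors, iso
 * resources) are registered in client->resource_idr; the handle returned to
 * userspace is the idr index, and the release callback runs on explicit
 * deallocation or when the file is closed.
 */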
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}
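
/*
 * Common helper for the send_request, send_broadcast_request, and
 * send_stream_packet ioctls: validates the request, copies the payload from
 * userspace, registers the transaction as a client resource, and submits it.
 */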
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}
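
/*
 * Handler for inbound requests to an address region allocated by this client.
 * For FCP regions the core passes request == NULL and the payload is only
 * valid for the duration of this callback, hence the kmemdup() below.
 */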
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type    = FW_CDEV_EVENT_REQUEST;
		req->tcode   = tcode;
		req->offset  = offset;
		req->length  = length;
		req->handle  = r->resource.handle;
		req->closure = handler->closure;
		event_size0  = sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type    = FW_CDEV_EVENT_REQUEST2;
		req->tcode   = tcode;
		req->offset  = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card    = card->index;
		req->generation = generation;
		req->length  = length;
		req->handle  = r->resource.handle;
		req->closure = handler->closure;
		event_size0  = sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}
static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	region.end = a->offset + a->length;
	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	return fw_core_initiate_bus_reset(client->device->card,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
}
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure       = client->iso_closure;
	e->interrupt.cycle         = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;

	if (a->channel > 63)
		return -EINVAL;

	switch (a->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3))
			return -EINVAL;
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
					a->channel, a->speed, a->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
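
/*
 * Layout of the 32-bit control word decoded above:
 *   bits  0-15  payload_length
 *   bit     16  interrupt
 *   bit     17  skip
 *   bits 18-19  tag
 *   bits 20-23  sy
 *   bits 24-31  header_length
 */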
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			if (u.packet.header_length % 4 != 0)
				return -EINVAL;
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length == 0 ||
				   u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
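
/*
 * Worker for isochronous resource management.  r->todo encodes the state:
 * ISO_RES_ALLOC allocates channel/bandwidth and then transitions to
 * ISO_RES_REALLOC, which re-posts the allocation after each bus reset;
 * ISO_RES_DEALLOC and the *_ONCE variants perform a single operation and
 * then free the iso_resource.
 */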
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}
static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client     = client;
	r->todo       = todo;
	r->generation = -1;
	r->channels   = request->channels;
	r->bandwidth  = request->bandwidth;
	r->e_alloc    = e1;
	r->e_dealloc  = e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
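
/*
 * Note that the _once variants also go through init_iso_resource: the actual
 * bus transactions are carried out by iso_resource_work, only the todo value
 * differs.
 */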
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode = TCODE_STREAM_DATA;
	request.length = a->length;
	request.closure = a->closure;
	request.data = a->data;
	request.generation = a->generation;

	return init_request(client, &request, dest, a->speed);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
	ioctl_get_cycle_timer2,
};
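
/*
 * dispatch_ioctl() uses _IOC_NR(cmd) as an index into ioctl_handlers[], so
 * the order of this array must match the command numbers defined in
 * linux/firewire-cdev.h.
 */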
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
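
/*
 * mmap() maps the single per-client isochronous DMA buffer; packets queued
 * with ioctl_queue_iso() reference payload offsets inside this mapping.
 */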
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}
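
/*
 * release() tears the client down in order: unlink from the device, destroy
 * the iso context and buffer, mark the client as shutting down, release all
 * registered resources, and free any events still queued.
 */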
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};