/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "fw-device.h"
#include "fw-topology.h"
#include "fw-transaction.h"

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static int schedule_iso_resource(struct iso_resource *);
static void release_iso_resource(struct client *, struct client_resource *);

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource resource;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

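/*
 * The character device ABI carries userspace pointers and closures in
 * __u64 fields so that 32-bit and 64-bit processes share one structure
 * layout; the round trip through unsigned long above keeps the casts
 * well-defined on both 32-bit and 64-bit kernels.
 */
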
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

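/*
 * Events queued here are drained by dequeue_event() when userspace calls
 * read(2) on the device file; poll(2) sleepers are woken through
 * client->wait.  If the client is already shutting down, the event is
 * freed instead of queued.
 */
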
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	event->closure	     = client->bus_reset_closure;
	event->type	     = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id	     = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irqrestore(&card->lock, flags);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	struct client_resource *r = p;

	if (r->release == release_iso_resource)
		schedule_iso_resource(container_of(r,
					struct iso_resource, resource));
	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		if (resource->release == release_iso_resource)
			schedule_iso_resource(container_of(resource,
					struct iso_resource, resource));
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

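/*
 * idr_pre_get()/idr_get_new() is the two-step idr allocation API of this
 * kernel generation: preallocate memory outside the spinlock, then take
 * the handle under the lock and retry from "retry:" on -EAGAIN.  A
 * successful allocation also takes a client reference on behalf of the
 * idr entry.
 */
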
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

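/*
 * Worked example of the compatibility quirk above (illustrative): for a
 * quadlet read response, rsp->length == 4 fits within the tail padding of
 * struct fw_cdev_event_response, so the event is queued as the full
 * structure plus a second, redundant copy of the payload.  A short read
 * that stops after the structure still finds the payload at the offset
 * where pre-2.6.27 kernels placed it.
 */
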
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode & 0x1f, destination_id,
			request->generation, speed, request->offset,
			e->response.data, request->length,
			complete_transaction, e);

	if (request->data)
		return sizeof(request) + request->length;
	else
		return sizeof(request);
 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, request, client->device->node->node_id,
			    client->device->max_speed);
}

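/*
 * Illustrative userspace sketch (not kernel code), assuming the ABI
 * declared in <linux/firewire-cdev.h>: a quadlet read of the remote
 * node's address space at CSR_REGISTER_BASE + 0x400 would look like
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = TCODE_READ_QUADLET_REQUEST,
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,
 *		.closure    = my_cookie,
 *		.generation = generation_from_last_bus_reset_event,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *
 * The result arrives asynchronously as a FW_CDEV_EVENT_RESPONSE read
 * from the file descriptor, carrying the same closure value.
 */
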
static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	fw_send_response(client->device->card, r->request,
			 RCODE_CONFLICT_ERROR);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	request->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor_resource *r;
	int ret;

	if (request->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	request->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure       = client->iso_closure;
	e->interrupt.cycle         = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

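/*
 * Worked example: control == 0x08050040 decodes as
 * payload_length = 0x0040 (64 bytes), interrupt = 1, skip = 0,
 * tag = 1, sy = 0, header_length = 8.
 */
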
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

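/*
 * For receive contexts, request->tags is a bitmask selecting which of
 * the four iso tag values to accept (hence the 1..15 range checked
 * above), and request->sync is the 4-bit sy value to synchronize on.
 */
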
static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;

	return 0;
}

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
			client_get(client);
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource
	 * sticks in the idr, it will be scheduled again for a newer
	 * generation or at shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << (63 - channel);

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->resource.handle    = r->resource.handle;
	e->resource.channel   = channel;
	e->resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->resource, sizeof(e->resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static int schedule_iso_resource(struct iso_resource *r)
{
	int scheduled;

	client_get(r->client);

	scheduled = schedule_delayed_work(&r->work, 0);
	if (!scheduled)
		client_put(r->client);

	return scheduled;
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client     = client;
	r->todo       = todo;
	r->generation = -1;
	r->channels   = request->channels;
	r->bandwidth  = request->bandwidth;
	r->e_alloc    = e1;
	r->e_dealloc  = e2;

	e1->resource.closure = request->closure;
	e1->resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->resource.closure = request->closure;
	e2->resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		ret = schedule_iso_resource(r) ? 0 : -ENOMEM;
	}
	if (ret < 0)
		goto fail;
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}

static int ioctl_get_speed(struct client *client, void *buffer)
{
	struct fw_cdev_get_speed *request = buffer;

	request->max_speed = client->device->max_speed;

	return 0;
}

static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
};

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

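/*
 * All firewire cdev ioctl numbers use type '#', and _IOC_NR(cmd) indexes
 * ioctl_handlers[] directly.  For example, FW_CDEV_IOC_GET_INFO is
 * _IOWR('#', 0x00, struct fw_cdev_get_info): the argument structure is
 * copied in, handled by ioctl_get_info(), and copied back out.
 */
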
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	unsigned long flags;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irqsave(&client->lock, flags);
	client->in_shutdown = true;
	spin_unlock_irqrestore(&client->lock, flags);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};

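/*
 * Illustrative userspace sketch (not kernel code) of the minimal session
 * these file operations implement, assuming a device node named /dev/fw0:
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * followed by read(2) to receive events, further ioctls to send
 * transactions, and mmap(2) to set up an isochronous buffer.
 */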