drivers/media/video/v4l2-event.c
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>

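/*
 * v4l2_event_init - allocate and initialize the per-filehandle event state
 *
 * Sets up the wait queue, the free/available/subscribed lists and the
 * sequence counter (started at -1 so the first queued event is number 0).
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */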
int v4l2_event_init(struct v4l2_fh *fh)
{
	fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
	if (fh->events == NULL)
		return -ENOMEM;

	init_waitqueue_head(&fh->events->wait);

	INIT_LIST_HEAD(&fh->events->free);
	INIT_LIST_HEAD(&fh->events->available);
	INIT_LIST_HEAD(&fh->events->subscribed);

	fh->events->sequence = -1;

	return 0;
}

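/*
 * v4l2_event_alloc - grow the free-event pool of @fh to at least @n entries
 *
 * Drivers call this (typically at open time) to pre-allocate the
 * v4l2_kevent structures that v4l2_event_queue() later fills in.  The
 * GFP_KERNEL allocation happens outside fh_lock; only the list
 * insertion is done under it.
 */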
int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
{
	struct v4l2_events *events = fh->events;
	unsigned long flags;

	if (!events) {
		WARN_ON(1);
		return -ENOMEM;
	}

	while (events->nallocated < n) {
		struct v4l2_kevent *kev;

		kev = kzalloc(sizeof(*kev), GFP_KERNEL);
		if (kev == NULL)
			return -ENOMEM;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		list_add_tail(&kev->list, &events->free);
		events->nallocated++;
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);

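/* Pop and kfree() every entry of @type on @list; @member names the list_head. */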
#define list_kfree(list, type, member)				\
	while (!list_empty(list)) {				\
		type *hi;					\
		hi = list_first_entry(list, type, member);	\
		list_del(&hi->member);				\
		kfree(hi);					\
	}

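/*
 * v4l2_event_free - release all event state owned by @fh
 *
 * Frees every entry on the free, available and subscribed lists, then
 * the v4l2_events structure itself.  No lock is taken, so the caller
 * must ensure the file handle can no longer be reached by
 * v4l2_event_queue().
 */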
void v4l2_event_free(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;

	if (!events)
		return;

	list_kfree(&events->free, struct v4l2_kevent, list);
	list_kfree(&events->available, struct v4l2_kevent, list);
	list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);

	kfree(events);
	fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);

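/*
 * Dequeue one event under fh_lock.  Returns -ENOENT if nothing is
 * available; otherwise copies the oldest event into @event, stores the
 * number of events still queued in event->pending and recycles the
 * kevent onto the free list.
 */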
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&events->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(events->navailable == 0);

	kev = list_first_entry(&events->available, struct v4l2_kevent, list);
	list_move(&kev->list, &events->free);
	events->navailable--;

	kev->event.pending = events->navailable;
	*event = kev->event;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

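/*
 * v4l2_event_dequeue - dequeue one event, sleeping if needed
 *
 * In nonblocking mode this is a single dequeue attempt.  Otherwise the
 * caller sleeps interruptibly until navailable becomes nonzero; the
 * loop retries on -ENOENT, which covers the race where another thread
 * empties the queue between wake-up and dequeue.
 */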
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	struct v4l2_events *events = fh->events;
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	do {
		ret = wait_event_interruptible(events->wait,
					       events->navailable != 0);
		if (ret < 0)
			return ret;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	return ret;
}

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
	struct v4l2_fh *fh, u32 type)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &events->subscribed, list) {
		if (sev->type == type)
			return sev;
	}

	return NULL;
}

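/*
 * v4l2_event_queue - queue an event on every subscribed file handle
 *
 * Walks vdev->fh_list under fh_lock and, for each file handle
 * subscribed to ev->type, fills a kevent from its free list with a
 * monotonic timestamp and a per-fh sequence number.  The sequence
 * number is bumped even when the free list is empty, so userspace can
 * detect dropped events from gaps in v4l2_event.sequence.
 */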
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		struct v4l2_events *events = fh->events;
		struct v4l2_kevent *kev;

		/* Are we subscribed? */
		if (!v4l2_event_subscribed(fh, ev->type))
			continue;

		/* Increase event sequence number on fh. */
		events->sequence++;

		/* Do we have any free events? */
		if (list_empty(&events->free))
			continue;

		/* Take one and fill it. */
		kev = list_first_entry(&events->free, struct v4l2_kevent, list);
		kev->event.type = ev->type;
		kev->event.u = ev->u;
		kev->event.timestamp = timestamp;
		kev->event.sequence = events->sequence;
		list_move_tail(&kev->list, &events->available);

		events->navailable++;

		wake_up_all(&events->wait);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

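/*
 * v4l2_event_pending - number of events ready for dequeueing
 *
 * Meant for a driver's poll() handler: a nonzero return means that
 * VIDIOC_DQEVENT will not block.
 */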
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->events->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

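/*
 * v4l2_event_subscribe - subscribe @fh to events of sub->type
 *
 * The entry is allocated before taking fh_lock; if the type turns out
 * to be subscribed already, the fresh entry is simply freed again and
 * the call still returns 0, so duplicate subscriptions are harmless.
 */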
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (fh->events == NULL) {
		WARN_ON(1);
		return -ENOMEM;
	}

	sev = kmalloc(sizeof(*sev), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (v4l2_event_subscribed(fh, sub->type) == NULL) {
		INIT_LIST_HEAD(&sev->list);
		sev->type = sub->type;

		list_add(&sev->list, &events->subscribed);
		sev = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

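/*
 * Drop every subscription on @fh: repeatedly detach the first entry
 * under fh_lock and free it with the lock released, until the
 * subscribed list is empty.
 */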
static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&events->subscribed)) {
			sev = list_first_entry(&events->subscribed,
				       struct v4l2_subscribed_event, list);
			list_del(&sev->list);
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		kfree(sev);
	} while (sev);
}

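/*
 * v4l2_event_unsubscribe - drop one subscription, or all of them
 *
 * V4L2_EVENT_ALL removes every subscription at once; otherwise the
 * matching entry, if any, is unlinked under fh_lock and freed.
 * Unsubscribing a type that was never subscribed is not an error.
 */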
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type);
	if (sev != NULL)
		list_del(&sev->list);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
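
/*
 * Usage sketch (illustrative only, not part of this file): how a
 * hypothetical driver might wire these helpers into its file
 * operations.  The mydrv_* names and MYDRV_NEVENTS are invented for
 * the example; a real driver also needs the usual v4l2_fh setup and
 * vdev->fh_list registration.
 *
 *	static int mydrv_open(struct file *filp)
 *	{
 *		struct v4l2_fh *fh = ...;	// driver-specific fh setup
 *		int ret = v4l2_event_init(fh);
 *
 *		if (!ret)
 *			ret = v4l2_event_alloc(fh, MYDRV_NEVENTS);
 *		return ret;
 *	}
 *
 *	// From the interrupt or worker path that notices the condition:
 *	static void mydrv_report_vsync(struct video_device *vdev)
 *	{
 *		struct v4l2_event ev = { .type = V4L2_EVENT_VSYNC };
 *
 *		v4l2_event_queue(vdev, &ev);
 *	}
 *
 *	// The VIDIOC_DQEVENT handler then reduces to:
 *	//	v4l2_event_dequeue(fh, ev, filp->f_flags & O_NONBLOCK);
 */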