/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
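
/* Example (hypothetical consumer, not part of this file): any access that
 * must not race with a buffer reallocation is expected to be bracketed by
 * the in-use marks. While use_count is non-zero, iio_request_update_sw_rb()
 * below refuses to reallocate and returns -EAGAIN. The function name
 * my_read_chunk() is illustrative only.
 *
 *	static int my_read_chunk(struct iio_ring_buffer *r, size_t count,
 *				 u8 **data, int *dead_offset)
 *	{
 *		int ret;
 *
 *		iio_mark_sw_rb_in_use(r);
 *		ret = iio_rip_sw_rb(r, count, data, dead_offset);
 *		iio_unmark_sw_rb_in_use(r);
 *		return ret;
 *	}
 */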

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver.
 * A lock is always held if there is any chance this may be called.
 * Only one of these per ring may run concurrently - enforced by drivers.
 */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value. Before this runs it is null and read attempts fail
	 * with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
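
/* Example (hypothetical driver code): the store side above is intended to be
 * called from the bottom half of a data ready interrupt, one scan at a time,
 * with the driver ensuring that only one such fill ever runs concurrently.
 * All mydrv_* names and MYDRV_SCAN_BYTES are illustrative only.
 *
 *	static void mydrv_data_ready_bh(struct work_struct *work_s)
 *	{
 *		struct mydrv_state *st = container_of(work_s,
 *						      struct mydrv_state,
 *						      work);
 *		u8 scan[MYDRV_SCAN_BYTES];	// MYDRV_SCAN_BYTES == bpd
 *
 *		mydrv_read_scan(st, scan);	// pull one scan off the device
 *		iio_store_to_sw_ring(st->sw_ring, scan, st->last_timestamp);
 *	}
 */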

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Compute the new read position before max_copied
			 * is updated, otherwise the offset degenerates to
			 * zero. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
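
/* Example (hypothetical caller, error handling trimmed): the return value is
 * the number of valid bytes, which excludes the leading "dead" region that
 * the write side may have lapped while we copied, so the good data starts
 * dead_offset bytes into the returned buffer.
 *
 *	u8 *rx;
 *	int dead_offset;
 *	int valid = iio_rip_sw_rb(r, count, &rx, &dead_offset);
 *
 *	if (valid > 0) {
 *		if (copy_to_user(buf, rx + dead_offset, valid))
 *			valid = -EFAULT;
 *		kfree(rx);
 *	}
 */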

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* The write side may have moved on while we copied; if so the data
	 * may have been overwritten, so go round again. */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
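
/* Example (hypothetical): grab only the most recent scan, e.g. to back a
 * "current value" attribute, without draining the ring. The buffer passed in
 * must be at least bpd bytes; -EAGAIN means nothing has been written yet.
 * MYDRV_SCAN_BYTES is illustrative only.
 *
 *	u8 last[MYDRV_SCAN_BYTES];
 *	int ret = iio_read_last_from_sw_rb(r, last);
 *
 *	if (ret == -EAGAIN)
 *		return ret;	// ring empty so far, caller may retry
 */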

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);
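
/* Example (hypothetical sequence, assuming the driver has wired
 * access.mark_param_change to iio_mark_update_needed_sw_rb()): resizing the
 * ring. The setters further down only record the new geometry and flag a
 * pending update; the reallocation itself happens here, and is refused with
 * -EAGAIN while any reader still has the buffer marked in use.
 *
 *	iio_set_length_sw_rb(r, 128);	// 128 datums
 *	iio_set_bpd_sw_rb(r, 6);	// 6 bytes per datum
 *	ret = iio_request_update_sw_rb(r);
 *	if (ret == -EAGAIN)
 *		return ret;	// a reader holds the ring in use, retry later
 */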

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);
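
/* Example (hypothetical probe-time usage, error handling elided; the access
 * member names are assumed from the ring access function table of this era):
 * allocate the software ring, wire up the sw_rb implementations, and release
 * it again with iio_sw_rb_free() on remove or on a probe error.
 *
 *	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->ring)
 *		return -ENOMEM;
 *	indio_dev->ring->access.mark_in_use = &iio_mark_sw_rb_in_use;
 *	indio_dev->ring->access.unmark_in_use = &iio_unmark_sw_rb_in_use;
 *	indio_dev->ring->access.store_to = &iio_store_to_sw_rb;
 *	indio_dev->ring->access.rip_lots = &iio_rip_sw_rb;
 *	indio_dev->ring->access.request_update = &iio_request_update_sw_rb;
 *
 *	// on remove / error:
 *	iio_sw_rb_free(indio_dev->ring);
 */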

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");