/*
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>
/* Magic value stamped into every live buffer so that stray or freed
   buffers can be detected when sanity checking is enabled. */
#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS

#ifdef SANITY_CHECK_BUFFERS
/* Validate a buffer's signature; log and BUG() on corruption. */
#define BUFFER_CHECK(bp) do { \
	if ((bp)->signature != BUFFER_SIG) { \
		pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
			   "Buffer %p is bad at %s:%d", \
			   (bp),__FILE__,__LINE__); \
		pvr2_buffer_describe(bp,"BadSig"); \
		BUG(); \
	} \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while(0)
#endif
49 /* Buffers queued for reading */
50 struct list_head queued_list;
52 unsigned int q_bcount;
53 /* Buffers with retrieved data */
54 struct list_head ready_list;
56 unsigned int r_bcount;
57 /* Buffers available for use */
58 struct list_head idle_list;
60 unsigned int i_bcount;
61 /* Pointers to all buffers */
62 struct pvr2_buffer **buffers;
63 /* Array size of buffers */
64 unsigned int buffer_slot_count;
65 /* Total buffers actually in circulation */
66 unsigned int buffer_total_count;
67 /* Designed number of buffers to be in circulation */
68 unsigned int buffer_target_count;
69 /* Executed when ready list become non-empty */
70 pvr2_stream_callback callback_func;
72 /* Context for transfer endpoint */
73 struct usb_device *dev;
75 /* Overhead for mutex enforcement */
78 /* Tracking state for tolerating errors */
79 unsigned int fail_count;
80 unsigned int fail_tolerance;
86 enum pvr2_buffer_state state;
87 void *ptr; /* Pointer to storage area */
88 unsigned int max_count; /* Size of storage area */
89 unsigned int used_count; /* Amount of valid data in storage area */
90 int status; /* Transfer result status */
91 struct pvr2_stream *stream;
92 struct list_head list_overhead;
96 static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
99 case pvr2_buffer_state_none: return "none";
100 case pvr2_buffer_state_idle: return "idle";
101 case pvr2_buffer_state_queued: return "queued";
102 case pvr2_buffer_state_ready: return "ready";
#ifdef SANITY_CHECK_BUFFERS
/* Dump a buffer's full state to the trace log; tolerates bp == NULL. */
static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
{
	pvr2_trace(PVR2_TRACE_INFO,
		   "buffer%s%s %p state=%s id=%d status=%d"
		   " stream=%p purb=%p sig=0x%x",
		   (msg ? " " : ""),
		   (msg ? msg : ""),
		   bp,
		   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
		   (bp ? bp->id : 0),
		   (bp ? bp->status : 0),
		   (bp ? bp->stream : NULL),
		   (bp ? bp->purb : NULL),
		   (bp ? bp->signature : 0));
}
#endif  /* SANITY_CHECK_BUFFERS */
125 static void pvr2_buffer_remove(struct pvr2_buffer *bp)
130 struct pvr2_stream *sp = bp->stream;
132 case pvr2_buffer_state_idle:
134 bcnt = &sp->i_bcount;
135 ccnt = bp->max_count;
137 case pvr2_buffer_state_queued:
139 bcnt = &sp->q_bcount;
140 ccnt = bp->max_count;
142 case pvr2_buffer_state_ready:
144 bcnt = &sp->r_bcount;
145 ccnt = bp->used_count;
150 list_del_init(&bp->list_overhead);
153 pvr2_trace(PVR2_TRACE_BUF_FLOW,
154 "/*---TRACE_FLOW---*/"
155 " bufferPool %8s dec cap=%07d cnt=%02d",
156 pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
157 bp->state = pvr2_buffer_state_none;
160 static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
162 unsigned long irq_flags;
163 struct pvr2_stream *sp;
166 pvr2_trace(PVR2_TRACE_BUF_FLOW,
167 "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
169 pvr2_buffer_state_decode(bp->state),
170 pvr2_buffer_state_decode(pvr2_buffer_state_none));
171 spin_lock_irqsave(&sp->list_lock,irq_flags);
172 pvr2_buffer_remove(bp);
173 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
176 static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
179 unsigned long irq_flags;
180 struct pvr2_stream *sp;
183 pvr2_trace(PVR2_TRACE_BUF_FLOW,
184 "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
186 pvr2_buffer_state_decode(bp->state),
187 pvr2_buffer_state_decode(pvr2_buffer_state_ready));
188 spin_lock_irqsave(&sp->list_lock,irq_flags);
189 fl = (sp->r_count == 0);
190 pvr2_buffer_remove(bp);
191 list_add_tail(&bp->list_overhead,&sp->ready_list);
192 bp->state = pvr2_buffer_state_ready;
194 sp->r_bcount += bp->used_count;
195 pvr2_trace(PVR2_TRACE_BUF_FLOW,
196 "/*---TRACE_FLOW---*/"
197 " bufferPool %8s inc cap=%07d cnt=%02d",
198 pvr2_buffer_state_decode(bp->state),
199 sp->r_bcount,sp->r_count);
200 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
204 static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
206 unsigned long irq_flags;
207 struct pvr2_stream *sp;
210 pvr2_trace(PVR2_TRACE_BUF_FLOW,
211 "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
213 pvr2_buffer_state_decode(bp->state),
214 pvr2_buffer_state_decode(pvr2_buffer_state_idle));
215 spin_lock_irqsave(&sp->list_lock,irq_flags);
216 pvr2_buffer_remove(bp);
217 list_add_tail(&bp->list_overhead,&sp->idle_list);
218 bp->state = pvr2_buffer_state_idle;
220 sp->i_bcount += bp->max_count;
221 pvr2_trace(PVR2_TRACE_BUF_FLOW,
222 "/*---TRACE_FLOW---*/"
223 " bufferPool %8s inc cap=%07d cnt=%02d",
224 pvr2_buffer_state_decode(bp->state),
225 sp->i_bcount,sp->i_count);
226 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
229 static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
231 unsigned long irq_flags;
232 struct pvr2_stream *sp;
235 pvr2_trace(PVR2_TRACE_BUF_FLOW,
236 "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
238 pvr2_buffer_state_decode(bp->state),
239 pvr2_buffer_state_decode(pvr2_buffer_state_queued));
240 spin_lock_irqsave(&sp->list_lock,irq_flags);
241 pvr2_buffer_remove(bp);
242 list_add_tail(&bp->list_overhead,&sp->queued_list);
243 bp->state = pvr2_buffer_state_queued;
245 sp->q_bcount += bp->max_count;
246 pvr2_trace(PVR2_TRACE_BUF_FLOW,
247 "/*---TRACE_FLOW---*/"
248 " bufferPool %8s inc cap=%07d cnt=%02d",
249 pvr2_buffer_state_decode(bp->state),
250 sp->q_bcount,sp->q_count);
251 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
254 static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
256 if (bp->state == pvr2_buffer_state_queued) {
257 usb_kill_urb(bp->purb);
261 static int pvr2_buffer_init(struct pvr2_buffer *bp,
262 struct pvr2_stream *sp,
265 memset(bp,0,sizeof(*bp));
266 bp->signature = BUFFER_SIG;
268 pvr2_trace(PVR2_TRACE_BUF_POOL,
269 "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
271 bp->state = pvr2_buffer_state_none;
272 INIT_LIST_HEAD(&bp->list_overhead);
273 bp->purb = usb_alloc_urb(0,GFP_KERNEL);
274 if (! bp->purb) return -ENOMEM;
275 #ifdef SANITY_CHECK_BUFFERS
276 pvr2_buffer_describe(bp,"create");
281 static void pvr2_buffer_done(struct pvr2_buffer *bp)
283 #ifdef SANITY_CHECK_BUFFERS
284 pvr2_buffer_describe(bp,"delete");
286 pvr2_buffer_wipe(bp);
287 pvr2_buffer_set_none(bp);
290 if (bp->purb) usb_free_urb(bp->purb);
291 pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
292 " bufferDone %p",bp);
295 static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
300 /* Allocate buffers pointer array in multiples of 32 entries */
301 if (cnt == sp->buffer_total_count) return 0;
303 pvr2_trace(PVR2_TRACE_BUF_POOL,
304 "/*---TRACE_FLOW---*/ poolResize "
305 " stream=%p cur=%d adj=%+d",
307 sp->buffer_total_count,
308 cnt-sp->buffer_total_count);
311 if (cnt > scnt) scnt += 0x20;
313 if (cnt > sp->buffer_total_count) {
314 if (scnt > sp->buffer_slot_count) {
315 struct pvr2_buffer **nb;
316 nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
317 if (!nb) return -ENOMEM;
318 if (sp->buffer_slot_count) {
319 memcpy(nb,sp->buffers,
320 sp->buffer_slot_count * sizeof(*nb));
324 sp->buffer_slot_count = scnt;
326 while (sp->buffer_total_count < cnt) {
327 struct pvr2_buffer *bp;
328 bp = kmalloc(sizeof(*bp),GFP_KERNEL);
329 if (!bp) return -ENOMEM;
330 ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
335 sp->buffers[sp->buffer_total_count] = bp;
336 (sp->buffer_total_count)++;
337 pvr2_buffer_set_idle(bp);
340 while (sp->buffer_total_count > cnt) {
341 struct pvr2_buffer *bp;
342 bp = sp->buffers[sp->buffer_total_count - 1];
344 sp->buffers[sp->buffer_total_count - 1] = NULL;
345 (sp->buffer_total_count)--;
346 pvr2_buffer_done(bp);
349 if (scnt < sp->buffer_slot_count) {
350 struct pvr2_buffer **nb = NULL;
352 nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
353 if (!nb) return -ENOMEM;
354 memcpy(nb,sp->buffers,scnt * sizeof(*nb));
358 sp->buffer_slot_count = scnt;
364 static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
366 struct pvr2_buffer *bp;
369 if (sp->buffer_total_count == sp->buffer_target_count) return 0;
371 pvr2_trace(PVR2_TRACE_BUF_POOL,
372 "/*---TRACE_FLOW---*/"
373 " poolCheck stream=%p cur=%d tgt=%d",
374 sp,sp->buffer_total_count,sp->buffer_target_count);
376 if (sp->buffer_total_count < sp->buffer_target_count) {
377 return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
381 while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
382 bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
383 if (bp->state != pvr2_buffer_state_idle) break;
387 pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
393 static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
395 struct list_head *lp;
396 struct pvr2_buffer *bp1;
397 while ((lp = sp->queued_list.next) != &sp->queued_list) {
398 bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
399 pvr2_buffer_wipe(bp1);
400 /* At this point, we should be guaranteed that no
401 completion callback may happen on this buffer. But it's
402 possible that it might have completed after we noticed
403 it but before we wiped it. So double check its status
405 if (bp1->state != pvr2_buffer_state_queued) continue;
406 pvr2_buffer_set_idle(bp1);
408 if (sp->buffer_total_count != sp->buffer_target_count) {
409 pvr2_stream_achieve_buffer_count(sp);
413 static void pvr2_stream_init(struct pvr2_stream *sp)
415 spin_lock_init(&sp->list_lock);
416 mutex_init(&sp->mutex);
417 INIT_LIST_HEAD(&sp->queued_list);
418 INIT_LIST_HEAD(&sp->ready_list);
419 INIT_LIST_HEAD(&sp->idle_list);
422 static void pvr2_stream_done(struct pvr2_stream *sp)
424 mutex_lock(&sp->mutex); do {
425 pvr2_stream_internal_flush(sp);
426 pvr2_stream_buffer_count(sp,0);
427 } while (0); mutex_unlock(&sp->mutex);
430 static void buffer_complete(struct urb *urb, struct pt_regs *regs)
432 struct pvr2_buffer *bp = urb->context;
433 struct pvr2_stream *sp;
434 unsigned long irq_flags;
439 pvr2_trace(PVR2_TRACE_BUF_FLOW,
440 "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
441 bp,urb->status,urb->actual_length);
442 spin_lock_irqsave(&sp->list_lock,irq_flags);
443 if ((!(urb->status)) ||
444 (urb->status == -ENOENT) ||
445 (urb->status == -ECONNRESET) ||
446 (urb->status == -ESHUTDOWN)) {
447 bp->used_count = urb->actual_length;
448 if (sp->fail_count) {
449 pvr2_trace(PVR2_TRACE_TOLERANCE,
450 "stream %p transfer ok"
451 " - fail count reset",sp);
454 } else if (sp->fail_count < sp->fail_tolerance) {
455 // We can tolerate this error, because we're below the
458 pvr2_trace(PVR2_TRACE_TOLERANCE,
459 "stream %p ignoring error %d"
460 " - fail count increased to %u",
461 sp,urb->status,sp->fail_count);
463 bp->status = urb->status;
465 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
466 pvr2_buffer_set_ready(bp);
467 if (sp && sp->callback_func) {
468 sp->callback_func(sp->callback_data);
472 struct pvr2_stream *pvr2_stream_create(void)
474 struct pvr2_stream *sp;
475 sp = kmalloc(sizeof(*sp),GFP_KERNEL);
477 memset(sp,0,sizeof(*sp));
478 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
479 pvr2_stream_init(sp);
483 void pvr2_stream_destroy(struct pvr2_stream *sp)
486 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
487 pvr2_stream_done(sp);
491 void pvr2_stream_setup(struct pvr2_stream *sp,
492 struct usb_device *dev,
494 unsigned int tolerance)
496 mutex_lock(&sp->mutex); do {
497 pvr2_stream_internal_flush(sp);
499 sp->endpoint = endpoint;
500 sp->fail_tolerance = tolerance;
501 } while(0); mutex_unlock(&sp->mutex);
504 void pvr2_stream_set_callback(struct pvr2_stream *sp,
505 pvr2_stream_callback func,
508 unsigned long irq_flags;
509 mutex_lock(&sp->mutex); do {
510 spin_lock_irqsave(&sp->list_lock,irq_flags);
511 sp->callback_data = data;
512 sp->callback_func = func;
513 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
514 } while(0); mutex_unlock(&sp->mutex);
517 /* Query / set the nominal buffer count */
519 int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
522 if (sp->buffer_target_count == cnt) return 0;
523 mutex_lock(&sp->mutex); do {
524 sp->buffer_target_count = cnt;
525 ret = pvr2_stream_achieve_buffer_count(sp);
526 } while(0); mutex_unlock(&sp->mutex);
530 struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
532 struct list_head *lp = sp->idle_list.next;
533 if (lp == &sp->idle_list) return NULL;
534 return list_entry(lp,struct pvr2_buffer,list_overhead);
537 struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
539 struct list_head *lp = sp->ready_list.next;
540 if (lp == &sp->ready_list) return NULL;
541 return list_entry(lp,struct pvr2_buffer,list_overhead);
544 struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
546 if (id < 0) return NULL;
547 if (id >= sp->buffer_total_count) return NULL;
548 return sp->buffers[id];
551 int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
557 void pvr2_stream_kill(struct pvr2_stream *sp)
559 struct pvr2_buffer *bp;
560 mutex_lock(&sp->mutex); do {
561 pvr2_stream_internal_flush(sp);
562 while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) {
563 pvr2_buffer_set_idle(bp);
565 if (sp->buffer_total_count != sp->buffer_target_count) {
566 pvr2_stream_achieve_buffer_count(sp);
568 } while(0); mutex_unlock(&sp->mutex);
571 int pvr2_buffer_queue(struct pvr2_buffer *bp)
579 struct pvr2_stream *sp;
580 if (!bp) return -EINVAL;
582 mutex_lock(&sp->mutex); do {
583 pvr2_buffer_wipe(bp);
588 pvr2_buffer_set_queued(bp);
590 for (idx = 0; idx < (bp->max_count) / 4; idx++) {
593 ((unsigned int *)(bp->ptr))[idx] = val;
596 bp->status = -EINPROGRESS;
597 usb_fill_bulk_urb(bp->purb, // struct urb *urb
598 sp->dev, // struct usb_device *dev
600 usb_rcvbulkpipe(sp->dev,sp->endpoint),
601 bp->ptr, // void *transfer_buffer
602 bp->max_count, // int buffer_length
605 usb_submit_urb(bp->purb,GFP_KERNEL);
606 } while(0); mutex_unlock(&sp->mutex);
611 int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
614 unsigned long irq_flags;
615 struct pvr2_stream *sp;
616 if (!bp) return -EINVAL;
618 mutex_lock(&sp->mutex); do {
619 spin_lock_irqsave(&sp->list_lock,irq_flags);
620 if (bp->state != pvr2_buffer_state_idle) {
624 bp->stream->i_bcount -= bp->max_count;
626 bp->stream->i_bcount += bp->max_count;
627 pvr2_trace(PVR2_TRACE_BUF_FLOW,
628 "/*---TRACE_FLOW---*/ bufferPool "
629 " %8s cap cap=%07d cnt=%02d",
630 pvr2_buffer_state_decode(
631 pvr2_buffer_state_idle),
632 bp->stream->i_bcount,bp->stream->i_count);
634 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
635 } while(0); mutex_unlock(&sp->mutex);
639 unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
641 return bp->used_count;
644 int pvr2_buffer_get_status(struct pvr2_buffer *bp)
650 int pvr2_buffer_get_id(struct pvr2_buffer *bp)
/*
  Stuff for Emacs to see, in order to encourage consistent editing style:
  *** Local Variables: ***
  *** mode: c ***
  *** fill-column: 75 ***
  *** tab-width: 8 ***
  *** c-basic-offset: 8 ***
  *** End: ***
  */