author    Michael Sevakis <jethead71@rockbox.org>  2011-04-27 03:08:23 +0000
committer Michael Sevakis <jethead71@rockbox.org>  2011-04-27 03:08:23 +0000
commit    c537d5958e8b421ac4f9bef6c8b9e7425a6cf167
tree      7ed36518fb6524da7bbd913ba7619b85b5d15d23 /firmware/kernel.c
parent    dcf0f8de4a37ff1d2ea510aef75fa67977a8bdcc
Commit FS#12069 - Playback rework - first stages.

Gives as thorough a treatment as possible of codec management, track-change
and metadata logic while maintaining a fairly narrow focus and not rewriting
everything all at once. Please see the rockbox-dev mail archive from
2011-04-25 (Playback engine rework) for a more thorough manifest of what was
addressed. Plugins and codecs become incompatible.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29785 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/kernel.c')
 -rw-r--r--  firmware/kernel.c  215
 1 file changed, 173 insertions(+), 42 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 4fcfcb9d30..288ebbbede 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -516,8 +516,10 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
     oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
-    /* auto-reply */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
     queue_do_auto_reply(q->send);
+#endif
 
     while(1)
     {
@@ -541,12 +543,18 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
             corelock_lock(&q->cl);
         }
 
-        q->read = rd + 1;
-        rd &= QUEUE_LENGTH_MASK;
-        *ev = q->events[rd];
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+        if(ev)
+#endif
+        {
+            q->read = rd + 1;
+            rd &= QUEUE_LENGTH_MASK;
+            *ev = q->events[rd];
 
-        /* Get data for a waiting thread if one */
-        queue_do_fetch_sender(q->send, rd);
+            /* Get data for a waiting thread if one */
+            queue_do_fetch_sender(q->send, rd);
+        }
+        /* else just waiting on non-empty */
 
         corelock_unlock(&q->cl);
         restore_irq(oldlevel);
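The net effect of the two hunks above is that queue_wait() may now be called
with a NULL event pointer to block until the queue is non-empty without
dequeuing anything (extended-messaging builds only). A minimal caller sketch,
with a hypothetical queue that is not part of this commit:

    /* Block until something is queued; the head event stays queued and
       any pending sender auto-reply is still issued. */
    static struct event_queue my_q; /* assume initialized via queue_init() */

    void wait_for_activity(void)
    {
        queue_wait(&my_q, NULL); /* NULL ev: wait on non-empty only */
    }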
@@ -566,8 +574,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
-    /* Auto-reply */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
     queue_do_auto_reply(q->send);
+#endif
 
     rd = q->read;
     wr = q->write;
@@ -590,20 +600,26 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
         wr = q->write;
     }
 
-    /* no worry about a removed message here - status is checked inside
-       locks - perhaps verify if timeout or false alarm */
-    if (rd != wr)
-    {
-        q->read = rd + 1;
-        rd &= QUEUE_LENGTH_MASK;
-        *ev = q->events[rd];
-        /* Get data for a waiting thread if one */
-        queue_do_fetch_sender(q->send, rd);
-    }
-    else
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    if(ev)
+#endif
     {
-        ev->id = SYS_TIMEOUT;
+        /* no worry about a removed message here - status is checked inside
+           locks - perhaps verify if timeout or false alarm */
+        if (rd != wr)
+        {
+            q->read = rd + 1;
+            rd &= QUEUE_LENGTH_MASK;
+            *ev = q->events[rd];
+            /* Get data for a waiting thread if one */
+            queue_do_fetch_sender(q->send, rd);
+        }
+        else
+        {
+            ev->id = SYS_TIMEOUT;
+        }
     }
+    /* else just waiting on non-empty */
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
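queue_wait_w_tmo() keeps the same contract: with a non-NULL ev, a timeout is
reported as SYS_TIMEOUT rather than a real event. A sketch of the usual
caller pattern (my_q is the same hypothetical queue as above):

    struct queue_event ev;

    queue_wait_w_tmo(&my_q, &ev, HZ/2); /* wait at most half a second */

    if(ev.id == SYS_TIMEOUT)
    {
        /* nothing arrived in time */
    }
    else
    {
        /* handle ev.id and ev.data */
    }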
@@ -740,23 +756,99 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 }
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
-bool queue_peek(struct event_queue *q, struct queue_event *ev)
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/* Scan the event queue from head to tail, returning any event from the
+   filter list that was found, optionally removing the event. If an
+   event is returned, synchronous events are handled in the same manner as
+   with queue_wait(_w_tmo); if discarded, then as queue_clear.
+   If filters are NULL, any event matches. If filters exist, the default
+   is to search the full queue depth.
+   Earlier filters take precedence.
+
+   Return true if an event was found, false otherwise. */
+bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
+                   unsigned int flags, const long (*filters)[2])
 {
-    unsigned int rd;
+    bool have_msg;
+    unsigned int rd, wr;
+    int oldlevel;
 
-    if(q->read == q->write)
-        return false;
+    if(LIKELY(q->read == q->write))
+        return false; /* Empty: do nothing further */
 
-    bool have_msg = false;
+    have_msg = false;
 
-    int oldlevel = disable_irq_save();
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
-    rd = q->read;
-    if(rd != q->write)
+    /* Starting at the head, find first match */
+    for(rd = q->read, wr = q->write; rd != wr; rd++)
     {
-        *ev = q->events[rd & QUEUE_LENGTH_MASK];
+        struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];
+
+        if(filters)
+        {
+            /* Have filters - find the first thing that passes */
+            const long (* f)[2] = filters;
+            const long (* const f_last)[2] =
+                &filters[flags & QPEEK_FILTER_COUNT_MASK];
+            long id = e->id;
+
+            do
+            {
+                if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
+                    goto passed_filter;
+            }
+            while(++f <= f_last);
+
+            if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
+                continue; /* No match; test next event */
+            else
+                break; /* Only check the head */
+        }
+        /* else - anything passes */
+
+    passed_filter:
+
+        /* Found a matching event */
         have_msg = true;
+
+        if(ev)
+            *ev = *e; /* Caller wants the event */
+
+        if(flags & QPEEK_REMOVE_EVENTS)
+        {
+            /* Do event removal */
+            unsigned int r = q->read;
+            q->read = r + 1; /* Advance head */
+
+            if(ev)
+            {
+                /* Auto-reply */
+                queue_do_auto_reply(q->send);
+                /* Get the thread waiting for reply, if any */
+                queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
+            }
+            else
+            {
+                /* Release any thread waiting on this message */
+                queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
+            }
+
+            /* Slide messages forward into the gap if not at the head */
+            while(rd != r)
+            {
+                unsigned int dst = rd & QUEUE_LENGTH_MASK;
+                unsigned int src = --rd & QUEUE_LENGTH_MASK;
+
+                q->events[dst] = q->events[src];
+                /* Keep sender wait list in sync */
+                if(q->send)
+                    q->send->senders[dst] = q->send->senders[src];
+            }
+        }
+
+        break;
     }
 
     corelock_unlock(&q->cl);
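Each filter entry passed to queue_peek_ex() is an inclusive [first, last] ID
range, the low bits of flags (QPEEK_FILTER_COUNT_MASK) select how many
entries are consulted, and QPEEK_REMOVE_EVENTS discards the match as
queue_clear would. A usage sketch with a hypothetical ID range (MY_EV_FIRST
and MY_EV_LAST are illustrative, not from this commit):

    /* Find and remove the first queued event in [MY_EV_FIRST, MY_EV_LAST];
       a zero count field means a single filter entry, scanned full depth. */
    static const long filters[1][2] = { { MY_EV_FIRST, MY_EV_LAST } };
    struct queue_event ev;

    if(queue_peek_ex(&my_q, &ev, QPEEK_REMOVE_EVENTS, filters))
    {
        /* ev holds the removed event; since ev was non-NULL, any
           synchronous sender was handled as in queue_wait(_w_tmo) */
    }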
@@ -765,30 +857,42 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
     return have_msg;
 }
 
-/* Poll queue to see if a message exists - careful in using the result if
- * queue_remove_from_head is called when messages are posted - possibly use
- * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
- * unsignals the queue may cause an unwanted block */
-bool queue_empty(const struct event_queue* q)
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
 {
-    return ( q->read == q->write );
+    return queue_peek_ex(q, ev, 0, NULL);
 }
 
-void queue_clear(struct event_queue* q)
+void queue_remove_from_head(struct event_queue *q, long id)
 {
-    int oldlevel;
+    const long f[2] = { id, id };
+    while (queue_peek_ex(q, NULL,
+                QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
+}
+#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
+/* The more powerful routines aren't required */
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
+{
+    unsigned int rd;
 
-    oldlevel = disable_irq_save();
-    corelock_lock(&q->cl);
+    if(q->read == q->write)
+        return false;
 
-    /* Release all threads waiting in the queue for a reply -
-       dequeued sent message will be handled by owning thread */
-    queue_release_all_senders(q);
+    bool have_msg = false;
 
-    q->read = q->write;
+    int oldlevel = disable_irq_save();
+    corelock_lock(&q->cl);
+
+    rd = q->read;
+    if(rd != q->write)
+    {
+        *ev = q->events[rd & QUEUE_LENGTH_MASK];
+        have_msg = true;
+    }
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
+
+    return have_msg;
 }
 
 void queue_remove_from_head(struct event_queue *q, long id)
@@ -816,6 +920,33 @@ void queue_remove_from_head(struct event_queue *q, long id)
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
 }
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+/* Poll queue to see if a message exists - careful in using the result if
+ * queue_remove_from_head is called when messages are posted - possibly use
+ * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
+ * unsignals the queue may cause an unwanted block */
+bool queue_empty(const struct event_queue* q)
+{
+    return ( q->read == q->write );
+}
+
+void queue_clear(struct event_queue* q)
+{
+    int oldlevel;
+
+    oldlevel = disable_irq_save();
+    corelock_lock(&q->cl);
+
+    /* Release all threads waiting in the queue for a reply -
+       dequeued sent message will be handled by owning thread */
+    queue_release_all_senders(q);
+
+    q->read = q->write;
+
+    corelock_unlock(&q->cl);
+    restore_irq(oldlevel);
+}
 
 /**
  * The number of events waiting in the queue.
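To illustrate the warning above queue_empty(): polling and then waiting is
racy if another thread can remove the message in between, so the comment
suggests a zero-tick timed wait instead. A sketch (the ev argument is filled
in here; the original comment elides it):

    /* Rather than: if(!queue_empty(&my_q)) queue_wait(&my_q, &ev); */
    struct queue_event ev;

    queue_wait_w_tmo(&my_q, &ev, 0); /* returns without sleeping */

    if(ev.id != SYS_TIMEOUT)
    {
        /* got a real event */
    }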