summaryrefslogtreecommitdiff
path: root/firmware/profile.c
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/profile.c')
-rw-r--r--firmware/profile.c303
1 files changed, 303 insertions, 0 deletions
diff --git a/firmware/profile.c b/firmware/profile.c
new file mode 100644
index 0000000000..8ad46515f8
--- /dev/null
+++ b/firmware/profile.c
@@ -0,0 +1,303 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Profiling routines counts ticks and calls to each profiled function.
11 *
12 * Copyright (C) 2005 by Brandon Low
13 *
14 * All files in this archive are subject to the GNU General Public License.
15 * See the file COPYING in the source tree root for full license agreement.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************
21 *
22 * profile_func_enter() based on mcount found in gmon.c:
23 *
24 ***************************************************************************
25 * Copyright (c) 1991, 1998 The Regents of the University of California.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. [rescinded 22 July 1999]
37 * 4. Neither the name of the University nor the names of its contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 * @(#)gmon.c 5.3 (Berkeley) 5/22/91
53 */
54
55#include <file.h>
56#include <sprintf.h>
57#include <system.h>
58#include <string.h>
59#include <timer.h>
60#include "profile.h"
61
/* Profiler state flag.  PROF_ON is zero (the "if (profiling)" fast paths in
 * profile_func_enter/exit depend on that); PROF_OFF / PROF_BUSY / PROF_ERROR
 * and the OFF_THREAD mask are all non-zero and suppress data collection.
 * NOTE(review): assumes PROF_ON == 0 per the comments below — confirm in
 * profile.h. */
static unsigned short profiling = PROF_OFF;
/* Depth of direct self-recursion of the function currently being timed */
static size_t recursion_level;
/* Hash table: caller PC (masked with INDEX_MASK) -> index into pfds[] */
static unsigned short indices[INDEX_SIZE];
/* Profiled-function descriptors; slot 0 is bookkeeping — pfds[0].link holds
 * the number of slots allocated so far (i.e. the next free index) */
static struct pfd_struct pfds[NUMPFDS];
/* This holds a pointer to the last pfd effected for time tracking */
static struct pfd_struct *last_pfd;
/* These are used to track the time when we've lost the CPU so it doesn't count
 * against any of the profiled functions */
static int profiling_thread = -1;

/* internal function prototypes */
static void profile_timer_tick(void);
static void profile_timer_unregister(void);

static void write_function_recursive(int fd, struct pfd_struct *pfd, int depth);

/* Single-instruction increment (m68k addq.l) so the update cannot be torn
 * by the profiler timer interrupt.  The "I" constraint restricts _value to
 * an addq-encodable immediate (1..8).
 * Be careful to use the right one for the size of your variable */
#define ADDQI_L(_var,_value) \
  asm ("addq.l %[value],%[var];" \
       : [var] "+g" (_var) \
       : [value] "I" (_value) )
83
84void profile_thread_stopped(int current_thread) {
85 if (current_thread == profiling_thread) {
86 /* If profiling is busy or idle */
87 if (profiling < PROF_ERROR) {
88 /* Unregister the timer so that other threads aren't interrupted */
89 timer_unregister();
90 }
91 /* Make sure we don't waste time profiling when we're running the
92 * wrong thread */
93 profiling |= PROF_OFF_THREAD;
94 }
95}
96
97void profile_thread_started(int current_thread) {
98 if (current_thread == profiling_thread) {
99 /* Now we are allowed to profile again */
100 profiling &= PROF_ON_THREAD;
101 /* if profiling was busy or idle */
102 if (profiling < PROF_ERROR) {
103 /* After we de-mask, if profiling is active, reactivate the timer */
104 timer_register(0, profile_timer_unregister,
105 CPU_FREQ/10000, 0, profile_timer_tick);
106 }
107 }
108}
109
110static void profile_timer_tick(void) {
111 if (!profiling) {
112 register struct pfd_struct *my_last_pfd = last_pfd;
113 if (my_last_pfd) {
114 ADDQI_L(my_last_pfd->time,1);
115 }
116 }
117}
118
/* Callback invoked if another component steals our timer.  Record the
 * error state BEFORE calling profstop() so that profstop() sees
 * PROF_ERROR in profiling_exit and annotates the output file. */
static void profile_timer_unregister(void) {
    profiling = PROF_ERROR;
    profstop();
}
123
124/* This function clears the links on top level linkers, and clears the needed
125 * parts of memory in the index array */
126void profstart(int current_thread) {
127 recursion_level = 0;
128 profiling_thread = current_thread;
129 last_pfd = (struct pfd_struct*)0;
130 pfds[0].link = 0;
131 pfds[0].self_pc = 0;
132 memset(&indices,0,INDEX_SIZE * sizeof(unsigned short));
133 timer_register(
134 0, profile_timer_unregister, CPU_FREQ/10000, 0, profile_timer_tick);
135 profiling = PROF_ON;
136}
137
138static void write_function_recursive(int fd, struct pfd_struct *pfd, int depth){
139 unsigned short link = pfd->link;
140 fdprintf(fd,"0x%08lX\t%08ld\t%08ld\t%04d\n", (size_t)pfd->self_pc,
141 pfd->count, pfd->time, depth);
142 if (link > 0 && link < NUMPFDS) {
143 write_function_recursive(fd, &pfds[link], depth++);
144 }
145}
146
147void profstop() {
148 int profiling_exit = profiling;
149 int fd = 0;
150 int i;
151 unsigned short current_index;
152 timer_unregister();
153 profiling = PROF_OFF;
154 fd = open("/profile.out", O_WRONLY|O_CREAT|O_TRUNC);
155 if (profiling_exit == PROF_ERROR) {
156 fdprintf(fd,"Profiling exited with an error.\n");
157 fdprintf(fd,"Overflow or timer stolen most likely.\n");
158 }
159 fdprintf(fd,"PROFILE_THREAD\tPFDS_USED\n");
160 fdprintf(fd,"%08d\t%08d\n", profiling_thread,
161 pfds[0].link);
162 fdprintf(fd,"FUNCTION_PC\tCALL_COUNT\tTICKS\t\tDEPTH\n");
163 for (i = 0; i < INDEX_SIZE; i++) {
164 current_index = indices[i];
165 if (current_index != 0) {
166 write_function_recursive(fd, &pfds[current_index], 0);
167 }
168 }
169 fdprintf(fd,"DEBUG PROFILE DATA FOLLOWS\n");
170 fdprintf(fd,"INDEX\tLOCATION\tSELF_PC\t\tCOUNT\t\tTIME\t\tLINK\tCALLER\n");
171 for (i = 0; i < NUMPFDS; i++) {
172 struct pfd_struct *my_last_pfd = &pfds[i];
173 if (my_last_pfd->self_pc != 0) {
174 fdprintf(fd,
175 "%04d\t0x%08lX\t0x%08lX\t0x%08lX\t0x%08lX\t%04d\t0x%08lX\n",
176 i, (size_t)my_last_pfd, (size_t)my_last_pfd->self_pc,
177 my_last_pfd->count, my_last_pfd->time, my_last_pfd->link,
178 (size_t)my_last_pfd->caller);
179 }
180 }
181 fdprintf(fd,"INDEX_ADDRESS=INDEX\n");
182 for (i=0; i < INDEX_SIZE; i++) {
183 fdprintf(fd,"%08lX=%04d\n",(size_t)&indices[i],indices[i]);
184 }
185 close(fd);
186}
187
/* Instrumentation hook called on every function exit (compiler-inserted).
 * Pops the current function off the caller chain so ticks resume accruing
 * to its caller, or just unwinds one level of direct self-recursion.
 * Guarded by the profiling flag so the timer ISR and re-entrant calls
 * cannot observe the chain mid-update. */
void profile_func_exit(void *self_pc, void *call_site) {
    (void)call_site;
    (void)self_pc;
    /* When we started timing, we set the time to the tick at that time
     * less the time already used in function */
    /* Bail unless profiling is live (PROF_ON == 0) on this thread */
    if (profiling) {
        return;
    }
    /* Mark busy: locks out the timer tick and nested instrumented calls */
    profiling = PROF_BUSY;
    {
        /* NOTE(review): recursion_level is size_t but is cached here in an
         * unsigned short — assumes recursion depth never exceeds 65535;
         * confirm this is intended. */
        register unsigned short my_recursion_level = recursion_level;
        if (my_recursion_level) {
            /* Leaving one level of self-recursion: same pfd stays current */
            my_recursion_level--;
            recursion_level = my_recursion_level;
        } else {
            /* This shouldn't be necessary, maybe exit could be called first */
            register struct pfd_struct *my_last_pfd = last_pfd;
            if (my_last_pfd) {
                /* Pop: the caller becomes the function being timed */
                last_pfd = my_last_pfd->caller;
            }
        }
    }
    profiling = PROF_ON;
}
212
/* Allocate the next free pfd slot (pfds[0].link is the running allocation
 * count).  Sets 'temp' to the new index, points the local 'pfd' at the slot
 * and initializes its self_pc/count/time fields.  Jumps to the 'overflow'
 * label of the expansion site when the table is exhausted — only usable
 * inside profile_func_enter, which defines 'pfd', 'self_pc' and 'overflow'. */
#define ALLOCATE_PFD(temp) \
  temp = ++pfds[0].link;\
  if (temp >= NUMPFDS) goto overflow; \
  pfd = &pfds[temp];\
  pfd->self_pc = self_pc; pfd->count = 1; pfd->time = 0
218
/* Instrumentation hook called on every function entry (compiler-inserted,
 * in the style of BSD mcount).  Looks up (or allocates) the pfd for this
 * (caller PC, callee PC) pair via the indices[] hash table, bumps its call
 * count, moves it to the head of its collision chain, and makes it the
 * current target for timer ticks.  On pfd-table overflow, profiling is put
 * into the error state. */
void profile_func_enter(void *self_pc, void *from_pc) {
    struct pfd_struct *pfd;
    struct pfd_struct *prev_pfd;
    unsigned short *pfd_index_pointer;
    unsigned short pfd_index;

    /* check that we are profiling and that we aren't recursively invoked
     * this is equivalent to 'if (profiling != PROF_ON)' but it's faster */
    if (profiling) {
        return;
    }
    /* this is equivalent to 'profiling = PROF_BUSY;' but it's faster */
    profiling = PROF_BUSY;
    /* A check that the PC is in the code range here wouldn't hurt, but this is
     * logically guaranteed to be a valid address unless the constants are
     * breaking the rules. */
    /* Hash on the caller's PC: each bucket heads a chain of pfds for the
     * different callees invoked from PCs that share this bucket */
    pfd_index_pointer = &indices[((size_t)from_pc)&INDEX_MASK];
    pfd_index = *pfd_index_pointer;
    if (pfd_index == 0) {
        /* new caller, allocate new storage */
        ALLOCATE_PFD(pfd_index);
        pfd->link = 0;
        *pfd_index_pointer = pfd_index;
        goto done;
    }
    pfd = &pfds[pfd_index];
    if (pfd->self_pc == self_pc) {
        /* only / most recent function called by this caller, usual case */
        /* increment count, start timing and exit */
        goto found;
    }
    /* collision, bad for performance, look down the list of functions called by
     * colliding PCs */
    for (; /* goto done */; ) {
        pfd_index = pfd->link;
        if (pfd_index == 0) {
            /* no more previously called functions, allocate a new one */
            ALLOCATE_PFD(pfd_index);
            /* this function becomes the new head, link to the old head */
            pfd->link = *pfd_index_pointer;
            /* and set the index to point to this function */
            *pfd_index_pointer = pfd_index;
            /* start timing and exit */
            goto done;
        }
        /* move along the chain */
        prev_pfd = pfd;
        pfd = &pfds[pfd_index];
        if (pfd->self_pc == self_pc) {
            /* found ourself */
            /* Remove me from my old spot */
            prev_pfd->link = pfd->link;
            /* Link to the old head */
            pfd->link = *pfd_index_pointer;
            /* Make me head */
            *pfd_index_pointer = pfd_index;
            /* increment count, start timing and exit */
            goto found;
        }

    }
/* We've found a pfd, increment it */
found:
    ADDQI_L(pfd->count,1);
/* We've (found or created) and updated our pfd, save it and start timing */
done:
    {
        register struct pfd_struct *my_last_pfd = last_pfd;
        if (pfd != my_last_pfd) {
            /* If we are not recursing */
            pfd->caller = my_last_pfd;
            last_pfd = pfd;
        } else {
            /* Same pfd as the current one: direct self-recursion */
            ADDQI_L(recursion_level,1);
        }
    }
    /* Start timing this function */
    profiling = PROF_ON;
    return; /* normal return restores saved registers */

overflow:
    /* this is the same as 'profiling = PROF_ERROR' */
    profiling = PROF_ERROR;
    return;
}