Make heuristic stack scanning more accurate

2007-11-18  Soren Sandmann <sandmann@daimi.au.dk>

        * module/sysprof-module.c (heuristic_trace): Make heuristic stack
        scanning more accurate



svn path=/trunk/; revision=393
Soren Sandmann
2007-11-19 03:34:19 +00:00
committed by Søren Sandmann Pedersen
parent 05c4a202b3
commit 8d73f2d391
3 changed files with 50 additions and 5 deletions
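For orientation before the diff: the new heuristic walks the user-space stack one word at a time and keeps a value only if it points into an executable mapping of the process, rather than blindly copying a block of stack words. Below is a rough userspace sketch of the same idea, offered only as an illustration: it parses /proc/self/maps instead of calling find_vma(), and every name in it is made up for the example, not taken from sysprof.

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if addr falls inside an executable mapping of this process.
     * This stands in for the module's find_vma() + VM_EXEC check. */
    static int in_executable_mapping (unsigned long addr)
    {
            FILE *maps = fopen ("/proc/self/maps", "r");
            char line[512];
            int hit = 0;

            if (!maps)
                    return 0;
            while (fgets (line, sizeof line, maps)) {
                    unsigned long start, end;
                    char perms[8];

                    if (sscanf (line, "%lx-%lx %7s", &start, &end, perms) == 3 &&
                        strchr (perms, 'x') && start <= addr && addr < end) {
                            hit = 1;
                            break;
                    }
            }
            fclose (maps);
            return hit;
    }

    /* Walk n_words stack slots upward from sp and keep only values that
     * look like code addresses, mirroring the module's loop from esp to
     * eos in the diff below. */
    static int scan_stack (const unsigned long *sp, int n_words,
                           unsigned long *out, int max_out)
    {
            int i, n = 0;

            for (i = 0; i < n_words && n < max_out; i++) {
                    if (in_executable_mapping (sp[i]))
                            out[n++] = sp[i];
            }
            return n;
    }

    int main (void)
    {
            unsigned long local;            /* some slot on the current stack */
            unsigned long addrs[64];
            int i, n = scan_stack (&local, 64, addrs, 64);

            for (i = 0; i < n; i++)
                    printf ("candidate return address: %#lx\n", addrs[i]);
            return 0;
    }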

ChangeLog

@@ -1,3 +1,8 @@
2007-11-18  Soren Sandmann <sandmann@daimi.au.dk>

        * module/sysprof-module.c (heuristic_trace): Make heuristic stack
        scanning more accurate

Sun Nov 18 18:12:09 2007  Søren Sandmann <sandmann@redhat.com>

        * module/sysprof-module.c (heuristic_trace): Crude heuristic stack
TODO

@@ -25,6 +25,10 @@ Before 1.2:
* Is the move-to-front in process_locate_map() really worth it?

* Whenever we fail to lock the atomic variable, track this, and send the information
  to userspace as an indication of the overhead of the profiling. Although there is
  inherent aliasing here since stack scanning happens at regular intervals.

* Apparently, if you upgrade the kernel, then don't re-run configure,
  the kernel Makefile will delete all of /lib/modules/<release>/kernel
  if you run make install in the module directory. Need to find out what

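The new TODO item above, about tracking failed attempts to take the atomic variable and reporting them to userspace as a measure of profiling overhead, could be prototyped along these lines. This is only a userspace sketch with C11 atomics; the flag, the counter and timer_tick() are hypothetical names, not anything that exists in sysprof.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag in_notify = ATOMIC_FLAG_INIT;  /* stands in for in_timer_notify */
    static atomic_int  n_lock_failures;               /* hypothetical overhead counter  */

    static void timer_tick (void)
    {
            if (atomic_flag_test_and_set (&in_notify)) {
                    /* Could not take the flag: count the missed sample
                     * instead of dropping it silently. */
                    atomic_fetch_add (&n_lock_failures, 1);
                    return;
            }
            /* ... take the sample here ... */
            atomic_flag_clear (&in_notify);
    }

    int main (void)
    {
            int i;

            /* Simulate a sample in progress while five ticks arrive. */
            atomic_flag_test_and_set (&in_notify);
            for (i = 0; i < 5; i++)
                    timer_tick ();
            atomic_flag_clear (&in_notify);
            timer_tick ();                  /* this one succeeds */

            printf ("missed samples: %d\n", atomic_load (&n_lock_failures));
            return 0;
    }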
module/sysprof-module.c

@@ -177,24 +177,59 @@ heuristic_trace (struct pt_regs *regs,
        if (esp < eos - (current->mm->stack_vm << PAGE_SHIFT)) {
                /* Stack pointer is not in stack map */
                printk (KERN_ALERT "too small stackpointer in %d\n", current->pid);
                return;
        }

        if (eos > esp) {
        if (esp < eos) {
#if 0
                printk (KERN_ALERT "ok stackpointer\n");
#endif
                unsigned long i;
                int j;
                int n_bytes = minimum (eos - esp, (SYSPROF_MAX_ADDRESSES - 1) * sizeof (void *));

                j = 1;
                for (i = esp; i < eos && j < SYSPROF_MAX_ADDRESSES; i += sizeof (void *)) {
                        unsigned long x;
                        struct vm_area_struct *vma;

                        if (__copy_from_user_inatomic (&x, (void *)i, sizeof (unsigned long)))
                                break;

                        vma = find_vma (current->mm, x);
                        if (vma && vma->vm_flags & VM_EXEC && vma->vm_start <= x && x <= vma->vm_end) {
                                trace->addresses[j++] = x;
                        }
                }

#if 0
                if (__copy_from_user_inatomic (&(trace->addresses[1]), esp, n_bytes) == 0)
                        trace->n_addresses = n_bytes / sizeof (void *);
                else
                        trace->n_addresses = 1;

                j = 1;
                for (i = esp; i < eos && j < SYSPROF_MAX_ADDRESSES; i += sizeof (void *)) {
                        void *x;

                        if (__copy_from_user_inatomic (
                                    &x, (char *)i, sizeof (x)) == 0)
                                trace->addresses[j++] = x;
                                    &x, (char *)i, sizeof (x)) == 0) {
                                if ((unsigned long)x != 1)
                                        trace->addresses[j++] = x;
                        }
                }
#endif

                trace->n_addresses = j;
                return;
        }

#if 0
        printk (KERN_ALERT "too big stackpointer\n");
#endif
}
#ifdef OLD_PROFILE
@@ -219,8 +254,9 @@ timer_notify (struct pt_regs *regs)
                return 0;

        /* 0: locked, 1: unlocked */
        if (!atomic_dec_and_test(&in_timer_notify))
        if (!atomic_dec_and_test(&in_timer_notify)) {
                goto out;
        }

        is_user = user_mode(regs);