First attempt at making module robust against unloading when in use.

Sun May 15 11:56:30 2005  Søren Sandmann  <sandmann@redhat.com>

	* module/sysprof-module.c: First attempt at making module robust
	against unloading when in use.
Søren Sandmann
2005-05-15 15:57:33 +00:00
committed by Søren Sandmann Pedersen
parent 023c75d71a
commit 720e07109c
3 changed files with 58 additions and 8 deletions
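
The change follows a simple module-use accounting pattern: every path that can run while the module is loaded (the timer-driven work item, the procfs read and poll handlers) bumps an atomic reference count on entry and drops it on exit, each entry point refuses new work once an exiting flag is set, and cleanup_module() drops the module's own reference and then sleeps on a wait queue until the count reaches zero before tearing down the timer and the procfs entry. Below is a condensed sketch of that lifecycle, not the sysprof code itself; the identifiers (module_ref_count, module_exiting, exit_waitq, example_entry_point) are illustrative only.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <asm/atomic.h>

/* Illustrative names -- not the identifiers used in sysprof-module.c. */
static atomic_t module_ref_count = ATOMIC_INIT(0);
static int module_exiting;
static DECLARE_WAIT_QUEUE_HEAD (exit_waitq);

/* Take a reference before touching module state. */
static void module_ref (void)
{
        atomic_inc (&module_ref_count);
}

/* Drop the reference and wake anyone waiting for the count to reach zero. */
static void module_unref (void)
{
        atomic_dec (&module_ref_count);
        wake_up_interruptible (&exit_waitq);
}

/* Typical entry point: refuse new work once unloading has started. */
static int example_entry_point (void)
{
        if (module_exiting)
                return -EBUSY;
        module_ref ();
        /* ... work that relies on module state ... */
        module_unref ();
        return 0;
}

int init_module (void)
{
        module_ref ();          /* the module itself holds one reference */
        return 0;
}

void cleanup_module (void)
{
        module_exiting = 1;
        module_unref ();        /* drop the module's own reference */
        /* Block until every in-flight user has called module_unref(). */
        wait_event_interruptible (exit_waitq, atomic_read (&module_ref_count) == 0);
}

MODULE_LICENSE ("GPL");

Because init_module() takes a reference of its own, the count can only reach zero after cleanup_module() has both set the flag and dropped that reference, so module teardown cannot proceed while any handler is still executing module code.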

ChangeLog

@@ -1,3 +1,8 @@
Sun May 15 11:56:30 2005 Søren Sandmann <sandmann@redhat.com>
* module/sysprof-module.c: First attempt at making module robust
against unloading when in use.
Sun May 15 10:24:09 2005 Soeren Sandmann <sandmann@redhat.com>
* Makefile.am, module/Makefile: Do more-or-less what the automake

TODO

@@ -1,4 +1,4 @@
Before 1.0:
Before 0.9
* Correctness
- When the module is unloaded, kill all processes blocking in read
@@ -9,6 +9,8 @@ Before 1.0:
move them)
- Consider expanding a few more levels of a new descendants tree
Before 1.0:
* Build system
- Find out what distributions it actually works on
(ask for success/failure-stories in 0.9.x releases)

module/sysprof-module.c

@@ -71,7 +71,22 @@ struct userspace_reader
/* Access another process' address space.
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
*/
static atomic_t ref_count;
static void ref(void)
{
atomic_inc (&ref_count);
}
static void unref(void)
{
atomic_dec (&ref_count);
wake_up_interruptible (&wait_for_exit);
}
static int refcount(void)
{
return atomic_read (&ref_count);
}
static struct mm_struct *
get_mm (struct task_struct *tsk)
@@ -323,8 +338,6 @@ generate_stack_trace(struct task_struct *task,
trace->truncated = 0;
}
DECLARE_WAIT_QUEUE_HEAD (wait_to_be_scanned);
static void
do_generate (void *data)
{
@@ -344,6 +357,8 @@ do_generate (void *data)
atomic_dec (&(task)->usage);
mod_timer(&timer, jiffies + INTERVAL);
unref();
}
struct work_struct work;
@@ -351,12 +366,16 @@ struct work_struct work;
static void
on_timer(unsigned long dong)
{
if (exiting)
return;
if (current && current->state == TASK_RUNNING && current->pid != 0)
{
get_task_struct (current);
INIT_WORK (&work, do_generate, current);
ref();
schedule_work (&work);
}
else
@@ -373,12 +392,19 @@ procfile_read(char *buffer,
int *eof,
void *data)
{
if (head == tail)
if (exiting)
return -EBUSY;
ref();
if (head == tail) {
unref();
return -EWOULDBLOCK;
}
wait_event_interruptible (wait_for_trace, head != tail);
*buffer_location = (char *)tail;
if (tail++ == &stack_traces[N_TRACES - 1])
tail = &stack_traces[0];
unref();
return sizeof (SysprofStackTrace);
}
@@ -386,19 +412,28 @@ struct proc_dir_entry *trace_proc_file;
static unsigned int
procfile_poll(struct file *filp, poll_table *poll_table)
{
if (exiting)
return -EBUSY;
ref();
if (head != tail) {
unref();
return POLLIN | POLLRDNORM;
}
poll_wait(filp, &wait_for_trace, poll_table);
if (head != tail) {
unref();
return POLLIN | POLLRDNORM;
}
unref();
return 0;
}
int
init_module(void)
{
ref();
trace_proc_file =
create_proc_entry ("sysprof-trace", S_IFREG | S_IRUGO, &proc_root);
@@ -422,10 +457,18 @@ void
cleanup_module(void)
{
exiting = 1;
unref();
wait_event_interruptible (wait_for_exit, (refcount() == 0));
del_timer (&timer);
remove_proc_entry("sysprof-trace", &proc_root);
printk(KERN_ALERT "stopping sysprof module\n");
printk(KERN_ALERT "stopping sysprof module (refcount: %d)\n",
refcount());
}
module_init (init_module);
module_exit (cleanup_module);