utrace core

This adds the utrace facility, a new modular interface in the kernel for
implementing user thread tracing and debugging.  This fits on top of the
tracehook_* layer, so the new code is well-isolated.

The new interface is in <linux/utrace.h> and the DocBook utrace book
describes it.  It allows for multiple separate tracing engines to work in
parallel without interfering with each other.  Higher-level tracing
facilities can be implemented as loadable kernel modules using this layer.

The new facility is made optional under CONFIG_UTRACE.
When this is not enabled, no new code is added.
It can only be enabled on machines that have all the
prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK.

In this initial version, utrace and ptrace do not play together at all.
If ptrace is attached to a thread, the attach calls in the utrace kernel
API return -EBUSY.  If utrace is attached to a thread, the PTRACE_ATTACH
or PTRACE_TRACEME request will return EBUSY to userland.  The old ptrace
code is otherwise unchanged and nothing using ptrace should be affected
by this patch as long as utrace is not used at the same time.  In the
future we can clean up the ptrace implementation and rework it to use
the utrace API.

Signed-off-by: Roland McGrath <roland@redhat.com>
---
 Documentation/DocBook/Makefile    |    2 +-
 Documentation/DocBook/utrace.tmpl |  589 +++++++++
 fs/proc/array.c                   |    3 +
 include/linux/sched.h             |    5 +
 include/linux/tracehook.h         |   87 ++-
 include/linux/utrace.h            |  692 +++++++++++
 init/Kconfig                      |    9 +
 kernel/Makefile                   |    1 +
 kernel/fork.c                     |    3 +
 kernel/ptrace.c                   |   14 +
 kernel/utrace.c                   | 2434 +++++++++++++++++++++++++++++++++++++
 11 files changed, 3837 insertions(+), 2 deletions(-)

| diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
 | |
| index 34929f2..884c36b 100644  
 | |
| --- a/Documentation/DocBook/Makefile
 | |
| +++ b/Documentation/DocBook/Makefile
 | |
| @@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml de
 | |
|  	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
 | |
|  	    mac80211.xml debugobjects.xml sh.xml regulator.xml \
 | |
|  	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 | |
| -	    tracepoint.xml media.xml drm.xml
 | |
| +	    tracepoint.xml utrace.xml media.xml drm.xml
 | |
|  
 | |
|  ###
 | |
|  # The build process is as follows (targets):
 | |
| diff --git a/Documentation/DocBook/utrace.tmpl b/Documentation/DocBook/utrace.tmpl
 | |
| new file mode 100644
 | |
| index ...0c40add 100644  
 | |
| --- /dev/null
 | |
| +++ b/Documentation/DocBook/utrace.tmpl
 | |
| @@ -0,0 +1,589 @@
 | |
| +<?xml version="1.0" encoding="UTF-8"?>
 | |
| +<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
 | |
| +"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
 | |
| +
 | |
| +<book id="utrace">
 | |
| +  <bookinfo>
 | |
| +    <title>The utrace User Debugging Infrastructure</title>
 | |
| +  </bookinfo>
 | |
| +
 | |
| +  <toc></toc>
 | |
| +
 | |
| +  <chapter id="concepts"><title>utrace concepts</title>
 | |
| +
 | |
| +  <sect1 id="intro"><title>Introduction</title>
 | |
| +
 | |
| +  <para>
 | |
| +    <application>utrace</application> is infrastructure code for tracing
 | |
| +    and controlling user threads.  This is the foundation for writing
 | |
| +    tracing engines, which can be loadable kernel modules.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    The basic actors in <application>utrace</application> are the thread
 | |
| +    and the tracing engine.  A tracing engine is some body of code that
 | |
| +    calls into the <filename><linux/utrace.h></filename>
 | |
| +    interfaces, represented by a <structname>struct
 | |
| +    utrace_engine_ops</structname>.  (Usually it's a kernel module,
 | |
| +    though the legacy <function>ptrace</function> support is a tracing
 | |
| +    engine that is not in a kernel module.)  The interface operates on
 | |
| +    individual threads (<structname>struct task_struct</structname>).
 | |
| +    If an engine wants to treat several threads as a group, that is up
 | |
| +    to its higher-level code.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    Tracing begins by attaching an engine to a thread, using
 | |
| +    <function>utrace_attach_task</function> or
 | |
| +    <function>utrace_attach_pid</function>.  If successful, it returns a
 | |
| +    pointer that is the handle used in all other calls.
 | |
| +  </para>
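+
+  <para>
+    As an illustration only, here is a minimal sketch of attaching from
+    a kernel module.  The <varname>nosy_ops</varname> name is
+    hypothetical and error handling is abbreviated; with no callbacks
+    and no events set, the attached engine does nothing yet.
+  </para>
+
+  <programlisting>
+#include <linux/utrace.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+static const struct utrace_engine_ops nosy_ops;	/* no callbacks yet */
+
+static struct utrace_engine *nosy_attach(struct task_struct *target)
+{
+	struct utrace_engine *engine;
+
+	/* Create a new engine on @target with our ops and no private data. */
+	engine = utrace_attach_task(target, UTRACE_ATTACH_CREATE,
+				    &nosy_ops, NULL);
+	if (IS_ERR(engine))	/* e.g. ERR_PTR(-EBUSY) while ptrace is attached */
+		pr_debug("utrace attach failed: %ld\n", PTR_ERR(engine));
+
+	/* On success, @engine is the handle used in every other utrace call. */
+	return engine;
+}
+  </programlisting>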
 | |
| +
 | |
| +  </sect1>
 | |
| +
 | |
| +  <sect1 id="callbacks"><title>Events and Callbacks</title>
 | |
| +
 | |
| +  <para>
 | |
| +    An attached engine does nothing by default.  An engine makes something
 | |
| +    happen by requesting callbacks via <function>utrace_set_events</function>
 | |
| +    and poking the thread with <function>utrace_control</function>.
 | |
| +    The synchronization issues related to these two calls
 | |
| +    are discussed further below in <xref linkend="teardown"/>.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    Events are specified using the macro
 | |
| +    <constant>UTRACE_EVENT(<replaceable>type</replaceable>)</constant>.
 | |
| +    Each event type is associated with a callback in <structname>struct
 | |
| +    utrace_engine_ops</structname>.  A tracing engine can leave unused
 | |
| +    callbacks <constant>NULL</constant>.  The only callbacks required
 | |
| +    are those used by the event flags it sets.
 | |
| +  </para>
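+
+  <para>
+    A sketch of that pattern (names are hypothetical; see the
+    <structname>struct utrace_engine_ops</structname> declaration in
+    <filename><linux/utrace.h></filename> for the authoritative
+    callback prototypes): an engine that only wants
+    <constant>EXEC</constant> reports supplies just that one callback
+    and sets just that one event bit.
+  </para>
+
+  <programlisting>
+static u32 noisy_report_exec(u32 action, struct utrace_engine *engine,
+			     const struct linux_binfmt *fmt,
+			     const struct linux_binprm *bprm,
+			     struct pt_regs *regs)
+{
+	pr_info("pid %d called exec\n", task_pid_nr(current));
+	return UTRACE_RESUME;			/* continue normally */
+}
+
+static const struct utrace_engine_ops noisy_ops = {
+	.report_exec = noisy_report_exec,	/* all other callbacks stay NULL */
+};
+
+/* After attaching with &noisy_ops, ask for exactly that event. */
+static int noisy_enable(struct task_struct *target,
+			struct utrace_engine *engine)
+{
+	return utrace_set_events(target, engine, UTRACE_EVENT(EXEC));
+}
+  </programlisting>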
 | |
| +
 | |
| +  <para>
 | |
| +    Many engines can be attached to each thread.  When a thread has an
 | |
| +    event, each engine gets a callback if it has set the event flag for
 | |
| +    that event type.  For most events, engines are called in the order they
 | |
| +    attached.  Engines that attach after the event has occurred do not get
 | |
| +    callbacks for that event.  This includes any new engines just attached
 | |
| +    by an existing engine's callback function.  Once the sequence of
 | |
| +    callbacks for that one event has completed, such new engines are then
 | |
| +    eligible in the next sequence that starts when there is another event.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    Event reporting callbacks have details particular to the event type,
 | |
| +    but are all called in similar environments and have the same
 | |
| +    constraints.  Callbacks are made from safe points, where no locks
 | |
| +    are held, no special resources are pinned (usually), and the
 | |
| +    user-mode state of the thread is accessible.  So, callback code has
 | |
| +    a pretty free hand.  But to be a good citizen, callback code should
 | |
| +    never block for long periods.  It is fine to block in
 | |
| +    <function>kmalloc</function> and the like, but never wait for i/o or
 | |
| +    for user mode to do something.  If you need the thread to wait, use
 | |
| +    <constant>UTRACE_STOP</constant> and return from the callback
 | |
| +    quickly.  When your i/o finishes or whatever, you can use
 | |
| +    <function>utrace_control</function> to resume the thread.
 | |
| +  </para>
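+
+  <para>
+    For example (a sketch; the work-queue plumbing and names are
+    hypothetical), a callback that must wait for slow work parks the
+    thread instead of blocking in place:
+  </para>
+
+  <programlisting>
+#include <linux/workqueue.h>
+
+static struct work_struct slow_work;	/* INIT_WORK()'d elsewhere; runs the i/o */
+
+static u32 my_report_quiesce(u32 action, struct utrace_engine *engine,
+			     unsigned long event)
+{
+	/* Hand the slow part to another context... */
+	schedule_work(&slow_work);
+	/* ...and return quickly, leaving the thread stopped. */
+	return UTRACE_STOP;
+}
+
+/* Called from the work item once the i/o has finished. */
+static void slow_work_done(struct task_struct *target,
+			   struct utrace_engine *engine)
+{
+	utrace_control(target, engine, UTRACE_RESUME);
+}
+  </programlisting>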
 | |
| +
 | |
| +  <para>
 | |
| +    The <constant>UTRACE_EVENT(SYSCALL_ENTRY)</constant> event is a special
 | |
| +    case.  While other events happen in the kernel when it will return to
 | |
| +    user mode soon, this event happens when entering the kernel before it
 | |
| +    will proceed with the work requested from user mode.  Because of this
 | |
| +    difference, the <function>report_syscall_entry</function> callback is
 | |
| +    special in two ways.  For this event, engines are called in reverse of
 | |
| +    the normal order (this includes the <function>report_quiesce</function>
 | |
| +    call that precedes a <function>report_syscall_entry</function> call).
 | |
| +    This preserves the semantics that the last engine to attach is called
 | |
| +    "closest to user mode"--the engine that is first to see a thread's user
 | |
| +    state when it enters the kernel is also the last to see that state when
 | |
| +    the thread returns to user mode.  For the same reason, if these
 | |
| +    callbacks use <constant>UTRACE_STOP</constant> (see the next section),
 | |
| +    the thread stops immediately after callbacks rather than only when it's
 | |
| +    ready to return to user mode; when allowed to resume, it will actually
 | |
| +    attempt the system call indicated by the register values at that time.
 | |
| +  </para>
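+
+  <para>
+    Because the call has not yet run, a
+    <function>report_syscall_entry</function> callback can also veto
+    it, as in this sketch (the policy choice and names are
+    hypothetical):
+  </para>
+
+  <programlisting>
+#include <asm/syscall.h>
+#include <linux/unistd.h>
+
+static u32 deny_report_syscall_entry(u32 action, struct utrace_engine *engine,
+				     struct pt_regs *regs)
+{
+	if (syscall_get_nr(current, regs) == __NR_ptrace) {
+		/* Fail the call with -EPERM instead of running it. */
+		syscall_set_return_value(current, regs, -EPERM, 0);
+		return UTRACE_SYSCALL_ABORT | UTRACE_RESUME;
+	}
+	return UTRACE_SYSCALL_RUN | UTRACE_RESUME;
+}
+  </programlisting>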
 | |
| +
 | |
| +  </sect1>
 | |
| +
 | |
| +  <sect1 id="safely"><title>Stopping Safely</title>
 | |
| +
 | |
| +  <sect2 id="well-behaved"><title>Writing well-behaved callbacks</title>
 | |
| +
 | |
| +  <para>
 | |
| +    Well-behaved callbacks are important to maintain two essential
 | |
| +    properties of the interface.  The first of these is that unrelated
 | |
| +    tracing engines should not interfere with each other.  If your engine's
 | |
| +    event callback does not return quickly, then another engine won't get
 | |
| +    the event notification in a timely manner.  The second important
 | |
| +    property is that tracing should be as noninvasive as possible to the
 | |
| +    normal operation of the system overall and of the traced thread in
 | |
| +    particular.  That is, attached tracing engines should not perturb a
 | |
| +    thread's behavior, except to the extent that changing its user-visible
 | |
| +    state is explicitly what you want to do.  (Obviously some perturbation
 | |
| +    is unavoidable, primarily timing changes, ranging from small delays due
 | |
| +    to the overhead of tracing, to arbitrary pauses in user code execution
 | |
| +    when a user stops a thread with a debugger for examination.)  Even when
 | |
| +    you explicitly want the perturbation of making the traced thread block,
 | |
| +    just blocking directly in your callback has more unwanted effects.  For
 | |
| +    example, the <constant>CLONE</constant> event callbacks are called when
 | |
| +    the new child thread has been created but not yet started running; the
 | |
| +    child can never be scheduled until the <constant>CLONE</constant>
 | |
| +    tracing callbacks return.  (This allows engines tracing the parent to
 | |
| +    attach to the child.)  If a <constant>CLONE</constant> event callback
 | |
| +    blocks the parent thread, it also prevents the child thread from
 | |
| +    running (even to process a <constant>SIGKILL</constant>).  If what you
 | |
| +    want is to make both the parent and child block, then use
 | |
| +    <function>utrace_attach_task</function> on the child and then use
 | |
| +    <constant>UTRACE_STOP</constant> on both threads.  A more crucial
 | |
| +    problem with blocking in callbacks is that it can prevent
 | |
| +    <constant>SIGKILL</constant> from working.  A thread that is blocking
 | |
| +    due to <constant>UTRACE_STOP</constant> will still wake up and die
 | |
| +    immediately when sent a <constant>SIGKILL</constant>, as all threads
 | |
| +    should.  Relying on the <application>utrace</application>
 | |
| +    infrastructure rather than on private synchronization calls in event
 | |
| +    callbacks is an important way to help keep tracing robustly
 | |
| +    noninvasive.
 | |
| +  </para>
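+
+  <para>
+    For instance, an engine tracing the parent can propagate itself to
+    every new child from its <constant>CLONE</constant> callback.  A
+    sketch only (error handling trimmed; it reuses the parent engine's
+    ops and data for the child):
+  </para>
+
+  <programlisting>
+static u32 my_report_clone(u32 action, struct utrace_engine *engine,
+			   unsigned long clone_flags,
+			   struct task_struct *child)
+{
+	struct utrace_engine *child_engine;
+
+	/* The child cannot run yet, so attaching here cannot miss events. */
+	child_engine = utrace_attach_task(child, UTRACE_ATTACH_CREATE,
+					  engine->ops, engine->data);
+	if (!IS_ERR(child_engine)) {
+		utrace_set_events(child, child_engine, UTRACE_EVENT(CLONE));
+		utrace_engine_put(child_engine);   /* keep only the implicit ref */
+	}
+	return UTRACE_RESUME;
+}
+  </programlisting>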
 | |
| +
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="UTRACE_STOP"><title>Using <constant>UTRACE_STOP</constant></title>
 | |
| +
 | |
| +  <para>
 | |
| +    To control another thread and access its state, it must be stopped
 | |
| +    with <constant>UTRACE_STOP</constant>.  This means that it is
 | |
| +    stopped and won't start running again while we access it.  When a
 | |
| +    thread is not already stopped, <function>utrace_control</function>
 | |
| +    returns <constant>-EINPROGRESS</constant> and an engine must wait
 | |
| +    for an event callback when the thread is ready to stop.  The thread
 | |
| +    may be running on another CPU or may be blocked.  When it is ready
 | |
| +    to be examined, it will make callbacks to engines that set the
 | |
| +    <constant>UTRACE_EVENT(QUIESCE)</constant> event bit.  To wake up an
 | |
| +    interruptible wait, use <constant>UTRACE_INTERRUPT</constant>.
 | |
| +  </para>
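+
+  <para>
+    A typical stop request from another context therefore looks like
+    this sketch: enable <constant>QUIESCE</constant> reports, ask for
+    the stop, and treat <constant>-EINPROGRESS</constant> as "wait for
+    the <function>report_quiesce</function> callback":
+  </para>
+
+  <programlisting>
+static int my_request_stop(struct task_struct *target,
+			   struct utrace_engine *engine)
+{
+	int ret;
+
+	/* Be sure we hear about the thread reaching a safe point. */
+	ret = utrace_set_events(target, engine, UTRACE_EVENT(QUIESCE));
+	if (ret)
+		return ret;
+
+	ret = utrace_control(target, engine, UTRACE_STOP);
+	if (ret == -EINPROGRESS)
+		return 0;	/* not stopped yet; report_quiesce will run */
+	return ret;		/* zero means it is already stopped */
+}
+  </programlisting>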
 | |
| +
 | |
| +  <para>
 | |
| +    As long as some engine has used <constant>UTRACE_STOP</constant> and
 | |
| +    not called <function>utrace_control</function> to resume the thread,
 | |
| +    then the thread will remain stopped.  <constant>SIGKILL</constant>
 | |
| +    will wake it up, but it will not run user code.  When the stop is
 | |
| +    cleared with <function>utrace_control</function> or a callback
 | |
| +    return value, the thread starts running again.
 | |
| +    (See also <xref linkend="teardown"/>.)
 | |
| +  </para>
 | |
| +
 | |
| +  </sect2>
 | |
| +
 | |
| +  </sect1>
 | |
| +
 | |
| +  <sect1 id="teardown"><title>Tear-down Races</title>
 | |
| +
 | |
| +  <sect2 id="SIGKILL"><title>Primacy of <constant>SIGKILL</constant></title>
 | |
| +  <para>
 | |
| +    Ordinarily synchronization issues for tracing engines are kept fairly
 | |
| +    straightforward by using <constant>UTRACE_STOP</constant>.  You ask a
 | |
| +    thread to stop, and then once it makes the
 | |
| +    <function>report_quiesce</function> callback it cannot do anything else
 | |
| +    that would result in another callback, until you let it with a
 | |
| +    <function>utrace_control</function> call.  This simple arrangement
 | |
| +    avoids complex and error-prone code in each one of a tracing engine's
 | |
| +    event callbacks to keep them serialized with the engine's other
 | |
| +    operations done on that thread from another thread of control.
 | |
| +    However, giving tracing engines complete power to keep a traced thread
 | |
| +    stuck in place runs afoul of a more important kind of simplicity that
 | |
| +    the kernel overall guarantees: nothing can prevent or delay
 | |
| +    <constant>SIGKILL</constant> from making a thread die and release its
 | |
| +    resources.  To preserve this important property of
 | |
| +    <constant>SIGKILL</constant>, it as a special case can break
 | |
| +    <constant>UTRACE_STOP</constant> like nothing else normally can.  This
 | |
| +    includes both explicit <constant>SIGKILL</constant> signals and the
 | |
| +    implicit <constant>SIGKILL</constant> sent to each other thread in the
 | |
| +    same thread group by a thread doing an exec, or processing a fatal
 | |
| +    signal, or making an <function>exit_group</function> system call.  A
 | |
| +    tracing engine can prevent a thread from beginning the exit or exec or
 | |
| +    dying by signal (other than <constant>SIGKILL</constant>) if it is
 | |
| +    attached to that thread, but once the operation begins, no tracing
 | |
| +    engine can prevent or delay all other threads in the same thread group
 | |
| +    dying.
 | |
| +  </para>
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="reap"><title>Final callbacks</title>
 | |
| +  <para>
 | |
| +    The <function>report_reap</function> callback is always the final event
 | |
| +    in the life cycle of a traced thread.  Tracing engines can use this as
 | |
| +    the trigger to clean up their own data structures.  The
 | |
| +    <function>report_death</function> callback is always the penultimate
 | |
| +    event a tracing engine might see; it's seen unless the thread was
 | |
| +    already in the midst of dying when the engine attached.  Many tracing
 | |
| +    engines will have no interest in when a parent reaps a dead process,
 | |
| +    and nothing they want to do with a zombie thread once it dies; for
 | |
| +    them, the <function>report_death</function> callback is the natural
 | |
| +    place to clean up data structures and detach.  To facilitate writing
 | |
| +    such engines robustly, given the asynchrony of
 | |
| +    <constant>SIGKILL</constant>, and without error-prone manual
 | |
| +    implementation of synchronization schemes, the
 | |
| +    <application>utrace</application> infrastructure provides some special
 | |
| +    guarantees about the <function>report_death</function> and
 | |
| +    <function>report_reap</function> callbacks.  It still takes some care
 | |
| +    to be sure your tracing engine is robust to tear-down races, but these
 | |
| +    rules make it reasonably straightforward and concise to handle a lot of
 | |
| +    corner cases correctly.
 | |
| +  </para>
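+
+  <para>
+    A sketch of that pattern (the engine-private data is assumed to
+    have been allocated at attach time; see
+    <filename><linux/utrace.h></filename> for the authoritative
+    callback prototypes): free everything at death and detach, so no
+    <function>report_reap</function> callback is needed at all.
+  </para>
+
+  <programlisting>
+#include <linux/slab.h>
+
+static u32 my_report_death(struct utrace_engine *engine,
+			   bool group_dead, int signal)
+{
+	kfree(engine->data);		/* hypothetical per-thread state */
+	engine->data = NULL;
+	return UTRACE_DETACH;		/* no further callbacks for this engine */
+}
+  </programlisting>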
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="refcount"><title>Engine and task pointers</title>
 | |
| +  <para>
 | |
| +    The first sort of guarantee concerns the core data structures
 | |
| +    themselves.  <structname>struct utrace_engine</structname> is
 | |
| +    a reference-counted data structure.  While you hold a reference, an
 | |
| +    engine pointer will always stay valid so that you can safely pass it to
 | |
| +    any <application>utrace</application> call.  Each call to
 | |
| +    <function>utrace_attach_task</function> or
 | |
| +    <function>utrace_attach_pid</function> returns an engine pointer with a
 | |
| +    reference belonging to the caller.  You own that reference until you
 | |
| +    drop it using <function>utrace_engine_put</function>.  There is an
 | |
| +    implicit reference on the engine while it is attached.  So if you drop
 | |
| +    your only reference, and then use
 | |
| +    <function>utrace_attach_task</function> without
 | |
| +    <constant>UTRACE_ATTACH_CREATE</constant> to look up that same engine,
 | |
| +    you will get the same pointer with a new reference to replace the one
 | |
| +    you dropped, just like calling <function>utrace_engine_get</function>.
 | |
| +    When an engine has been detached, either explicitly with
 | |
| +    <constant>UTRACE_DETACH</constant> or implicitly after
 | |
| +    <function>report_reap</function>, then any references you hold are all
 | |
| +    that keep the old engine pointer alive.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    There is nothing a kernel module can do to keep a <structname>struct
 | |
| +    task_struct</structname> alive outside of
 | |
| +    <function>rcu_read_lock</function>.  When the task dies and is reaped
 | |
| +    by its parent (or itself), that structure can be freed so that any
 | |
| +    dangling pointers you have stored become invalid.
 | |
| +    <application>utrace</application> will not prevent this, but it can
 | |
| +    help you detect it safely.  By definition, a task that has been reaped
 | |
| +    has had all its engines detached.  All
 | |
| +    <application>utrace</application> calls can be safely called on a
 | |
| +    detached engine if the caller holds a reference on that engine pointer,
 | |
| +    even if the task pointer passed in the call is invalid.  All calls
 | |
| +    return <constant>-ESRCH</constant> for a detached engine, which tells
 | |
| +    you that the task pointer you passed could be invalid now.  Since
 | |
| +    <function>utrace_control</function> and
 | |
| +    <function>utrace_set_events</function> do not block, you can call those
 | |
| +    inside a <function>rcu_read_lock</function> section and be sure after
 | |
| +    they don't return <constant>-ESRCH</constant> that the task pointer is
 | |
| +    still valid until <function>rcu_read_unlock</function>.  The
 | |
| +    infrastructure never holds task references of its own.  Though neither
 | |
| +    <function>rcu_read_lock</function> nor any other lock is held while
 | |
| +    making a callback, it's always guaranteed that the <structname>struct
 | |
| +    task_struct</structname> and the <structname>struct
 | |
| +    utrace_engine</structname> passed as arguments remain valid
 | |
| +    until the callback function returns.
 | |
| +  </para>
 | |
| +
 | |
| +  <para>
 | |
| +    The common means for safely holding task pointers that is available to
 | |
| +    kernel modules is to use <structname>struct pid</structname>, which
 | |
| +    permits <function>put_pid</function> from kernel modules.  When using
 | |
| +    that, the calls <function>utrace_attach_pid</function>,
 | |
| +    <function>utrace_control_pid</function>,
 | |
| +    <function>utrace_set_events_pid</function>, and
 | |
| +    <function>utrace_barrier_pid</function> are available.
 | |
| +  </para>
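+
+  <para>
+    A sketch of that usage, attaching by PID from process context and
+    never storing a bare task pointer (<varname>pid_nr</varname> and
+    <varname>my_ops</varname> are placeholders):
+  </para>
+
+  <programlisting>
+#include <linux/pid.h>
+#include <linux/err.h>
+
+static struct utrace_engine *attach_by_pid(pid_t pid_nr,
+					   const struct utrace_engine_ops *my_ops)
+{
+	struct pid *pid = find_get_pid(pid_nr);
+	struct utrace_engine *engine;
+
+	if (!pid)
+		return ERR_PTR(-ESRCH);
+	engine = utrace_attach_pid(pid, UTRACE_ATTACH_CREATE, my_ops, NULL);
+	put_pid(pid);
+	/* Later control can use utrace_control_pid()/utrace_set_events_pid(). */
+	return engine;
+}
+  </programlisting>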
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="reap-after-death">
 | |
| +    <title>
 | |
| +      Serialization of <constant>DEATH</constant> and <constant>REAP</constant>
 | |
| +    </title>
 | |
| +    <para>
 | |
| +      The second guarantee is the serialization of
 | |
| +      <constant>DEATH</constant> and <constant>REAP</constant> event
 | |
| +      callbacks for a given thread.  The actual reaping by the parent
 | |
| +      (<function>release_task</function> call) can occur simultaneously
 | |
| +      while the thread is still doing the final steps of dying, including
 | |
| +      the <function>report_death</function> callback.  If a tracing engine
 | |
| +      has requested both <constant>DEATH</constant> and
 | |
| +      <constant>REAP</constant> event reports, it's guaranteed that the
 | |
| +      <function>report_reap</function> callback will not be made until
 | |
| +      after the <function>report_death</function> callback has returned.
 | |
| +      If the <function>report_death</function> callback itself detaches
 | |
| +      from the thread, then the <function>report_reap</function> callback
 | |
| +      will never be made.  Thus it is safe for a
 | |
| +      <function>report_death</function> callback to clean up data
 | |
| +      structures and detach.
 | |
| +    </para>
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="interlock"><title>Interlock with final callbacks</title>
 | |
| +  <para>
 | |
| +    The final sort of guarantee is that a tracing engine will know for sure
 | |
| +    whether or not the <function>report_death</function> and/or
 | |
| +    <function>report_reap</function> callbacks will be made for a certain
 | |
| +    thread.  These tear-down races are disambiguated by the error return
 | |
| +    values of <function>utrace_set_events</function> and
 | |
| +    <function>utrace_control</function>.  Normally
 | |
| +    <function>utrace_control</function> called with
 | |
| +    <constant>UTRACE_DETACH</constant> returns zero, and this means that no
 | |
| +    more callbacks will be made.  If the thread is in the midst of dying,
 | |
| +    it returns <constant>-EALREADY</constant> to indicate that the
 | |
| +    <constant>report_death</constant> callback may already be in progress;
 | |
| +    when you get this error, you know that any cleanup your
 | |
| +    <function>report_death</function> callback does is about to happen or
 | |
| +    has just happened--note that if the <function>report_death</function>
 | |
| +    callback does not detach, the engine remains attached until the thread
 | |
| +    gets reaped.  If the thread is in the midst of being reaped,
 | |
| +    <function>utrace_control</function> returns <constant>-ESRCH</constant>
 | |
| +    to indicate that the <function>report_reap</function> callback may
 | |
| +    already be in progress; this means the engine is implicitly detached
 | |
| +    when the callback completes.  This makes it possible for a tracing
 | |
| +    engine that has decided asynchronously to detach from a thread to
 | |
| +    safely clean up its data structures, knowing that no
 | |
| +    <function>report_death</function> or <function>report_reap</function>
 | |
| +    callback will try to do the same.  <constant>utrace_detach</constant>
 | |
| +    returns <constant>-ESRCH</constant> when the <structname>struct
 | |
| +    utrace_engine</structname> has already been detached, but is
 | |
| +    still a valid pointer because of its reference count.  A tracing engine
 | |
| +    can use this to safely synchronize its own independent multiple threads
 | |
| +    of control with each other and with its event callbacks that detach.
 | |
| +  </para>
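+
+  <para>
+    Concretely, an asynchronous detach path can branch on that return
+    value.  This is a sketch: <function>my_cleanup</function> is a
+    hypothetical helper, and the engine's own death/reap callbacks are
+    assumed to do the same cleanup when they run instead.
+  </para>
+
+  <programlisting>
+static void my_cleanup(void *data);		/* hypothetical helper */
+
+static void my_async_detach(struct task_struct *target,
+			    struct utrace_engine *engine)
+{
+	int ret = utrace_control(target, engine, UTRACE_DETACH);
+
+	if (ret == 0)
+		my_cleanup(engine->data);	/* no callback will run; free it here */
+	/*
+	 * -EALREADY: report_death may be running and will do the cleanup.
+	 * -ESRCH:    report_reap may be running, or we were already detached.
+	 */
+	utrace_engine_put(engine);		/* drop our own reference */
+}
+  </programlisting>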
 | |
| +
 | |
| +  <para>
 | |
| +    In the same vein, <function>utrace_set_events</function> normally
 | |
| +    returns zero; if the target thread was stopped before the call, then
 | |
| +    after a successful call, no event callbacks not requested in the new
 | |
| +    flags will be made.  It fails with <constant>-EALREADY</constant> if
 | |
| +    you try to clear <constant>UTRACE_EVENT(DEATH)</constant> when the
 | |
| +    <function>report_death</function> callback may already have begun, or if
 | |
| +    you try to newly set <constant>UTRACE_EVENT(DEATH)</constant> or
 | |
| +    <constant>UTRACE_EVENT(QUIESCE)</constant> when the target is already
 | |
| +    dead or dying.  Like <function>utrace_control</function>, it returns
 | |
| +    <constant>-ESRCH</constant> when the <function>report_reap</function>
 | |
| +    callback may already have begun, or the thread has already been detached
 | |
| +    (including forcible detach on reaping).  This lets the tracing engine
 | |
| +    know for sure which event callbacks it will or won't see after
 | |
| +    <function>utrace_set_events</function> has returned.  By checking for
 | |
| +    errors, it can know whether to clean up its data structures immediately
 | |
| +    or to let its callbacks do the work.
 | |
| +  </para>
 | |
| +  </sect2>
 | |
| +
 | |
| +  <sect2 id="barrier"><title>Using <function>utrace_barrier</function></title>
 | |
| +  <para>
 | |
| +    When a thread is safely stopped, calling
 | |
| +    <function>utrace_control</function> with <constant>UTRACE_DETACH</constant>
 | |
| +    or calling <function>utrace_set_events</function> to disable some events
 | |
| +    ensures synchronously that your engine won't get any more of the callbacks
 | |
| +    that have been disabled (none at all when detaching).  But these can also
 | |
| +    be used while the thread is not stopped, when it might be simultaneously
 | |
| +    making a callback to your engine.  For this situation, these calls return
 | |
| +    <constant>-EINPROGRESS</constant> when it's possible a callback is in
 | |
| +    progress.  If you are not prepared to have your old callbacks still run,
 | |
| +    then you can synchronize to be sure all the old callbacks are finished,
 | |
| +    using <function>utrace_barrier</function>.  This is necessary if the
 | |
| +    kernel module containing your callback code is going to be unloaded.
 | |
| +  </para>
 | |
| +  <para>
 | |
| +    After using <constant>UTRACE_DETACH</constant> once, further calls to
 | |
| +    <function>utrace_control</function> with the same engine pointer will
 | |
| +    return <constant>-ESRCH</constant>.  In contrast, after getting
 | |
| +    <constant>-EINPROGRESS</constant> from
 | |
| +    <function>utrace_set_events</function>, you can call
 | |
| +    <function>utrace_set_events</function> again later and if it returns zero
 | |
| +    then know the old callbacks have finished.
 | |
| +  </para>
 | |
| +  <para>
 | |
| +    Unlike all other calls, <function>utrace_barrier</function> (and
 | |
| +    <function>utrace_barrier_pid</function>) will accept any engine pointer you
 | |
| +    hold a reference on, even if <constant>UTRACE_DETACH</constant> has already
 | |
| +    been used.  After any <function>utrace_control</function> or
 | |
| +    <function>utrace_set_events</function> call (these do not block), you can
 | |
| +    call <function>utrace_barrier</function> to block until callbacks have
 | |
| +    finished.  This returns <constant>-ESRCH</constant> only if the engine is
 | |
| +    completely detached (finished all callbacks).  Otherwise it waits
 | |
| +    until the thread is definitely not in the midst of a callback to this
 | |
| +    engine and then returns zero, but can return
 | |
| +    <constant>-ERESTARTSYS</constant> if its wait is interrupted.
 | |
| +  </para>
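+
+  <para>
+    Putting it together, a module's tear-down path for one engine can
+    look like this sketch:
+  </para>
+
+  <programlisting>
+static void my_shutdown(struct task_struct *target,
+			struct utrace_engine *engine)
+{
+	utrace_control(target, engine, UTRACE_DETACH);
+
+	/*
+	 * Wait until no callback to this engine can still be running,
+	 * so it is safe for the module text to go away.  A return of
+	 * -ERESTARTSYS means the wait was interrupted and should be
+	 * retried; -ESRCH just means the engine was already detached.
+	 */
+	utrace_barrier(target, engine);
+
+	utrace_engine_put(engine);
+}
+  </programlisting>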
 | |
| +  </sect2>
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +</chapter>
 | |
| +
 | |
| +<chapter id="core"><title>utrace core API</title>
 | |
| +
 | |
| +<para>
 | |
| +  The utrace API is declared in <filename><linux/utrace.h></filename>.
 | |
| +</para>
 | |
| +
 | |
| +!Iinclude/linux/utrace.h
 | |
| +!Ekernel/utrace.c
 | |
| +
 | |
| +</chapter>
 | |
| +
 | |
| +<chapter id="machine"><title>Machine State</title>
 | |
| +
 | |
| +<para>
 | |
| +  The <function>task_current_syscall</function> function can be used on any
 | |
| +  valid <structname>struct task_struct</structname> at any time, and does
 | |
| +  not even require that <function>utrace_attach_task</function> was used at all.
 | |
| +</para>
 | |
| +
 | |
| +<para>
 | |
| +  The other ways to access the registers and other machine-dependent state of
 | |
| +  a task can only be used on a task that is at a known safe point.  The safe
 | |
| +  points are all the places where <function>utrace_set_events</function> can
 | |
| +  request callbacks (except for the <constant>DEATH</constant> and
 | |
| +  <constant>REAP</constant> events).  So at any event callback, it is safe to
 | |
| +  examine <varname>current</varname>.
 | |
| +</para>
 | |
| +
 | |
| +<para>
 | |
| +  One task can examine another only after a callback in the target task that
 | |
| +  returns <constant>UTRACE_STOP</constant> so that task will not return to user
 | |
| +  mode after the safe point.  This guarantees that the task will not resume
 | |
| +  until the same engine uses <function>utrace_control</function>, unless the
 | |
| +  task dies suddenly.  To examine safely, one must use a pair of calls to
 | |
| +  <function>utrace_prepare_examine</function> and
 | |
| +  <function>utrace_finish_examine</function> surrounding the calls to
 | |
| +  <structname>struct user_regset</structname> functions or direct examination
 | |
| +  of task data structures.  <function>utrace_prepare_examine</function> returns
 | |
| +  an error if the task is not properly stopped, or is dead.  After a
 | |
| +  successful examination, the paired <function>utrace_finish_examine</function>
 | |
| +  call returns an error if the task ever woke up during the examination.  If
 | |
| +  so, any data gathered may be scrambled and should be discarded.  This means
 | |
| +  there was a spurious wake-up (which should not happen), or a sudden death.
 | |
| +</para>
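+
+<para>
+  A sketch of that pairing, reading the target's general registers into
+  a local buffer.  It is a sketch only: the use of regset 0 for the
+  general registers and the buffer sizing are simplifications, and the
+  exact <function>utrace_prepare_examine</function> and
+  <function>utrace_finish_examine</function> prototypes are those
+  declared in <filename><linux/utrace.h></filename>.
+</para>
+
+<programlisting>
+#include <linux/regset.h>
+
+static int read_gp_regs(struct task_struct *target,
+			struct utrace_engine *engine,
+			void *buf, unsigned int size)
+{
+	const struct user_regset_view *view = task_user_regset_view(target);
+	const struct user_regset *regset = &view->regsets[0];
+	struct utrace_examiner exam;
+	int ret;
+
+	ret = utrace_prepare_examine(target, engine, &exam);
+	if (ret)
+		return ret;			/* not stopped for us, or dying */
+
+	ret = regset->get(target, regset, 0, size, buf, NULL);
+
+	if (utrace_finish_examine(target, engine, &exam))
+		ret = -EAGAIN;			/* it woke up; discard the data */
+	return ret;
+}
+</programlisting>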
 | |
| +
 | |
| +<sect1 id="regset"><title><structname>struct user_regset</structname></title>
 | |
| +
 | |
| +<para>
 | |
| +  The <structname>struct user_regset</structname> API
 | |
| +  is declared in <filename><linux/regset.h></filename>.
 | |
| +</para>
 | |
| +
 | |
| +!Finclude/linux/regset.h
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +<sect1 id="task_current_syscall">
 | |
| +  <title><filename>System Call Information</filename></title>
 | |
| +
 | |
| +<para>
 | |
| +  This function is declared in <filename><linux/ptrace.h></filename>.
 | |
| +</para>
 | |
| +
 | |
| +!Elib/syscall.c
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +<sect1 id="syscall"><title><filename>System Call Tracing</filename></title>
 | |
| +
 | |
| +<para>
 | |
| +  The arch API for system call information is declared in
 | |
| +  <filename><asm/syscall.h></filename>.
 | |
| +  Each of these calls can be used only at system call entry tracing,
 | |
| +  or can be used only at system call exit and the subsequent safe points
 | |
| +  before returning to user mode.
 | |
| +  At system call entry tracing means either during a
 | |
| +  <structfield>report_syscall_entry</structfield> callback,
 | |
| +  or any time after that callback has returned <constant>UTRACE_STOP</constant>.
 | |
| +</para>
 | |
| +
 | |
| +!Finclude/asm-generic/syscall.h
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +</chapter>
 | |
| +
 | |
| +<chapter id="internals"><title>Kernel Internals</title>
 | |
| +
 | |
| +<para>
 | |
| +  This chapter covers the interface to the tracing infrastructure
 | |
| +  from the core of the kernel and the architecture-specific code.
 | |
| +  This is for maintainers of the kernel and arch code, and not relevant
 | |
| +  to using the tracing facilities described in preceding chapters.
 | |
| +</para>
 | |
| +
 | |
| +<sect1 id="tracehook"><title>Core Calls In</title>
 | |
| +
 | |
| +<para>
 | |
| +  These calls are declared in <filename><linux/tracehook.h></filename>.
 | |
| +  The core kernel calls these functions at various important places.
 | |
| +</para>
 | |
| +
 | |
| +!Finclude/linux/tracehook.h
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +<sect1 id="arch"><title>Architecture Calls Out</title>
 | |
| +
 | |
| +<para>
 | |
| +  An arch that has done all these things sets
 | |
| +  <constant>CONFIG_HAVE_ARCH_TRACEHOOK</constant>.
 | |
| +  This is required to enable the <application>utrace</application> code.
 | |
| +</para>
 | |
| +
 | |
| +<sect2 id="arch-ptrace"><title><filename><asm/ptrace.h></filename></title>
 | |
| +
 | |
| +<para>
 | |
| +  An arch defines these in <filename><asm/ptrace.h></filename>
 | |
| +  if it supports hardware single-step or block-step features.
 | |
| +</para>
 | |
| +
 | |
| +!Finclude/linux/ptrace.h arch_has_single_step arch_has_block_step
 | |
| +!Finclude/linux/ptrace.h user_enable_single_step user_enable_block_step
 | |
| +!Finclude/linux/ptrace.h user_disable_single_step
 | |
| +
 | |
| +</sect2>
 | |
| +
 | |
| +<sect2 id="arch-syscall">
 | |
| +  <title><filename><asm/syscall.h></filename></title>
 | |
| +
 | |
| +  <para>
 | |
| +    An arch provides <filename><asm/syscall.h></filename> that
 | |
| +    defines these as inlines, or declares them as exported functions.
 | |
| +    These interfaces are described in <xref linkend="syscall"/>.
 | |
| +  </para>
 | |
| +
 | |
| +</sect2>
 | |
| +
 | |
| +<sect2 id="arch-tracehook">
 | |
| +  <title><filename><linux/tracehook.h></filename></title>
 | |
| +
 | |
| +  <para>
 | |
| +    An arch must define <constant>TIF_NOTIFY_RESUME</constant>
 | |
| +    and <constant>TIF_SYSCALL_TRACE</constant>
 | |
| +    in its <filename><asm/thread_info.h></filename>.
 | |
| +    The arch code must call the following functions, all declared
 | |
| +    in <filename><linux/tracehook.h></filename> and
 | |
| +    described in <xref linkend="tracehook"/>:
 | |
| +
 | |
| +    <itemizedlist>
 | |
| +      <listitem>
 | |
| +	<para><function>tracehook_notify_resume</function></para>
 | |
| +      </listitem>
 | |
| +      <listitem>
 | |
| +	<para><function>tracehook_report_syscall_entry</function></para>
 | |
| +      </listitem>
 | |
| +      <listitem>
 | |
| +	<para><function>tracehook_report_syscall_exit</function></para>
 | |
| +      </listitem>
 | |
| +      <listitem>
 | |
| +	<para><function>tracehook_signal_handler</function></para>
 | |
| +      </listitem>
 | |
| +    </itemizedlist>
 | |
| +
 | |
| +  </para>
 | |
| +
 | |
| +</sect2>
 | |
| +
 | |
| +</sect1>
 | |
| +
 | |
| +</chapter>
 | |
| +
 | |
| +</book>
 | |
| diff --git a/fs/proc/array.c b/fs/proc/array.c
 | |
| index fff6572..a67bd83 100644  
 | |
| --- a/fs/proc/array.c
 | |
| +++ b/fs/proc/array.c
 | |
| @@ -81,6 +81,7 @@
 | |
|  #include <linux/pid_namespace.h>
 | |
|  #include <linux/ptrace.h>
 | |
|  #include <linux/tracehook.h>
 | |
| +#include <linux/utrace.h>
 | |
|  
 | |
|  #include <asm/pgtable.h>
 | |
|  #include <asm/processor.h>
 | |
| @@ -192,6 +193,8 @@ static inline void task_state(struct seq
 | |
|  		cred->uid, cred->euid, cred->suid, cred->fsuid,
 | |
|  		cred->gid, cred->egid, cred->sgid, cred->fsgid);
 | |
|  
 | |
| +	task_utrace_proc_status(m, p);
 | |
| +
 | |
|  	task_lock(p);
 | |
|  	if (p->files)
 | |
|  		fdt = files_fdtable(p->files);
 | |
| diff --git a/include/linux/sched.h b/include/linux/sched.h
 | |
| index 5e7cc95..66a1ec8 100644  
 | |
| --- a/include/linux/sched.h
 | |
| +++ b/include/linux/sched.h
 | |
| @@ -1339,6 +1339,11 @@ struct task_struct {
 | |
|  #endif
 | |
|  	seccomp_t seccomp;
 | |
|  
 | |
| +#ifdef CONFIG_UTRACE
 | |
| +	struct utrace *utrace;
 | |
| +	unsigned long utrace_flags;
 | |
| +#endif
 | |
| +
 | |
|  /* Thread group tracking */
 | |
|     	u32 parent_exec_id;
 | |
|     	u32 self_exec_id;
 | |
| diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
 | |
| index c78b2f4..71fa250 100644  
 | |
| --- a/include/linux/tracehook.h
 | |
| +++ b/include/linux/tracehook.h
 | |
| @@ -49,6 +49,7 @@
 | |
|  #include <linux/sched.h>
 | |
|  #include <linux/ptrace.h>
 | |
|  #include <linux/security.h>
 | |
| +#include <linux/utrace.h>
 | |
|  struct linux_binprm;
 | |
|  
 | |
|  /**
 | |
| @@ -63,6 +64,8 @@ struct linux_binprm;
 | |
|   */
 | |
|  static inline int tracehook_expect_breakpoints(struct task_struct *task)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE)))
 | |
| +		return 1;
 | |
|  	return (task_ptrace(task) & PT_PTRACED) != 0;
 | |
|  }
 | |
|  
 | |
| @@ -111,6 +114,9 @@ static inline void ptrace_report_syscall
 | |
|  static inline __must_check int tracehook_report_syscall_entry(
 | |
|  	struct pt_regs *regs)
 | |
|  {
 | |
| +	if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) &&
 | |
| +	    utrace_report_syscall_entry(regs))
 | |
| +		return 1;
 | |
|  	ptrace_report_syscall(regs);
 | |
|  	return 0;
 | |
|  }
 | |
| @@ -134,6 +140,9 @@ static inline __must_check int tracehook
 | |
|   */
 | |
|  static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
 | |
|  {
 | |
| +	if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT))
 | |
| +		utrace_report_syscall_exit(regs);
 | |
| +
 | |
|  	if (step && (task_ptrace(current) & PT_PTRACED)) {
 | |
|  		siginfo_t info;
 | |
|  		user_single_step_siginfo(current, regs, &info);
 | |
| @@ -201,6 +210,8 @@ static inline void tracehook_report_exec
 | |
|  					 struct linux_binprm *bprm,
 | |
|  					 struct pt_regs *regs)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC)))
 | |
| +		utrace_report_exec(fmt, bprm, regs);
 | |
|  	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
 | |
|  	    unlikely(task_ptrace(current) & PT_PTRACED))
 | |
|  		send_sig(SIGTRAP, current, 0);
 | |
| @@ -218,10 +229,37 @@ static inline void tracehook_report_exec
 | |
|   */
 | |
|  static inline void tracehook_report_exit(long *exit_code)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT)))
 | |
| +		utrace_report_exit(exit_code);
 | |
|  	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
 | |
|  }
 | |
|  
 | |
|  /**
 | |
| + * tracehook_init_task - task_struct has just been copied
 | |
| + * @task:		new &struct task_struct just copied from parent
 | |
| + *
 | |
| + * Called from do_fork() when @task has just been duplicated.
 | |
| + * After this, @task will be passed to tracehook_free_task()
 | |
| + * even if the rest of its setup fails before it is fully created.
 | |
| + */
 | |
| +static inline void tracehook_init_task(struct task_struct *task)
 | |
| +{
 | |
| +	utrace_init_task(task);
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * tracehook_free_task - task_struct is being freed
 | |
| + * @task:		dead &struct task_struct being freed
 | |
| + *
 | |
| + * Called from free_task() when @task is no longer in use.
 | |
| + */
 | |
| +static inline void tracehook_free_task(struct task_struct *task)
 | |
| +{
 | |
| +	if (task_utrace_struct(task))
 | |
| +		utrace_free_task(task);
 | |
| +}
 | |
| +
 | |
| +/**
 | |
|   * tracehook_prepare_clone - prepare for new child to be cloned
 | |
|   * @clone_flags:	%CLONE_* flags from clone/fork/vfork system call
 | |
|   *
 | |
| @@ -285,6 +323,8 @@ static inline void tracehook_report_clon
 | |
|  					  unsigned long clone_flags,
 | |
|  					  pid_t pid, struct task_struct *child)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)))
 | |
| +		utrace_report_clone(clone_flags, child);
 | |
|  	if (unlikely(task_ptrace(child))) {
 | |
|  		/*
 | |
|  		 * It doesn't matter who attached/attaching to this
 | |
| @@ -317,6 +357,9 @@ static inline void tracehook_report_clon
 | |
|  						   pid_t pid,
 | |
|  						   struct task_struct *child)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) &&
 | |
| +	    (clone_flags & CLONE_VFORK))
 | |
| +		utrace_finish_vfork(current);
 | |
|  	if (unlikely(trace))
 | |
|  		ptrace_event(0, trace, pid);
 | |
|  }
 | |
| @@ -351,6 +394,10 @@ static inline void tracehook_report_vfor
 | |
|   */
 | |
|  static inline void tracehook_prepare_release_task(struct task_struct *task)
 | |
|  {
 | |
| +	/* see utrace_add_engine() about this barrier */
 | |
| +	smp_mb();
 | |
| +	if (task_utrace_flags(task))
 | |
| +		utrace_maybe_reap(task, task_utrace_struct(task), true);
 | |
|  }
 | |
|  
 | |
|  /**
 | |
| @@ -365,6 +412,7 @@ static inline void tracehook_prepare_rel
 | |
|  static inline void tracehook_finish_release_task(struct task_struct *task)
 | |
|  {
 | |
|  	ptrace_release_task(task);
 | |
| +	BUG_ON(task->exit_state != EXIT_DEAD);
 | |
|  }
 | |
|  
 | |
|  /**
 | |
| @@ -386,6 +434,8 @@ static inline void tracehook_signal_hand
 | |
|  					    const struct k_sigaction *ka,
 | |
|  					    struct pt_regs *regs, int stepping)
 | |
|  {
 | |
| +	if (task_utrace_flags(current))
 | |
| +		utrace_signal_handler(current, stepping);
 | |
|  	if (stepping && (task_ptrace(current) & PT_PTRACED))
 | |
|  		ptrace_notify(SIGTRAP);
 | |
|  }
 | |
| @@ -403,6 +453,8 @@ static inline void tracehook_signal_hand
 | |
|  static inline int tracehook_consider_ignored_signal(struct task_struct *task,
 | |
|  						    int sig)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN)))
 | |
| +		return 1;
 | |
|  	return (task_ptrace(task) & PT_PTRACED) != 0;
 | |
|  }
 | |
|  
 | |
| @@ -422,6 +474,9 @@ static inline int tracehook_consider_ign
 | |
|  static inline int tracehook_consider_fatal_signal(struct task_struct *task,
 | |
|  						  int sig)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) |
 | |
| +						UTRACE_EVENT(SIGNAL_CORE))))
 | |
| +		return 1;
 | |
|  	return (task_ptrace(task) & PT_PTRACED) != 0;
 | |
|  }
 | |
|  
 | |
| @@ -436,6 +491,8 @@ static inline int tracehook_consider_fat
 | |
|   */
 | |
|  static inline int tracehook_force_sigpending(void)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(current)))
 | |
| +		return utrace_interrupt_pending();
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| @@ -465,6 +522,8 @@ static inline int tracehook_get_signal(s
 | |
|  				       siginfo_t *info,
 | |
|  				       struct k_sigaction *return_ka)
 | |
|  {
 | |
| +	if (unlikely(task_utrace_flags(task)))
 | |
| +		return utrace_get_signal(task, regs, info, return_ka);
 | |
|  	return 0;
 | |
|  }
 | |
|  
 | |
| @@ -492,6 +551,8 @@ static inline int tracehook_get_signal(s
 | |
|   */
 | |
|  static inline int tracehook_notify_jctl(int notify, int why)
 | |
|  {
 | |
| +	if (task_utrace_flags(current) & UTRACE_EVENT(JCTL))
 | |
| +		utrace_report_jctl(notify, why);
 | |
|  	return notify ?: task_ptrace(current) ? why : 0;
 | |
|  }
 | |
|  
 | |
| @@ -502,6 +563,8 @@ static inline int tracehook_notify_jctl(
 | |
|   */
 | |
|  static inline void tracehook_finish_jctl(void)
 | |
|  {
 | |
| +	if (task_utrace_flags(current))
 | |
| +		utrace_finish_stop();
 | |
|  }
 | |
|  
 | |
|  #define DEATH_REAP			-1
 | |
| @@ -524,6 +587,8 @@ static inline void tracehook_finish_jctl
 | |
|  static inline int tracehook_notify_death(struct task_struct *task,
 | |
|  					 void **death_cookie, int group_dead)
 | |
|  {
 | |
| +	*death_cookie = task_utrace_struct(task);
 | |
| +
 | |
|  	if (task_detached(task))
 | |
|  		return task->ptrace ? SIGCHLD : DEATH_REAP;
 | |
|  
 | |
| @@ -560,6 +625,15 @@ static inline void tracehook_report_deat
 | |
|  					  int signal, void *death_cookie,
 | |
|  					  int group_dead)
 | |
|  {
 | |
| +	/*
 | |
| +	 * If utrace_set_events() was just called to enable
 | |
| +	 * UTRACE_EVENT(DEATH), then we are obliged to call
 | |
| +	 * utrace_report_death() and not miss it.  utrace_set_events()
 | |
| +	 * checks @task->exit_state under tasklist_lock to synchronize
 | |
| +	 * with exit_notify(), the caller.
 | |
| +	 */
 | |
| +	if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS)
 | |
| +		utrace_report_death(task, death_cookie, group_dead, signal);
 | |
|  }
 | |
|  
 | |
|  #ifdef TIF_NOTIFY_RESUME
 | |
| @@ -589,10 +663,21 @@ static inline void set_notify_resume(str
 | |
|   * asynchronously, this will be called again before we return to
 | |
|   * user mode.
 | |
|   *
 | |
| - * Called without locks.
 | |
| + * Called without locks.  However, on some machines this may be
 | |
| + * called with interrupts disabled.
 | |
|   */
 | |
|  static inline void tracehook_notify_resume(struct pt_regs *regs)
 | |
|  {
 | |
| +	struct task_struct *task = current;
 | |
| +	/*
 | |
| +	 * Prevent the following store/load from getting ahead of the
 | |
| +	 * caller which clears TIF_NOTIFY_RESUME. This pairs with the
 | |
| +	 * implicit mb() before setting TIF_NOTIFY_RESUME in
 | |
| +	 * set_notify_resume().
 | |
| +	 */
 | |
| +	smp_mb();
 | |
| +	if (task_utrace_flags(task))
 | |
| +		utrace_resume(task, regs);
 | |
|  }
 | |
|  #endif	/* TIF_NOTIFY_RESUME */
 | |
|  
 | |
| diff --git a/include/linux/utrace.h b/include/linux/utrace.h
 | |
| new file mode 100644
 | |
| index ...f251efe 100644  
 | |
| --- /dev/null
 | |
| +++ b/include/linux/utrace.h
 | |
| @@ -0,0 +1,692 @@
 | |
| +/*
 | |
| + * utrace infrastructure interface for debugging user processes
 | |
| + *
 | |
| + * Copyright (C) 2006-2009 Red Hat, Inc.  All rights reserved.
 | |
| + *
 | |
| + * This copyrighted material is made available to anyone wishing to use,
 | |
| + * modify, copy, or redistribute it subject to the terms and conditions
 | |
| + * of the GNU General Public License v.2.
 | |
| + *
 | |
| + * Red Hat Author: Roland McGrath.
 | |
| + *
 | |
| + * This interface allows for notification of interesting events in a
 | |
| + * thread.  It also mediates access to thread state such as registers.
 | |
| + * Multiple unrelated users can be associated with a single thread.
 | |
| + * We call each of these a tracing engine.
 | |
| + *
 | |
| + * A tracing engine starts by calling utrace_attach_task() or
 | |
| + * utrace_attach_pid() on the chosen thread, passing in a set of hooks
 | |
| + * (&struct utrace_engine_ops), and some associated data.  This produces a
 | |
| + * &struct utrace_engine, which is the handle used for all other
 | |
| + * operations.  An attached engine has its ops vector, its data, and an
 | |
| + * event mask controlled by utrace_set_events().
 | |
| + *
 | |
| + * For each event bit that is set, that engine will get the
 | |
| + * appropriate ops->report_*() callback when the event occurs.  The
 | |
| + * &struct utrace_engine_ops need not provide callbacks for an event
 | |
| + * unless the engine sets one of the associated event bits.
 | |
| + */
 | |
| +
 | |
| +#ifndef _LINUX_UTRACE_H
 | |
| +#define _LINUX_UTRACE_H	1
 | |
| +
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/kref.h>
 | |
| +#include <linux/signal.h>
 | |
| +#include <linux/sched.h>
 | |
| +
 | |
| +struct linux_binprm;
 | |
| +struct pt_regs;
 | |
| +struct utrace;
 | |
| +struct user_regset;
 | |
| +struct user_regset_view;
 | |
| +
 | |
| +/*
 | |
| + * Event bits passed to utrace_set_events().
 | |
| + * These appear in &struct task_struct.@utrace_flags
 | |
| + * and &struct utrace_engine.@flags.
 | |
| + */
 | |
| +enum utrace_events {
 | |
| +	_UTRACE_EVENT_QUIESCE,	/* Thread is available for examination.  */
 | |
| +	_UTRACE_EVENT_REAP,  	/* Zombie reaped, no more tracing possible.  */
 | |
| +	_UTRACE_EVENT_CLONE,	/* Successful clone/fork/vfork just done.  */
 | |
| +	_UTRACE_EVENT_EXEC,	/* Successful execve just completed.  */
 | |
| +	_UTRACE_EVENT_EXIT,	/* Thread exit in progress.  */
 | |
| +	_UTRACE_EVENT_DEATH,	/* Thread has died.  */
 | |
| +	_UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */
 | |
| +	_UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call.  */
 | |
| +	_UTRACE_EVENT_SIGNAL,	/* Signal delivery will run a user handler.  */
 | |
| +	_UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered.  */
 | |
| +	_UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend.  */
 | |
| +	_UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate.  */
 | |
| +	_UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core.  */
 | |
| +	_UTRACE_EVENT_JCTL,	/* Job control stop or continue completed.  */
 | |
| +	_UTRACE_NEVENTS
 | |
| +};
 | |
| +#define UTRACE_EVENT(type)	(1UL << _UTRACE_EVENT_##type)
 | |
| +
 | |
| +/*
 | |
| + * All the kinds of signal events.
 | |
| + * These all use the @report_signal() callback.
 | |
| + */
 | |
| +#define UTRACE_EVENT_SIGNAL_ALL	(UTRACE_EVENT(SIGNAL) \
 | |
| +				 | UTRACE_EVENT(SIGNAL_IGN) \
 | |
| +				 | UTRACE_EVENT(SIGNAL_STOP) \
 | |
| +				 | UTRACE_EVENT(SIGNAL_TERM) \
 | |
| +				 | UTRACE_EVENT(SIGNAL_CORE))
 | |
| +/*
 | |
| + * Both kinds of syscall events; these call the @report_syscall_entry()
 | |
| + * and @report_syscall_exit() callbacks, respectively.
 | |
| + */
 | |
| +#define UTRACE_EVENT_SYSCALL	\
 | |
| +	(UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT))
 | |
| +
 | |
| +/*
 | |
| + * The event reports triggered synchronously by task death.
 | |
| + */
 | |
| +#define _UTRACE_DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE))
 | |
| +
 | |
| +/*
 | |
| + * Hooks in <linux/tracehook.h> call these entry points to the utrace dispatch.
 | |
| + */
 | |
| +void utrace_free_task(struct task_struct *);
 | |
| +bool utrace_interrupt_pending(void);
 | |
| +void utrace_resume(struct task_struct *, struct pt_regs *);
 | |
| +void utrace_finish_stop(void);
 | |
| +void utrace_maybe_reap(struct task_struct *, struct utrace *, bool);
 | |
| +int utrace_get_signal(struct task_struct *, struct pt_regs *,
 | |
| +		      siginfo_t *, struct k_sigaction *);
 | |
| +void utrace_report_clone(unsigned long, struct task_struct *);
 | |
| +void utrace_finish_vfork(struct task_struct *);
 | |
| +void utrace_report_exit(long *exit_code);
 | |
| +void utrace_report_death(struct task_struct *, struct utrace *, bool, int);
 | |
| +void utrace_report_jctl(int notify, int type);
 | |
| +void utrace_report_exec(struct linux_binfmt *, struct linux_binprm *,
 | |
| +			struct pt_regs *regs);
 | |
| +bool utrace_report_syscall_entry(struct pt_regs *);
 | |
| +void utrace_report_syscall_exit(struct pt_regs *);
 | |
| +void utrace_signal_handler(struct task_struct *, int);
 | |
| +
 | |
| +#ifndef CONFIG_UTRACE
 | |
| +
 | |
| +/*
 | |
| + * <linux/tracehook.h> uses these accessors to avoid #ifdef CONFIG_UTRACE.
 | |
| + */
 | |
| +static inline unsigned long task_utrace_flags(struct task_struct *task)
 | |
| +{
 | |
| +	return 0;
 | |
| +}
 | |
| +static inline struct utrace *task_utrace_struct(struct task_struct *task)
 | |
| +{
 | |
| +	return NULL;
 | |
| +}
 | |
| +static inline void utrace_init_task(struct task_struct *child)
 | |
| +{
 | |
| +}
 | |
| +
 | |
| +static inline void task_utrace_proc_status(struct seq_file *m,
 | |
| +					   struct task_struct *p)
 | |
| +{
 | |
| +}
 | |
| +
 | |
| +#else  /* CONFIG_UTRACE */
 | |
| +
 | |
| +static inline unsigned long task_utrace_flags(struct task_struct *task)
 | |
| +{
 | |
| +	return task->utrace_flags;
 | |
| +}
 | |
| +
 | |
| +static inline struct utrace *task_utrace_struct(struct task_struct *task)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +
 | |
| +	/*
 | |
| +	 * This barrier ensures that any prior load of task->utrace_flags
 | |
| +	 * is ordered before this load of task->utrace.  We use those
 | |
| +	 * utrace_flags checks in the hot path to decide to call into
 | |
| +	 * the utrace code.  The first attach installs task->utrace before
 | |
| +	 * setting task->utrace_flags nonzero with implicit barrier in
 | |
| +	 * between, see utrace_add_engine().
 | |
| +	 */
 | |
| +	smp_rmb();
 | |
| +	utrace = task->utrace;
 | |
| +
 | |
| +	smp_read_barrier_depends(); /* See utrace_task_alloc().  */
 | |
| +	return utrace;
 | |
| +}
 | |
| +
 | |
| +static inline void utrace_init_task(struct task_struct *task)
 | |
| +{
 | |
| +	task->utrace_flags = 0;
 | |
| +	task->utrace = NULL;
 | |
| +}
 | |
| +
 | |
| +void task_utrace_proc_status(struct seq_file *m, struct task_struct *p);
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Version number of the API defined in this file.  This will change
 | |
| + * whenever a tracing engine's code would need some updates to keep
 | |
| + * working.  We maintain this here for the benefit of tracing engine code
 | |
| + * that is developed concurrently with utrace API improvements before they
 | |
| + * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy.
 | |
| + */
 | |
| +#define UTRACE_API_VERSION	20091216
 | |
| +
 | |
| +/**
 | |
| + * enum utrace_resume_action - engine's choice of action for a traced task
 | |
| + * @UTRACE_STOP:		Stay quiescent after callbacks.
 | |
| + * @UTRACE_INTERRUPT:		Make @report_signal() callback soon.
 | |
| + * @UTRACE_REPORT:		Make some callback soon.
 | |
| + * @UTRACE_SINGLESTEP:		Resume in user mode for one instruction.
 | |
| + * @UTRACE_BLOCKSTEP:		Resume in user mode until next branch.
 | |
| + * @UTRACE_RESUME:		Resume normally in user mode.
 | |
| + * @UTRACE_DETACH:		Detach my engine (implies %UTRACE_RESUME).
 | |
| + *
 | |
| + * See utrace_control() for detailed descriptions of each action.  This is
 | |
| + * encoded in the @action argument and the return value for every callback
 | |
| + * with a &u32 return value.
 | |
| + *
 | |
| + * The order of these is important.  When there is more than one engine,
 | |
| + * each supplies its choice and the smallest value prevails.
 | |
| + */
 | |
| +enum utrace_resume_action {
 | |
| +	UTRACE_STOP,
 | |
| +	UTRACE_INTERRUPT,
 | |
| +	UTRACE_REPORT,
 | |
| +	UTRACE_SINGLESTEP,
 | |
| +	UTRACE_BLOCKSTEP,
 | |
| +	UTRACE_RESUME,
 | |
| +	UTRACE_DETACH,
 | |
| +	UTRACE_RESUME_MAX
 | |
| +};
 | |
| +#define UTRACE_RESUME_BITS	(ilog2(UTRACE_RESUME_MAX) + 1)
 | |
| +#define UTRACE_RESUME_MASK	((1 << UTRACE_RESUME_BITS) - 1)
 | |
| +
 | |
| +/**
 | |
| + * utrace_resume_action - &enum utrace_resume_action from callback action
 | |
| + * @action:		&u32 callback @action argument or return value
 | |
| + *
 | |
| + * This extracts the &enum utrace_resume_action from @action,
 | |
| + * which is the @action argument to a &struct utrace_engine_ops
 | |
| + * callback or the return value from one.
 | |
| + */
 | |
| +static inline enum utrace_resume_action utrace_resume_action(u32 action)
 | |
| +{
 | |
| +	return action & UTRACE_RESUME_MASK;
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * enum utrace_signal_action - disposition of signal
 | |
| + * @UTRACE_SIGNAL_DELIVER:	Deliver according to sigaction.
 | |
| + * @UTRACE_SIGNAL_IGN:		Ignore the signal.
 | |
| + * @UTRACE_SIGNAL_TERM:		Terminate the process.
 | |
| + * @UTRACE_SIGNAL_CORE:		Terminate with core dump.
 | |
| + * @UTRACE_SIGNAL_STOP:		Deliver as absolute stop.
 | |
| + * @UTRACE_SIGNAL_TSTP:		Deliver as job control stop.
 | |
| + * @UTRACE_SIGNAL_REPORT:	Reporting before pending signals.
 | |
| + * @UTRACE_SIGNAL_HANDLER:	Reporting after signal handler setup.
 | |
| + *
 | |
| + * This is encoded in the @action argument and the return value for
 | |
| + * a @report_signal() callback.  It says what will happen to the
 | |
| + * signal described by the &siginfo_t parameter to the callback.
 | |
| + *
 | |
| + * The %UTRACE_SIGNAL_REPORT value is used in an @action argument when
 | |
| + * a tracing report is being made before dequeuing any pending signal.
 | |
| + * If this is immediately after a signal handler has been set up, then
 | |
| + * %UTRACE_SIGNAL_HANDLER is used instead.  A @report_signal callback
 | |
| + * that uses %UTRACE_SIGNAL_DELIVER|%UTRACE_SINGLESTEP will ensure
 | |
| + * it sees a %UTRACE_SIGNAL_HANDLER report.
 | |
| + */
 | |
| +enum utrace_signal_action {
 | |
| +	UTRACE_SIGNAL_DELIVER	= 0x00,
 | |
| +	UTRACE_SIGNAL_IGN	= 0x10,
 | |
| +	UTRACE_SIGNAL_TERM	= 0x20,
 | |
| +	UTRACE_SIGNAL_CORE	= 0x30,
 | |
| +	UTRACE_SIGNAL_STOP	= 0x40,
 | |
| +	UTRACE_SIGNAL_TSTP	= 0x50,
 | |
| +	UTRACE_SIGNAL_REPORT	= 0x60,
 | |
| +	UTRACE_SIGNAL_HANDLER	= 0x70
 | |
| +};
 | |
| +#define	UTRACE_SIGNAL_MASK	0xf0
 | |
| +#define UTRACE_SIGNAL_HOLD	0x100 /* Flag, push signal back on queue.  */
 | |
| +
 | |
| +/**
 | |
| + * utrace_signal_action - &enum utrace_signal_action from callback action
 | |
| + * @action:		@report_signal callback @action argument or return value
 | |
| + *
 | |
| + * This extracts the &enum utrace_signal_action from @action, which
 | |
| + * is the @action argument to a @report_signal callback or the
 | |
| + * return value from one.
 | |
| + */
 | |
| +static inline enum utrace_signal_action utrace_signal_action(u32 action)
 | |
| +{
 | |
| +	return action & UTRACE_SIGNAL_MASK;
 | |
| +}
 | |
| +
 | |
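The signal action and the resume action occupy disjoint bit-fields, so a callback ORs one of each into its return value.  A hypothetical @report_signal fragment (a sketch, not from this patch) that quashes delivery of SIGUSR1:

static u32 my_report_signal(u32 action, struct utrace_engine *engine,
			    struct pt_regs *regs, siginfo_t *info,
			    const struct k_sigaction *orig_ka,
			    struct k_sigaction *return_ka)
{
	/* Turn a pending SIGUSR1 delivery into an ignore. */
	if (utrace_signal_action(action) == UTRACE_SIGNAL_DELIVER &&
	    info->si_signo == SIGUSR1)
		return UTRACE_SIGNAL_IGN | utrace_resume_action(action);

	/* Pass the prior disposition and resume action through unchanged. */
	return utrace_signal_action(action) | utrace_resume_action(action);
}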
| +/**
 | |
| + * enum utrace_syscall_action - disposition of system call attempt
 | |
| + * @UTRACE_SYSCALL_RUN:		Run the system call.
 | |
| + * @UTRACE_SYSCALL_ABORT:	Don't run the system call.
 | |
| + *
 | |
| + * This is encoded in the @action argument and the return value for
 | |
| + * a @report_syscall_entry callback.
 | |
| + */
 | |
| +enum utrace_syscall_action {
 | |
| +	UTRACE_SYSCALL_RUN	= 0x00,
 | |
| +	UTRACE_SYSCALL_ABORT	= 0x10
 | |
| +};
 | |
| +#define	UTRACE_SYSCALL_MASK	0xf0
 | |
| +#define	UTRACE_SYSCALL_RESUMED	0x100 /* Flag, report_syscall_entry() repeats */
 | |
| +
 | |
| +/**
 | |
| + * utrace_syscall_action - &enum utrace_syscall_action from callback action
 | |
| + * @action:		@report_syscall_entry callback @action or return value
 | |
| + *
 | |
| + * This extracts the &enum utrace_syscall_action from @action, which
 | |
| + * is the @action argument to a @report_syscall_entry callback or the
 | |
| + * return value from one.
 | |
| + */
 | |
| +static inline enum utrace_syscall_action utrace_syscall_action(u32 action)
 | |
| +{
 | |
| +	return action & UTRACE_SYSCALL_MASK;
 | |
| +}
 | |
| +
 | |
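For example, a hypothetical @report_syscall_entry callback (a sketch; __NR_getpid stands in for whatever call an engine wants to veto) aborts the attempt by OR-ing %UTRACE_SYSCALL_ABORT into its return value:

static u32 my_report_syscall_entry(u32 action, struct utrace_engine *engine,
				   struct pt_regs *regs)
{
	/* syscall_get_nr() is the <asm/syscall.h> accessor. */
	if (syscall_get_nr(current, regs) == __NR_getpid)
		return UTRACE_SYSCALL_ABORT | utrace_resume_action(action);

	return UTRACE_SYSCALL_RUN | utrace_resume_action(action);
}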
| +/*
 | |
| + * Flags for utrace_attach_task() and utrace_attach_pid().
 | |
| + */
 | |
| +#define UTRACE_ATTACH_MATCH_OPS		0x0001 /* Match engines on ops.  */
 | |
| +#define UTRACE_ATTACH_MATCH_DATA	0x0002 /* Match engines on data.  */
 | |
| +#define UTRACE_ATTACH_MATCH_MASK	0x000f
 | |
| +#define UTRACE_ATTACH_CREATE		0x0010 /* Attach a new engine.  */
 | |
| +#define UTRACE_ATTACH_EXCLUSIVE		0x0020 /* Refuse if existing match.  */
 | |
| +
 | |
| +/**
 | |
| + * struct utrace_engine - per-engine structure
 | |
| + * @ops:	&struct utrace_engine_ops pointer passed to utrace_attach_task()
 | |
| + * @data:	engine-private &void * passed to utrace_attach_task()
 | |
| + * @flags:	event mask set by utrace_set_events() plus internal flag bits
 | |
| + *
 | |
| + * The task itself never has to worry about engines detaching while
 | |
| + * it's doing event callbacks.  These structures are removed from the
 | |
| + * task's active list only when it's stopped, or by the task itself.
 | |
| + *
 | |
| + * utrace_engine_get() and utrace_engine_put() maintain a reference count.
 | |
| + * When it drops to zero, the structure is freed.  One reference is held
 | |
| + * implicitly while the engine is attached to its task.
 | |
| + */
 | |
| +struct utrace_engine {
 | |
| +/* private: */
 | |
| +	struct kref kref;
 | |
| +	void (*release)(void *);
 | |
| +	struct list_head entry;
 | |
| +
 | |
| +/* public: */
 | |
| +	const struct utrace_engine_ops *ops;
 | |
| +	void *data;
 | |
| +
 | |
| +	unsigned long flags;
 | |
| +};
 | |
| +
 | |
| +/**
 | |
| + * utrace_engine_get - acquire a reference on a &struct utrace_engine
 | |
| + * @engine:	&struct utrace_engine pointer
 | |
| + *
 | |
| + * You must hold a reference on @engine, and you get another.
 | |
| + */
 | |
| +static inline void utrace_engine_get(struct utrace_engine *engine)
 | |
| +{
 | |
| +	kref_get(&engine->kref);
 | |
| +}
 | |
| +
 | |
| +void __utrace_engine_release(struct kref *);
 | |
| +
 | |
| +/**
 | |
| + * utrace_engine_put - release a reference on a &struct utrace_engine
 | |
| + * @engine:	&struct utrace_engine pointer
 | |
| + *
 | |
| + * You must hold a reference on @engine, and you lose that reference.
 | |
| + * If it was the last one, @engine becomes an invalid pointer.
 | |
| + */
 | |
| +static inline void utrace_engine_put(struct utrace_engine *engine)
 | |
| +{
 | |
| +	kref_put(&engine->kref, __utrace_engine_release);
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * struct utrace_engine_ops - tracing engine callbacks
 | |
| + *
 | |
| + * Each @report_*() callback corresponds to an %UTRACE_EVENT(*) bit.
 | |
| + * utrace_set_events() calls on @engine choose which callbacks will
 | |
| + * be made to @engine from @task.
 | |
| + *
 | |
| + * Most callbacks take an @action argument, giving the resume action
 | |
| + * chosen by other tracing engines.  All callbacks take an @engine
 | |
| + * argument.  The @report_reap callback takes a @task argument that
 | |
| + * might or might not be @current.  All other @report_* callbacks
 | |
| + * report an event in the @current task.
 | |
| + *
 | |
| + * For some calls, @action also includes bits specific to that event
 | |
| + * and utrace_resume_action() is used to extract the resume action.
 | |
| + * This shows what would happen if @engine wasn't there, or what will
 | |
| + * happen if the callback's return value uses %UTRACE_RESUME.  This
 | |
| + * always starts as %UTRACE_RESUME when no other tracing is being done
 | |
| + * on this task.
 | |
| + *
 | |
| + * All return values contain &enum utrace_resume_action bits.  For
 | |
| + * some calls, other bits specific to that kind of event are added to
 | |
| + * the resume action bits with OR.  These are the same bits used in
 | |
| + * the @action argument.  The resume action returned by a callback
 | |
| + * does not override previous engines' choices, it only says what
 | |
| + * @engine wants done.  What @current actually does is the action that's
 | |
| + * most constrained among the choices made by all attached engines.
 | |
| + * See utrace_control() for more information on the actions.
 | |
| + *
 | |
| + * When %UTRACE_STOP is used in @report_syscall_entry, then @current
 | |
| + * stops before attempting the system call.  In this case, another
 | |
| + * @report_syscall_entry callback will follow after @current resumes if
 | |
| + * %UTRACE_REPORT or %UTRACE_INTERRUPT was returned by some callback
 | |
| + * or passed to utrace_control().  In a second or later callback,
 | |
| + * %UTRACE_SYSCALL_RESUMED is set in the @action argument to indicate
 | |
| + * a repeat callback still waiting to attempt the same system call
 | |
| + * invocation.  This repeat callback gives each engine an opportunity
 | |
| + * to reexamine registers another engine might have changed while
 | |
| + * @current was held in %UTRACE_STOP.
 | |
| + *
 | |
| + * In other cases, the resume action does not take effect until @current
 | |
| + * is ready to check for signals and return to user mode.  If there
 | |
| + * are more callbacks to be made, the last round of calls determines
 | |
| + * the final action.  A @report_quiesce callback with @event zero, or
 | |
| + * a @report_signal callback, will always be the last one made before
 | |
| + * @current resumes.  Only %UTRACE_STOP is "sticky"--if @engine returned
 | |
| + * %UTRACE_STOP then @current stays stopped unless @engine returns
 | |
| + * a different action from a following callback.
 | |
| + *
 | |
| + * The report_death() and report_reap() callbacks do not take @action
 | |
| + * arguments, and only %UTRACE_DETACH is meaningful in the return value
 | |
| + * from a report_death() callback.  None of the resume actions applies
 | |
| + * to a dead thread.
 | |
| + *
 | |
| + * All @report_*() hooks are called with no locks held, in a generally
 | |
| + * safe environment when we will be returning to user mode soon (or just
 | |
| + * entered the kernel).  It is fine to block for memory allocation and
 | |
| + * the like, but all hooks are asynchronous and must not block on
 | |
| + * external events!  If you want the thread to block, use %UTRACE_STOP
 | |
| + * in your hook's return value; then later wake it up with utrace_control().
 | |
| + *
 | |
| + * @report_quiesce:
 | |
| + *	Requested by %UTRACE_EVENT(%QUIESCE).
 | |
| + *	This does not indicate any event, but just that @current is in a
 | |
| + *	safe place for examination.  This call is made before each specific
 | |
| + *	event callback, except for @report_reap.  The @event argument gives
 | |
| + *	the %UTRACE_EVENT(@which) value for the event occurring.  This
 | |
| + *	callback might be made for events @engine has not requested, if
 | |
| + *	some other engine is tracing the event; a utrace_set_events()
 | |
| + *	call here can request the immediate callback for this occurrence of
 | |
| + *	@event.  @event is zero when there is no other event, @current is
 | |
| + *	now ready to check for signals and return to user mode, and some
 | |
| + *	engine has used %UTRACE_REPORT or %UTRACE_INTERRUPT to request this
 | |
| + *	callback.  For this case, if @report_signal is not %NULL, the
 | |
| + *	@report_quiesce callback may be replaced with a @report_signal
 | |
| + *	callback passing %UTRACE_SIGNAL_REPORT in its @action argument,
 | |
| + *	whenever @current is entering the signal-check path anyway.
 | |
| + *
 | |
| + * @report_signal:
 | |
| + *	Requested by %UTRACE_EVENT(%SIGNAL_*) or %UTRACE_EVENT(%QUIESCE).
 | |
| + *	Use utrace_signal_action() and utrace_resume_action() on @action.
 | |
| + *	The signal action is %UTRACE_SIGNAL_REPORT when some engine has
 | |
| + *	used %UTRACE_REPORT or %UTRACE_INTERRUPT; the callback can choose
 | |
| + *	to stop or to deliver an artificial signal, before pending signals.
 | |
| + *	It's %UTRACE_SIGNAL_HANDLER instead when signal handler setup just
 | |
| + *	finished (after a previous %UTRACE_SIGNAL_DELIVER return); this
 | |
| + *	serves in lieu of any %UTRACE_SIGNAL_REPORT callback requested by
 | |
| + *	%UTRACE_REPORT or %UTRACE_INTERRUPT, and is also implicitly
 | |
| + *	requested by %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP into the
 | |
| + *	signal delivery.  The other signal actions indicate a signal about
 | |
| + *	to be delivered; the previous engine's return value sets the signal
 | |
| + *	action seen by the following engine's callback.  The @info data
 | |
| + *	can be changed at will, including @info->si_signo.  The settings in
 | |
| + *	@return_ka determine what %UTRACE_SIGNAL_DELIVER does.  @orig_ka
 | |
| + *	is what was in force before other tracing engines intervened, and
 | |
| + *	it's %NULL when this report began as %UTRACE_SIGNAL_REPORT or
 | |
| + *	%UTRACE_SIGNAL_HANDLER.  For a report without a new signal, @info
 | |
| + *	is left uninitialized and must be set completely by an engine that
 | |
| + *	chooses to deliver a signal; if there was a previous @report_signal
 | |
| + *	callback ending in %UTRACE_STOP and it was just resumed using
 | |
| + *	%UTRACE_REPORT or %UTRACE_INTERRUPT, then @info is left unchanged
 | |
| + *	from the previous callback.  In this way, the original signal can
 | |
| + *	be left in @info while returning %UTRACE_STOP|%UTRACE_SIGNAL_IGN
 | |
| + *	and then found again when resuming with %UTRACE_INTERRUPT.
 | |
| + *	The %UTRACE_SIGNAL_HOLD flag bit can be OR'd into the return value,
 | |
| + *	and might be in @action if the previous engine returned it.  This
 | |
| + *	flag asks that the signal in @info be pushed back on @current's queue
 | |
| + *	so that it will be seen again after whatever action is taken now.
 | |
| + *
 | |
| + * @report_clone:
 | |
| + *	Requested by %UTRACE_EVENT(%CLONE).
 | |
| + *	Event reported for parent, before the new task @child might run.
 | |
| + *	@clone_flags gives the flags used in the clone system call, or
 | |
| + *	equivalent flags for a fork() or vfork() system call.  This
 | |
| + *	function can use utrace_attach_task() on @child.  Then passing
 | |
| + *	%UTRACE_STOP to utrace_control() on @child here keeps the child
 | |
| + *	stopped before it ever runs in user mode, %UTRACE_REPORT or
 | |
| + *	%UTRACE_INTERRUPT ensures a callback from @child before it
 | |
| + *	starts in user mode.
 | |
| + *
 | |
| + * @report_jctl:
 | |
| + *	Requested by %UTRACE_EVENT(%JCTL).
 | |
| + *	Job control event; @type is %CLD_STOPPED or %CLD_CONTINUED,
 | |
| + *	indicating whether we are stopping or resuming now.  If @notify
 | |
| + *	is nonzero, @current is the last thread to stop and so will send
 | |
| + *	%SIGCHLD to its parent after this callback; @notify reflects
 | |
| + *	what the parent's %SIGCHLD has in @si_code, which can sometimes
 | |
| + *	be %CLD_STOPPED even when @type is %CLD_CONTINUED.
 | |
| + *
 | |
| + * @report_exec:
 | |
| + *	Requested by %UTRACE_EVENT(%EXEC).
 | |
| + *	An execve system call has succeeded and the new program is about to
 | |
| + *	start running.  The initial user register state can be tweaked
 | |
| + *	directly in @regs.  @fmt and @bprm give the details of this exec.
 | |
| + *
 | |
| + * @report_syscall_entry:
 | |
| + *	Requested by %UTRACE_EVENT(%SYSCALL_ENTRY).
 | |
| + *	Thread has entered the kernel to request a system call.
 | |
| + *	The user register state can be tweaked directly in @regs.
 | |
| + *	The @action argument contains an &enum utrace_syscall_action,
 | |
| + *	use utrace_syscall_action() to extract it.  The return value
 | |
| + *	overrides the last engine's action for the system call.
 | |
| + *	If the final action is %UTRACE_SYSCALL_ABORT, no system call
 | |
| + *	is made.  The details of the system call being attempted can
 | |
| + *	be fetched here with syscall_get_nr() and syscall_get_arguments().
 | |
| + *	The parameter registers can be changed with syscall_set_arguments().
 | |
| + *	See above about the %UTRACE_SYSCALL_RESUMED flag in @action.
 | |
| + *	Use %UTRACE_REPORT in the return value to guarantee you get
 | |
| + *	another callback (with %UTRACE_SYSCALL_RESUMED flag) in case
 | |
| + *	@current stops with %UTRACE_STOP before attempting the system call.
 | |
| + *
 | |
| + * @report_syscall_exit:
 | |
| + *	Requested by %UTRACE_EVENT(%SYSCALL_EXIT).
 | |
| + *	Thread is about to leave the kernel after a system call request.
 | |
| + *	The user register state can be tweaked directly in @regs.
 | |
| + *	The results of the system call attempt can be examined here using
 | |
| + *	syscall_get_error() and syscall_get_return_value().  It is safe
 | |
| + *	here to call syscall_set_return_value() or syscall_rollback().
 | |
| + *
 | |
| + * @report_exit:
 | |
| + *	Requested by %UTRACE_EVENT(%EXIT).
 | |
| + *	Thread is exiting and cannot be prevented from doing so,
 | |
| + *	but all its state is still live.  The @code value will be
 | |
| + *	the wait result seen by the parent, and can be changed by
 | |
| + *	this engine or others.  The @orig_code value is the real
 | |
| + *	status, not changed by any tracing engine.  Returning %UTRACE_STOP
 | |
| + *	here keeps @current stopped before it cleans up its state and dies,
 | |
| + *	so it can be examined by other processes.  When @current is allowed
 | |
| + *	to run, it will die and get to the @report_death callback.
 | |
| + *
 | |
| + * @report_death:
 | |
| + *	Requested by %UTRACE_EVENT(%DEATH).
 | |
| + *	Thread is really dead now.  It might be reaped by its parent at
 | |
| + *	any time, or self-reap immediately.  Though the actual reaping
 | |
| + *	may happen in parallel, a report_reap() callback will always be
 | |
| + *	ordered after a report_death() callback.
 | |
| + *
 | |
| + * @report_reap:
 | |
| + *	Requested by %UTRACE_EVENT(%REAP).
 | |
| + *	Called when someone reaps the dead task (parent, init, or self).
 | |
| + *	This means the parent called wait, or else this was a detached
 | |
| + *	thread or a process whose parent ignores SIGCHLD.
 | |
| + *	No more callbacks are made after this one.
 | |
| + *	The engine is always detached.
 | |
| + *	There is nothing more a tracing engine can do about this thread.
 | |
| + *	After this callback, the @engine pointer will become invalid.
 | |
| + *	The @task pointer may become invalid if get_task_struct() hasn't
 | |
| + *	been used to keep it alive.
 | |
| + *	An engine should always request this callback if it stores the
 | |
| + *	@engine pointer or stores any pointer in @engine->data, so it
 | |
| + *	can clean up its data structures.
 | |
| + *	Unlike other callbacks, this can be called from the parent's context
 | |
| + *	rather than from the traced thread itself--it must not delay the
 | |
| + *	parent by blocking.
 | |
| + *
 | |
| + * @release:
 | |
| + *	If not %NULL, this is called after the last utrace_engine_put()
 | |
| + *	call for a &struct utrace_engine, which could be implicit after
 | |
| + *	a %UTRACE_DETACH return from another callback.  Its argument is
 | |
| + *	the engine's @data member.
 | |
| + */
 | |
| +struct utrace_engine_ops {
 | |
| +	u32 (*report_quiesce)(u32 action, struct utrace_engine *engine,
 | |
| +			      unsigned long event);
 | |
| +	u32 (*report_signal)(u32 action, struct utrace_engine *engine,
 | |
| +			     struct pt_regs *regs,
 | |
| +			     siginfo_t *info,
 | |
| +			     const struct k_sigaction *orig_ka,
 | |
| +			     struct k_sigaction *return_ka);
 | |
| +	u32 (*report_clone)(u32 action, struct utrace_engine *engine,
 | |
| +			    unsigned long clone_flags,
 | |
| +			    struct task_struct *child);
 | |
| +	u32 (*report_jctl)(u32 action, struct utrace_engine *engine,
 | |
| +			   int type, int notify);
 | |
| +	u32 (*report_exec)(u32 action, struct utrace_engine *engine,
 | |
| +			   const struct linux_binfmt *fmt,
 | |
| +			   const struct linux_binprm *bprm,
 | |
| +			   struct pt_regs *regs);
 | |
| +	u32 (*report_syscall_entry)(u32 action, struct utrace_engine *engine,
 | |
| +				    struct pt_regs *regs);
 | |
| +	u32 (*report_syscall_exit)(u32 action, struct utrace_engine *engine,
 | |
| +				   struct pt_regs *regs);
 | |
| +	u32 (*report_exit)(u32 action, struct utrace_engine *engine,
 | |
| +			   long orig_code, long *code);
 | |
| +	u32 (*report_death)(struct utrace_engine *engine,
 | |
| +			    bool group_dead, int signal);
 | |
| +	void (*report_reap)(struct utrace_engine *engine,
 | |
| +			    struct task_struct *task);
 | |
| +	void (*release)(void *data);
 | |
| +};
 | |
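Tying the earlier sketches together, a minimal engine fills in only the callbacks it needs (hypothetical; members left %NULL are simply never called, though any engine that stores private data should also supply @report_reap and @release to clean up):

static const struct utrace_engine_ops my_utrace_ops = {
	.report_quiesce       = my_report_quiesce,
	.report_signal        = my_report_signal,
	.report_syscall_entry = my_report_syscall_entry,
};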
| +
 | |
| +/**
 | |
| + * struct utrace_examiner - private state for using utrace_prepare_examine()
 | |
| + *
 | |
| + * The members of &struct utrace_examiner are private to the implementation.
 | |
| + * This data type holds the state from a call to utrace_prepare_examine()
 | |
| + * to be used by a call to utrace_finish_examine().
 | |
| + */
 | |
| +struct utrace_examiner {
 | |
| +/* private: */
 | |
| +	long state;
 | |
| +	unsigned long ncsw;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * These are the exported entry points for tracing engines to use.
 | |
| + * See kernel/utrace.c for their kerneldoc comments with interface details.
 | |
| + */
 | |
| +struct utrace_engine *utrace_attach_task(struct task_struct *, int,
 | |
| +					 const struct utrace_engine_ops *,
 | |
| +					 void *);
 | |
| +struct utrace_engine *utrace_attach_pid(struct pid *, int,
 | |
| +					const struct utrace_engine_ops *,
 | |
| +					void *);
 | |
| +int __must_check utrace_control(struct task_struct *,
 | |
| +				struct utrace_engine *,
 | |
| +				enum utrace_resume_action);
 | |
| +int __must_check utrace_set_events(struct task_struct *,
 | |
| +				   struct utrace_engine *,
 | |
| +				   unsigned long eventmask);
 | |
| +int __must_check utrace_barrier(struct task_struct *,
 | |
| +				struct utrace_engine *);
 | |
| +int __must_check utrace_prepare_examine(struct task_struct *,
 | |
| +					struct utrace_engine *,
 | |
| +					struct utrace_examiner *);
 | |
| +int __must_check utrace_finish_examine(struct task_struct *,
 | |
| +				       struct utrace_engine *,
 | |
| +				       struct utrace_examiner *);
 | |
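The prepare/finish examine pair brackets a speculative read of a stopped thread's state.  A sketch of the intended idiom (hypothetical caller code; assumes @engine is attached to @task):

static int examine_task(struct task_struct *task,
			struct utrace_engine *engine)
{
	struct utrace_examiner exam;
	int ret = utrace_prepare_examine(task, engine, &exam);

	if (ret)
		return ret;

	/* ... read task state here, e.g. via task_pt_regs(task) ... */

	/* Nonzero here means @task ran in the meantime; the read is stale. */
	return utrace_finish_examine(task, engine, &exam);
}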
| +
 | |
| +/**
 | |
| + * utrace_control_pid - control a thread being traced by a tracing engine
 | |
| + * @pid:		thread to affect
 | |
| + * @engine:		attached engine to affect
 | |
| + * @action:		&enum utrace_resume_action for thread to do
 | |
| + *
 | |
| + * This is the same as utrace_control(), but takes a &struct pid
 | |
| + * pointer rather than a &struct task_struct pointer.  The caller must
 | |
| + * hold a ref on @pid, but does not need to worry about the task
 | |
| + * staying valid.  If it's been reaped so that @pid points nowhere,
 | |
| + * then this call returns -%ESRCH.
 | |
| + */
 | |
| +static inline __must_check int utrace_control_pid(
 | |
| +	struct pid *pid, struct utrace_engine *engine,
 | |
| +	enum utrace_resume_action action)
 | |
| +{
 | |
| +	/*
 | |
| +	 * We don't bother with rcu_read_lock() here to protect the
 | |
| +	 * task_struct pointer, because utrace_control will return
 | |
| +	 * -ESRCH without looking at that pointer if the engine is
 | |
| +	 * already detached.  A task_struct pointer can't die before
 | |
| +	 * all the engines are detached in release_task() first.
 | |
| +	 */
 | |
| +	struct task_struct *task = pid_task(pid, PIDTYPE_PID);
 | |
| +	return unlikely(!task) ? -ESRCH : utrace_control(task, engine, action);
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * utrace_set_events_pid - choose which event reports a tracing engine gets
 | |
| + * @pid:		thread to affect
 | |
| + * @engine:		attached engine to affect
 | |
| + * @eventmask:		new event mask
 | |
| + *
 | |
| + * This is the same as utrace_set_events(), but takes a &struct pid
 | |
| + * pointer rather than a &struct task_struct pointer.  The caller must
 | |
| + * hold a ref on @pid, but does not need to worry about the task
 | |
| + * staying valid.  If it's been reaped so that @pid points nowhere,
 | |
| + * then this call returns -%ESRCH.
 | |
| + */
 | |
| +static inline __must_check int utrace_set_events_pid(
 | |
| +	struct pid *pid, struct utrace_engine *engine, unsigned long eventmask)
 | |
| +{
 | |
| +	struct task_struct *task = pid_task(pid, PIDTYPE_PID);
 | |
| +	return unlikely(!task) ? -ESRCH :
 | |
| +		utrace_set_events(task, engine, eventmask);
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * utrace_barrier_pid - synchronize with simultaneous tracing callbacks
 | |
| + * @pid:		thread to affect
 | |
| + * @engine:		engine to affect (can be detached)
 | |
| + *
 | |
| + * This is the same as utrace_barrier(), but takes a &struct pid
 | |
| + * pointer rather than a &struct task_struct pointer.  The caller must
 | |
| + * hold a ref on @pid, but does not need to worry about the task
 | |
| + * staying valid.  If it's been reaped so that @pid points nowhere,
 | |
| + * then this call returns -%ESRCH.
 | |
| + */
 | |
| +static inline __must_check int utrace_barrier_pid(struct pid *pid,
 | |
| +						  struct utrace_engine *engine)
 | |
| +{
 | |
| +	struct task_struct *task = pid_task(pid, PIDTYPE_PID);
 | |
| +	return unlikely(!task) ? -ESRCH : utrace_barrier(task, engine);
 | |
| +}
 | |
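A usage sketch of the pid-based wrappers (hypothetical; assumes the my_utrace_ops table shown earlier and that the caller holds a ref on @pid):

static int attach_by_pid(struct pid *pid)
{
	struct utrace_engine *engine;
	int ret;

	engine = utrace_attach_pid(pid, UTRACE_ATTACH_CREATE,
				   &my_utrace_ops, NULL);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	ret = utrace_set_events_pid(pid, engine,
				    UTRACE_EVENT(QUIESCE) |
				    UTRACE_EVENT(SYSCALL_ENTRY));
	/* The attached engine keeps its own implicit ref; drop ours. */
	utrace_engine_put(engine);
	return ret;
}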
| +
 | |
| +#endif	/* CONFIG_UTRACE */
 | |
| +
 | |
| +#endif	/* linux/utrace.h */
 | |
| diff --git a/init/Kconfig b/init/Kconfig
 | |
| index 2de5b1c..a283086 100644  
 | |
| --- a/init/Kconfig
 | |
| +++ b/init/Kconfig
 | |
| @@ -332,6 +332,15 @@ config AUDIT_TREE
 | |
|  	depends on AUDITSYSCALL
 | |
|  	select FSNOTIFY
 | |
|  
 | |
| +config UTRACE
 | |
| +	bool "Infrastructure for tracing and debugging user processes"
 | |
| +	depends on EXPERIMENTAL
 | |
| +	depends on HAVE_ARCH_TRACEHOOK
 | |
| +	help
 | |
| +	  Enable the utrace process tracing interface.  This is an internal
 | |
| +	  kernel interface exported to kernel modules, to track events in
 | |
| +	  kernel interface exported to kernel modules that track events in
 | |
| +	  user threads and extract or change user thread state.
 | |
|  menu "RCU Subsystem"
 | |
|  
 | |
|  choice
 | |
| diff --git a/kernel/Makefile b/kernel/Makefile
 | |
| index 0b72d1a..6004913 100644  
 | |
| --- a/kernel/Makefile
 | |
| +++ b/kernel/Makefile
 | |
| @@ -70,6 +70,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
 | |
|  obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
 | |
|  obj-$(CONFIG_SMP) += stop_machine.o
 | |
|  obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 | |
| +obj-$(CONFIG_UTRACE) += utrace.o
 | |
|  obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
 | |
|  obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
 | |
|  obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
 | |
| diff --git a/kernel/fork.c b/kernel/fork.c
 | |
| index 98b4508..3ceff6f 100644  
 | |
| --- a/kernel/fork.c
 | |
| +++ b/kernel/fork.c
 | |
| @@ -161,6 +161,7 @@ void free_task(struct task_struct *tsk)
 | |
|  	free_thread_info(tsk->stack);
 | |
|  	rt_mutex_debug_task_free(tsk);
 | |
|  	ftrace_graph_exit_task(tsk);
 | |
| +	tracehook_free_task(tsk);
 | |
|  	free_task_struct(tsk);
 | |
|  }
 | |
|  EXPORT_SYMBOL(free_task);
 | |
| @@ -1008,6 +1009,8 @@ static struct task_struct *copy_process(
 | |
|  	if (!p)
 | |
|  		goto fork_out;
 | |
|  
 | |
| +	tracehook_init_task(p);
 | |
| +
 | |
|  	ftrace_graph_init_task(p);
 | |
|  
 | |
|  	rt_mutex_init_task(p);
 | |
| diff --git a/kernel/ptrace.c b/kernel/ptrace.c
 | |
| index 8049cb5..23bde94 100644  
 | |
| --- a/kernel/ptrace.c
 | |
| +++ b/kernel/ptrace.c
 | |
| @@ -15,6 +15,7 @@
 | |
|  #include <linux/highmem.h>
 | |
|  #include <linux/pagemap.h>
 | |
|  #include <linux/ptrace.h>
 | |
| +#include <linux/utrace.h>
 | |
|  #include <linux/security.h>
 | |
|  #include <linux/signal.h>
 | |
|  #include <linux/audit.h>
 | |
| @@ -163,6 +164,14 @@ bool ptrace_may_access(struct task_struc
 | |
|  	return !err;
 | |
|  }
 | |
|  
 | |
| +/*
 | |
| + * For experimental use of utrace, exclude ptrace on the same task.
 | |
| + */
 | |
| +static inline bool exclude_ptrace(struct task_struct *task)
 | |
| +{
 | |
| +	return unlikely(!!task_utrace_flags(task));
 | |
| +}
 | |
| +
 | |
|  int ptrace_attach(struct task_struct *task)
 | |
|  {
 | |
|  	int retval;
 | |
| @@ -186,6 +195,8 @@ int ptrace_attach(struct task_struct *ta
 | |
|  
 | |
|  	task_lock(task);
 | |
|  	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
 | |
| +	if (!retval && exclude_ptrace(task))
 | |
| +		retval = -EBUSY;
 | |
|  	task_unlock(task);
 | |
|  	if (retval)
 | |
|  		goto unlock_creds;
 | |
| @@ -223,6 +234,9 @@ int ptrace_traceme(void)
 | |
|  {
 | |
|  	int ret = -EPERM;
 | |
|  
 | |
| +	if (exclude_ptrace(current)) /* XXX locking */
 | |
| +		return -EBUSY;
 | |
| +
 | |
|  	write_lock_irq(&tasklist_lock);
 | |
|  	/* Are we already being traced? */
 | |
|  	if (!current->ptrace) {
 | |
| diff --git a/kernel/utrace.c b/kernel/utrace.c
 | |
| new file mode 100644
 | |
| index ...43f38b7 100644  
 | |
| --- /dev/null
 | |
| +++ b/kernel/utrace.c
 | |
| @@ -0,0 +1,2434 @@
 | |
| +/*
 | |
| + * utrace infrastructure interface for debugging user processes
 | |
| + *
 | |
| + * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
 | |
| + *
 | |
| + * This copyrighted material is made available to anyone wishing to use,
 | |
| + * modify, copy, or redistribute it subject to the terms and conditions
 | |
| + * of the GNU General Public License v.2.
 | |
| + *
 | |
| + * Red Hat Author: Roland McGrath.
 | |
| + */
 | |
| +
 | |
| +#include <linux/utrace.h>
 | |
| +#include <linux/tracehook.h>
 | |
| +#include <linux/regset.h>
 | |
| +#include <asm/syscall.h>
 | |
| +#include <linux/ptrace.h>
 | |
| +#include <linux/err.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/freezer.h>
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/seq_file.h>
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Per-thread structure private to utrace implementation.
 | |
| + * If task_struct.utrace_flags is nonzero, task_struct.utrace
 | |
| + * has always been allocated first.  Once allocated, it is
 | |
| + * never freed until free_task().
 | |
| + *
 | |
| + * The common event reporting loops are done by the task making the
 | |
| + * report without ever taking any locks.  To facilitate this, the two
 | |
| + * lists @attached and @attaching work together for smooth asynchronous
 | |
| + * attaching with low overhead.  Modifying either list requires @lock.
 | |
| + * The @attaching list can be modified any time while holding @lock.
 | |
| + * New engines being attached always go on this list.
 | |
| + *
 | |
| + * The @attached list is what the task itself uses for its reporting
 | |
| + * loops.  When the task itself is not quiescent, it can use the
 | |
| + * @attached list without taking any lock.  Nobody may modify the list
 | |
| + * when the task is not quiescent.  When it is quiescent, that means
 | |
| + * that it won't run again without taking @lock itself before using
 | |
| + * the list.
 | |
| + *
 | |
| + * At each place where we know the task is quiescent (or it's current),
 | |
| + * while holding @lock, we call splice_attaching(), below.  This moves
 | |
| + * the @attaching list members on to the end of the @attached list.
 | |
| + * Since this happens at the start of any reporting pass, any new
 | |
| + * engines attached asynchronously go on the stable @attached list
 | |
| + * in time to have their callbacks seen.
 | |
| + */
 | |
| +struct utrace {
 | |
| +	spinlock_t lock;
 | |
| +	struct list_head attached, attaching;
 | |
| +
 | |
| +	struct task_struct *cloning;
 | |
| +
 | |
| +	struct utrace_engine *reporting;
 | |
| +
 | |
| +	enum utrace_resume_action resume:UTRACE_RESUME_BITS;
 | |
| +	unsigned int signal_handler:1;
 | |
| +	unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
 | |
| +	unsigned int death:1;	/* in utrace_report_death() now */
 | |
| +	unsigned int reap:1;	/* release_task() has run */
 | |
| +	unsigned int pending_attach:1; /* need splice_attaching() */
 | |
| +};
 | |
| +
 | |
| +static struct kmem_cache *utrace_cachep;
 | |
| +static struct kmem_cache *utrace_engine_cachep;
 | |
| +static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */
 | |
| +
 | |
| +static int __init utrace_init(void)
 | |
| +{
 | |
| +	utrace_cachep = KMEM_CACHE(utrace, SLAB_PANIC);
 | |
| +	utrace_engine_cachep = KMEM_CACHE(utrace_engine, SLAB_PANIC);
 | |
| +	return 0;
 | |
| +}
 | |
| +module_init(utrace_init);
 | |
| +
 | |
| +/*
 | |
| + * Set up @task.utrace for the first time.  We can have races
 | |
| + * between two utrace_attach_task() calls here.  The task_lock()
 | |
| + * governs installing the new pointer.  If another one got in first,
 | |
| + * we just punt the new one we allocated.
 | |
| + *
 | |
| + * This returns false only in case of a memory allocation failure.
 | |
| + */
 | |
| +static bool utrace_task_alloc(struct task_struct *task)
 | |
| +{
 | |
| +	struct utrace *utrace = kmem_cache_zalloc(utrace_cachep, GFP_KERNEL);
 | |
| +	if (unlikely(!utrace))
 | |
| +		return false;
 | |
| +	spin_lock_init(&utrace->lock);
 | |
| +	INIT_LIST_HEAD(&utrace->attached);
 | |
| +	INIT_LIST_HEAD(&utrace->attaching);
 | |
| +	utrace->resume = UTRACE_RESUME;
 | |
| +	task_lock(task);
 | |
| +	if (likely(!task->utrace)) {
 | |
| +		/*
 | |
| +		 * This barrier makes sure the initialization of the struct
 | |
| +		 * precedes the installation of the pointer.  This pairs
 | |
| +		 * with smp_read_barrier_depends() in task_utrace_struct().
 | |
| +		 */
 | |
| +		smp_wmb();
 | |
| +		task->utrace = utrace;
 | |
| +	}
 | |
| +	task_unlock(task);
 | |
| +
 | |
| +	if (unlikely(task->utrace != utrace))
 | |
| +		kmem_cache_free(utrace_cachep, utrace);
 | |
| +	return true;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is called via tracehook_free_task() from free_task()
 | |
| + * when @task is being deallocated.
 | |
| + */
 | |
| +void utrace_free_task(struct task_struct *task)
 | |
| +{
 | |
| +	kmem_cache_free(utrace_cachep, task->utrace);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is called when the task is safely quiescent, i.e. it won't consult
 | |
| + * utrace->attached without the lock.  Move any engines attached
 | |
| + * asynchronously from @utrace->attaching onto the @utrace->attached list.
 | |
| + */
 | |
| +static void splice_attaching(struct utrace *utrace)
 | |
| +{
 | |
| +	lockdep_assert_held(&utrace->lock);
 | |
| +	list_splice_tail_init(&utrace->attaching, &utrace->attached);
 | |
| +	utrace->pending_attach = 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is the exported function used by the utrace_engine_put() inline.
 | |
| + */
 | |
| +void __utrace_engine_release(struct kref *kref)
 | |
| +{
 | |
| +	struct utrace_engine *engine = container_of(kref, struct utrace_engine,
 | |
| +						    kref);
 | |
| +	BUG_ON(!list_empty(&engine->entry));
 | |
| +	if (engine->release)
 | |
| +		(*engine->release)(engine->data);
 | |
| +	kmem_cache_free(utrace_engine_cachep, engine);
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(__utrace_engine_release);
 | |
| +
 | |
| +static bool engine_matches(struct utrace_engine *engine, int flags,
 | |
| +			   const struct utrace_engine_ops *ops, void *data)
 | |
| +{
 | |
| +	if ((flags & UTRACE_ATTACH_MATCH_OPS) && engine->ops != ops)
 | |
| +		return false;
 | |
| +	if ((flags & UTRACE_ATTACH_MATCH_DATA) && engine->data != data)
 | |
| +		return false;
 | |
| +	return engine->ops && engine->ops != &utrace_detached_ops;
 | |
| +}
 | |
| +
 | |
| +static struct utrace_engine *find_matching_engine(
 | |
| +	struct utrace *utrace, int flags,
 | |
| +	const struct utrace_engine_ops *ops, void *data)
 | |
| +{
 | |
| +	struct utrace_engine *engine;
 | |
| +	list_for_each_entry(engine, &utrace->attached, entry)
 | |
| +		if (engine_matches(engine, flags, ops, data))
 | |
| +			return engine;
 | |
| +	list_for_each_entry(engine, &utrace->attaching, entry)
 | |
| +		if (engine_matches(engine, flags, ops, data))
 | |
| +			return engine;
 | |
| +	return NULL;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Enqueue @engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
 | |
| + */
 | |
| +static int utrace_add_engine(struct task_struct *target,
 | |
| +			     struct utrace *utrace,
 | |
| +			     struct utrace_engine *engine,
 | |
| +			     int flags,
 | |
| +			     const struct utrace_engine_ops *ops,
 | |
| +			     void *data)
 | |
| +{
 | |
| +	int ret;
 | |
| +
 | |
| +	spin_lock(&utrace->lock);
 | |
| +
 | |
| +	ret = -EEXIST;
 | |
| +	if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
 | |
| +	     unlikely(find_matching_engine(utrace, flags, ops, data)))
 | |
| +		goto unlock;
 | |
| +
 | |
| +	/*
 | |
| +	 * In case we had no engines before, make sure that
 | |
| +	 * utrace_flags is not zero. Since we did unlock+lock
 | |
| +	 * at least once after utrace_task_alloc() installed
 | |
| +	 * ->utrace, we have the necessary barrier which pairs
 | |
| +	 * with rmb() in task_utrace_struct().
 | |
| +	 */
 | |
| +	ret = -ESRCH;
 | |
| +	if (!target->utrace_flags) {
 | |
| +		target->utrace_flags = UTRACE_EVENT(REAP);
 | |
| +		/*
 | |
| +		 * If we race with tracehook_prepare_release_task()
 | |
| +		 * make sure that either it sees utrace_flags != 0
 | |
| +		 * or we see exit_state == EXIT_DEAD.
 | |
| +		 */
 | |
| +		smp_mb();
 | |
| +		if (unlikely(target->exit_state == EXIT_DEAD)) {
 | |
| +			target->utrace_flags = 0;
 | |
| +			goto unlock;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Put the new engine on the pending ->attaching list.
 | |
| +	 * Make sure it gets onto the ->attached list by the next
 | |
| +	 * time it's examined.  Setting ->pending_attach ensures
 | |
| +	 * that start_report() takes the lock and splices the lists
 | |
| +	 * before the next new reporting pass.
 | |
| +	 *
 | |
| +	 * When target == current, it would be safe just to call
 | |
| +	 * splice_attaching() right here.  But if we're inside a
 | |
| +	 * callback, that would mean the new engine also gets
 | |
| +	 * notified about the event that precipitated its own
 | |
| +	 * creation.  This is not what the user wants.
 | |
| +	 */
 | |
| +	list_add_tail(&engine->entry, &utrace->attaching);
 | |
| +	utrace->pending_attach = 1;
 | |
| +	utrace_engine_get(engine);
 | |
| +	ret = 0;
 | |
| +unlock:
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	return ret;
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * utrace_attach_task - attach new engine, or look up an attached engine
 | |
| + * @target:	thread to attach to
 | |
| + * @flags:	flag bits combined with OR, see below
 | |
| + * @ops:	callback table for new engine
 | |
| + * @data:	engine private data pointer
 | |
| + *
 | |
| + * The caller must ensure that the @target thread does not get freed,
 | |
| + * i.e. hold a ref or be its parent.  It is always safe to call this
 | |
| + * on @current, or on the @child pointer in a @report_clone callback.
 | |
| + * For most other cases, it's easier to use utrace_attach_pid() instead.
 | |
| + *
 | |
| + * UTRACE_ATTACH_CREATE:
 | |
| + * Create a new engine.  If %UTRACE_ATTACH_CREATE is not specified, you
 | |
| + * only look up an existing engine already attached to the thread.
 | |
| + *
 | |
| + * UTRACE_ATTACH_EXCLUSIVE:
 | |
| + * Attempting to attach a second (matching) engine fails with -%EEXIST.
 | |
| + *
 | |
| + * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops.
 | |
| + * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data.
 | |
| + *
 | |
| + * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA
 | |
| + * match the first among any engines attached to @target.  That means that
 | |
| + * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there
 | |
| + * are any engines on @target at all.
 | |
| + */
 | |
| +struct utrace_engine *utrace_attach_task(
 | |
| +	struct task_struct *target, int flags,
 | |
| +	const struct utrace_engine_ops *ops, void *data)
 | |
| +{
 | |
| +	struct utrace *utrace = task_utrace_struct(target);
 | |
| +	struct utrace_engine *engine;
 | |
| +	int ret;
 | |
| +
 | |
| +	if (!(flags & UTRACE_ATTACH_CREATE)) {
 | |
| +		if (unlikely(!utrace))
 | |
| +			return ERR_PTR(-ENOENT);
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		engine = find_matching_engine(utrace, flags, ops, data);
 | |
| +		if (engine)
 | |
| +			utrace_engine_get(engine);
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +		return engine ?: ERR_PTR(-ENOENT);
 | |
| +	}
 | |
| +
 | |
| +	if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops))
 | |
| +		return ERR_PTR(-EINVAL);
 | |
| +
 | |
| +	if (unlikely(target->flags & PF_KTHREAD))
 | |
| +		/*
 | |
| +		 * Silly kernel, utrace is for users!
 | |
| +		 */
 | |
| +		return ERR_PTR(-EPERM);
 | |
| +
 | |
| +	if (!utrace) {
 | |
| +		if (unlikely(!utrace_task_alloc(target)))
 | |
| +			return ERR_PTR(-ENOMEM);
 | |
| +		utrace = task_utrace_struct(target);
 | |
| +	}
 | |
| +
 | |
| +	engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
 | |
| +	if (unlikely(!engine))
 | |
| +		return ERR_PTR(-ENOMEM);
 | |
| +
 | |
| +	/*
 | |
| +	 * Initialize the new engine structure.  It starts out with one ref
 | |
| +	 * to return.  utrace_add_engine() adds another for being attached.
 | |
| +	 */
 | |
| +	kref_init(&engine->kref);
 | |
| +	engine->flags = 0;
 | |
| +	engine->ops = ops;
 | |
| +	engine->data = data;
 | |
| +	engine->release = ops->release;
 | |
| +
 | |
| +	ret = utrace_add_engine(target, utrace, engine, flags, ops, data);
 | |
| +
 | |
| +	if (unlikely(ret)) {
 | |
| +		kmem_cache_free(utrace_engine_cachep, engine);
 | |
| +		engine = ERR_PTR(ret);
 | |
| +	}
 | |
| +
 | |
| +	return engine;
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(utrace_attach_task);
 | |
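As a caller-side sketch (hypothetical code, not part of the patch): an engine that wants at most one instance of itself per thread attaches exclusively, and can later find that engine again by matching on @ops:

static struct utrace_engine *my_attach(struct task_struct *task)
{
	/* Fails with -EEXIST if this ops table is already attached. */
	return utrace_attach_task(task,
				  UTRACE_ATTACH_CREATE |
				  UTRACE_ATTACH_EXCLUSIVE |
				  UTRACE_ATTACH_MATCH_OPS,
				  &my_utrace_ops, NULL);
}

static struct utrace_engine *my_find(struct task_struct *task)
{
	/* No UTRACE_ATTACH_CREATE: only look up an existing engine;
	 * the caller must utrace_engine_put() the ref this returns. */
	return utrace_attach_task(task, UTRACE_ATTACH_MATCH_OPS,
				  &my_utrace_ops, NULL);
}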
| +
 | |
| +/**
 | |
| + * utrace_attach_pid - attach new engine, or look up an attached engine
 | |
| + * @pid:	&struct pid pointer representing thread to attach to
 | |
| + * @flags:	flag bits combined with OR, see utrace_attach_task()
 | |
| + * @ops:	callback table for new engine
 | |
| + * @data:	engine private data pointer
 | |
| + *
 | |
| + * This is the same as utrace_attach_task(), but takes a &struct pid
 | |
| + * pointer rather than a &struct task_struct pointer.  The caller must
 | |
| + * hold a ref on @pid, but does not need to worry about the task
 | |
| + * staying valid.  If it's been reaped so that @pid points nowhere,
 | |
| + * then this call returns -%ESRCH.
 | |
| + */
 | |
| +struct utrace_engine *utrace_attach_pid(
 | |
| +	struct pid *pid, int flags,
 | |
| +	const struct utrace_engine_ops *ops, void *data)
 | |
| +{
 | |
| +	struct utrace_engine *engine = ERR_PTR(-ESRCH);
 | |
| +	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 | |
| +	if (task) {
 | |
| +		engine = utrace_attach_task(task, flags, ops, data);
 | |
| +		put_task_struct(task);
 | |
| +	}
 | |
| +	return engine;
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(utrace_attach_pid);
 | |
| +
 | |
| +/*
 | |
| + * When an engine is detached, the target thread may still see it and
 | |
| + * make callbacks until it quiesces.  We install a special ops vector
 | |
| + * with these two callbacks.  When the target thread quiesces, it can
 | |
| + * safely free the engine itself.  For any event we will always get
 | |
| + * the report_quiesce() callback first, so we only need this one
 | |
| + * pointer to be set.  The only exception is report_reap(), so we
 | |
| + * supply that callback too.
 | |
| + */
 | |
| +static u32 utrace_detached_quiesce(u32 action, struct utrace_engine *engine,
 | |
| +				   unsigned long event)
 | |
| +{
 | |
| +	return UTRACE_DETACH;
 | |
| +}
 | |
| +
 | |
| +static void utrace_detached_reap(struct utrace_engine *engine,
 | |
| +				 struct task_struct *task)
 | |
| +{
 | |
| +}
 | |
| +
 | |
| +static const struct utrace_engine_ops utrace_detached_ops = {
 | |
| +	.report_quiesce = &utrace_detached_quiesce,
 | |
| +	.report_reap = &utrace_detached_reap
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * The caller has to hold a ref on the engine.  If the attached flag is
 | |
| + * true (all but utrace_barrier() calls), the engine is supposed to be
 | |
| + * attached.  If the attached flag is false (utrace_barrier() only),
 | |
| + * then return -ERESTARTSYS for an engine marked for detach but not yet
 | |
| + * fully detached.  The task pointer can be invalid if the engine is
 | |
| + * detached.
 | |
| + *
 | |
| + * Get the utrace lock for the target task.
 | |
| + * Returns the struct if locked, or ERR_PTR(-errno).
 | |
| + *
 | |
| + * This has to be robust against races with:
 | |
| + *	utrace_control(target, UTRACE_DETACH) calls
 | |
| + *	UTRACE_DETACH after reports
 | |
| + *	utrace_report_death
 | |
| + *	utrace_release_task
 | |
| + */
 | |
| +static struct utrace *get_utrace_lock(struct task_struct *target,
 | |
| +				      struct utrace_engine *engine,
 | |
| +				      bool attached)
 | |
| +	__acquires(utrace->lock)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +
 | |
| +	rcu_read_lock();
 | |
| +
 | |
| +	/*
 | |
| +	 * If this engine was already detached, bail out before we look at
 | |
| +	 * the task_struct pointer at all.  If it's detached after this
 | |
| +	 * check, then RCU is still keeping this task_struct pointer valid.
 | |
| +	 *
 | |
| +	 * The ops pointer is NULL when the engine is fully detached.
 | |
| +	 * It's &utrace_detached_ops when it's marked detached but still
 | |
| +	 * on the list.  In the latter case, utrace_barrier() still works,
 | |
| +	 * since the target might be in the middle of an old callback.
 | |
| +	 */
 | |
| +	if (unlikely(!engine->ops)) {
 | |
| +		rcu_read_unlock();
 | |
| +		return ERR_PTR(-ESRCH);
 | |
| +	}
 | |
| +
 | |
| +	if (unlikely(engine->ops == &utrace_detached_ops)) {
 | |
| +		rcu_read_unlock();
 | |
| +		return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS);
 | |
| +	}
 | |
| +
 | |
| +	utrace = task_utrace_struct(target);
 | |
| +	spin_lock(&utrace->lock);
 | |
| +	if (unlikely(utrace->reap) || unlikely(!engine->ops) ||
 | |
| +	    unlikely(engine->ops == &utrace_detached_ops)) {
 | |
| +		/*
 | |
| +		 * By the time we got the utrace lock,
 | |
| +		 * it had been reaped or detached already.
 | |
| +		 */
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +		utrace = ERR_PTR(-ESRCH);
 | |
| +		if (!attached && engine->ops == &utrace_detached_ops)
 | |
| +			utrace = ERR_PTR(-ERESTARTSYS);
 | |
| +	}
 | |
| +	rcu_read_unlock();
 | |
| +
 | |
| +	return utrace;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Now that we don't hold any locks, run through any
 | |
| + * detached engines and free their references.  Each
 | |
| + * engine had one implicit ref while it was attached.
 | |
| + */
 | |
| +static void put_detached_list(struct list_head *list)
 | |
| +{
 | |
| +	struct utrace_engine *engine, *next;
 | |
| +	list_for_each_entry_safe(engine, next, list, entry) {
 | |
| +		list_del_init(&engine->entry);
 | |
| +		utrace_engine_put(engine);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * We use an extra bit in utrace_engine.flags past the event bits,
 | |
| + * to record whether the engine is keeping the target thread stopped.
 | |
| + *
 | |
| + * This bit is set in task_struct.utrace_flags whenever it is set in any
 | |
| + * engine's flags.  Only utrace_reset() resets it in utrace_flags.
 | |
| + */
 | |
| +#define ENGINE_STOP		(1UL << _UTRACE_NEVENTS)
 | |
| +
 | |
| +static void mark_engine_wants_stop(struct task_struct *task,
 | |
| +				   struct utrace_engine *engine)
 | |
| +{
 | |
| +	engine->flags |= ENGINE_STOP;
 | |
| +	task->utrace_flags |= ENGINE_STOP;
 | |
| +}
 | |
| +
 | |
| +static void clear_engine_wants_stop(struct utrace_engine *engine)
 | |
| +{
 | |
| +	engine->flags &= ~ENGINE_STOP;
 | |
| +}
 | |
| +
 | |
| +static bool engine_wants_stop(struct utrace_engine *engine)
 | |
| +{
 | |
| +	return (engine->flags & ENGINE_STOP) != 0;
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * utrace_set_events - choose which event reports a tracing engine gets
 | |
| + * @target:		thread to affect
 | |
| + * @engine:		attached engine to affect
 | |
| + * @events:		new event mask
 | |
| + *
 | |
| + * This changes the set of events for which @engine wants callbacks made.
 | |
| + *
 | |
| + * This fails with -%EALREADY and does nothing if you try to clear
 | |
| + * %UTRACE_EVENT(%DEATH) when the @report_death callback may already have
 | |
| + * begun, or if you try to newly set %UTRACE_EVENT(%DEATH) or
 | |
| + * %UTRACE_EVENT(%QUIESCE) when @target is already dead or dying.
 | |
| + *
 | |
| + * This fails with -%ESRCH if you try to clear %UTRACE_EVENT(%REAP) when
 | |
| + * the @report_reap callback may already have begun, or when @target has
 | |
| + * already been detached, including forcible detach on reaping.
 | |
| + *
 | |
| + * If @target was stopped before the call, then after a successful call,
 | |
| + * no event callbacks not requested in @events will be made; if
 | |
| + * %UTRACE_EVENT(%QUIESCE) is included in @events, then a
 | |
| + * @report_quiesce callback will be made when @target resumes.
 | |
| + *
 | |
| + * If @target was not stopped and @events excludes some bits that were
 | |
| + * set before, this can return -%EINPROGRESS to indicate that @target
 | |
| + * may have been making some callback to @engine.  When this returns
 | |
| + * zero, you can be sure that no event callbacks you've disabled in
 | |
| + * @events can be made.  If @events only sets new bits that were not set
 | |
| + * before on @engine, then -%EINPROGRESS will never be returned.
 | |
| + *
 | |
| + * To synchronize after an -%EINPROGRESS return, see utrace_barrier().
 | |
| + *
 | |
| + * When @target is @current, -%EINPROGRESS is not returned.  But note
 | |
| + * that a newly-created engine will not receive any callbacks related to
 | |
| + * an event notification already in progress.  This call enables @events
 | |
| + * callbacks to be made as soon as @engine becomes eligible for any
 | |
| + * callbacks, see utrace_attach_task().
 | |
| + *
 | |
| + * These rules provide for coherent synchronization based on %UTRACE_STOP,
 | |
| + * even when %SIGKILL is breaking its normal simple rules.
 | |
| + */
 | |
| +int utrace_set_events(struct task_struct *target,
 | |
| +		      struct utrace_engine *engine,
 | |
| +		      unsigned long events)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +	unsigned long old_flags, old_utrace_flags;
 | |
| +	int ret = -EALREADY;
 | |
| +
 | |
| +	/*
 | |
| +	 * We just ignore the internal bit, so callers can use
 | |
| +	 * engine->flags to seed bitwise ops for our argument.
 | |
| +	 */
 | |
| +	events &= ~ENGINE_STOP;
 | |
| +
 | |
| +	utrace = get_utrace_lock(target, engine, true);
 | |
| +	if (unlikely(IS_ERR(utrace)))
 | |
| +		return PTR_ERR(utrace);
 | |
| +
 | |
| +	old_utrace_flags = target->utrace_flags;
 | |
| +	old_flags = engine->flags & ~ENGINE_STOP;
 | |
| +
 | |
| +	/*
 | |
| +	 * If utrace_report_death() is already in progress now,
 | |
| +	 * it's too late to clear the death event bits.
 | |
| +	 */
 | |
| +	if (((old_flags & ~events) & _UTRACE_DEATH_EVENTS) && utrace->death)
 | |
| +		goto unlock;
 | |
| +
 | |
| +	/*
 | |
| +	 * When setting these flags, it's essential that we really
 | |
| +	 * synchronize with exit_notify().  They cannot be set after
 | |
| +	 * exit_notify() takes the tasklist_lock.  By holding the read
 | |
| +	 * lock here while setting the flags, we ensure that the calls
 | |
| +	 * to tracehook_notify_death() and tracehook_report_death() will
 | |
| +	 * see the new flags.  This ensures that utrace_release_task()
 | |
| +	 * knows positively that utrace_report_death() will be called or
 | |
| +	 * that it won't.
 | |
| +	 */
 | |
| +	if ((events & ~old_flags) & _UTRACE_DEATH_EVENTS) {
 | |
| +		read_lock(&tasklist_lock);
 | |
| +		if (unlikely(target->exit_state)) {
 | |
| +			read_unlock(&tasklist_lock);
 | |
| +			goto unlock;
 | |
| +		}
 | |
| +		target->utrace_flags |= events;
 | |
| +		read_unlock(&tasklist_lock);
 | |
| +	}
 | |
| +
 | |
| +	engine->flags = events | (engine->flags & ENGINE_STOP);
 | |
| +	target->utrace_flags |= events;
 | |
| +
 | |
| +	if ((events & UTRACE_EVENT_SYSCALL) &&
 | |
| +	    !(old_utrace_flags & UTRACE_EVENT_SYSCALL))
 | |
| +		set_tsk_thread_flag(target, TIF_SYSCALL_TRACE);
 | |
| +
 | |
| +	ret = 0;
 | |
| +	if ((old_flags & ~events) && target != current &&
 | |
| +	    !task_is_stopped_or_traced(target) && !target->exit_state) {
 | |
| +		/*
 | |
| +		 * This barrier ensures that our engine->flags changes
 | |
| +		 * have hit before we examine utrace->reporting,
 | |
| +		 * pairing with the barrier in start_callback().  If
 | |
| +		 * @target has not yet hit finish_callback() to clear
 | |
| +		 * utrace->reporting, we might be in the middle of a
 | |
| +		 * callback to @engine.
 | |
| +		 */
 | |
| +		smp_mb();
 | |
| +		if (utrace->reporting == engine)
 | |
| +			ret = -EINPROGRESS;
 | |
| +	}
 | |
| +unlock:
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	return ret;
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(utrace_set_events);
 | |
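The -%EINPROGRESS case above pairs naturally with utrace_barrier(); a sketch of the synchronization idiom when narrowing the event mask of a live thread:

static int quiesce_events(struct task_struct *task,
			  struct utrace_engine *engine)
{
	int ret = utrace_set_events(task, engine, 0);

	if (ret == -EINPROGRESS)
		/* A callback we just disabled may still be running. */
		ret = utrace_barrier(task, engine);
	return ret;
}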
| +
 | |
| +/*
 | |
| + * Asynchronously mark an engine as being detached.
 | |
| + *
 | |
| + * This must work while the target thread races with us doing
 | |
| + * start_callback(), defined below.  It uses smp_rmb() between checking
 | |
| + * @engine->flags and using @engine->ops.  Here we change @engine->ops
 | |
| + * first, then use smp_wmb() before changing @engine->flags.  This ensures
 | |
| + * it can check the old flags before using the old ops, or check the old
 | |
| + * flags before using the new ops, or check the new flags before using the
 | |
| + * new ops, but can never check the new flags before using the old ops.
 | |
| + * Hence, utrace_detached_ops might be used with any old flags in place.
 | |
| + * It has report_quiesce() and report_reap() callbacks to handle all cases.
 | |
| + */
 | |
| +static void mark_engine_detached(struct utrace_engine *engine)
 | |
| +{
 | |
| +	engine->ops = &utrace_detached_ops;
 | |
| +	smp_wmb();
 | |
| +	engine->flags = UTRACE_EVENT(QUIESCE);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Get @target to stop and return true if it is already stopped now.
 | |
| + * If we return false, it will make some event callback soonish.
 | |
| + * Called with @utrace locked.
 | |
| + */
 | |
| +static bool utrace_do_stop(struct task_struct *target, struct utrace *utrace)
 | |
| +{
 | |
| +	if (task_is_stopped(target)) {
 | |
| +		/*
 | |
| +		 * Stopped is considered quiescent; when it wakes up, it will
 | |
| +		 * go through utrace_finish_stop() before doing anything else.
 | |
| +		 */
 | |
| +		spin_lock_irq(&target->sighand->siglock);
 | |
| +		if (likely(task_is_stopped(target)))
 | |
| +			__set_task_state(target, TASK_TRACED);
 | |
| +		spin_unlock_irq(&target->sighand->siglock);
 | |
| +	} else if (utrace->resume > UTRACE_REPORT) {
 | |
| +		utrace->resume = UTRACE_REPORT;
 | |
| +		set_notify_resume(target);
 | |
| +	}
 | |
| +
 | |
| +	return task_is_traced(target);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * If the target is not dead it should not be in tracing
 | |
| + * stop any more.  Wake it unless it's in job control stop.
 | |
| + */
 | |
| +static void utrace_wakeup(struct task_struct *target, struct utrace *utrace)
 | |
| +{
 | |
| +	lockdep_assert_held(&utrace->lock);
 | |
| +	spin_lock_irq(&target->sighand->siglock);
 | |
| +	if (target->signal->flags & SIGNAL_STOP_STOPPED ||
 | |
| +	    target->signal->group_stop_count)
 | |
| +		target->state = TASK_STOPPED;
 | |
| +	else
 | |
| +		wake_up_state(target, __TASK_TRACED);
 | |
| +	spin_unlock_irq(&target->sighand->siglock);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is called when there might be some detached engines on the list or
 | |
| + * some stale bits in @task->utrace_flags.  Clean them up and recompute the
 | |
| + * flags.  Returns true if we're now fully detached.
 | |
| + *
 | |
| + * Called with @utrace->lock held, returns with it released.
 | |
| + * After this returns, @utrace might be freed if everything detached.
 | |
| + */
 | |
| +static bool utrace_reset(struct task_struct *task, struct utrace *utrace)
 | |
| +	__releases(utrace->lock)
 | |
| +{
 | |
| +	struct utrace_engine *engine, *next;
 | |
| +	unsigned long flags = 0;
 | |
| +	LIST_HEAD(detached);
 | |
| +
 | |
| +	splice_attaching(utrace);
 | |
| +
 | |
| +	/*
 | |
| +	 * Update the set of events of interest from the union
 | |
| +	 * of the interests of the remaining tracing engines.
 | |
| +	 * For any engine marked detached, remove it from the list.
 | |
| +	 * We'll collect them on the detached list.
 | |
| +	 */
 | |
| +	list_for_each_entry_safe(engine, next, &utrace->attached, entry) {
 | |
| +		if (engine->ops == &utrace_detached_ops) {
 | |
| +			engine->ops = NULL;
 | |
| +			list_move(&engine->entry, &detached);
 | |
| +		} else {
 | |
| +			flags |= engine->flags | UTRACE_EVENT(REAP);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (task->exit_state) {
 | |
| +		/*
 | |
| +		 * Once it's already dead, we never install any flags
 | |
| +		 * except REAP.  When ->exit_state is set and events
 | |
| +		 * like DEATH are not set, then they never can be set.
 | |
| +		 * This ensures that utrace_release_task() knows
 | |
| +		 * positively that utrace_report_death() can never run.
 | |
| +		 */
 | |
| +		BUG_ON(utrace->death);
 | |
| +		flags &= UTRACE_EVENT(REAP);
 | |
| +	} else if (!(flags & UTRACE_EVENT_SYSCALL) &&
 | |
| +		   test_tsk_thread_flag(task, TIF_SYSCALL_TRACE)) {
 | |
| +		clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE);
 | |
| +	}
 | |
| +
 | |
| +	if (!flags) {
 | |
| +		/*
 | |
| +		 * No more engines; clear out the utrace state.
 | |
| +		 */
 | |
| +		utrace->resume = UTRACE_RESUME;
 | |
| +		utrace->signal_handler = 0;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * If no more engines want it stopped, wake it up.
 | |
| +	 */
 | |
| +	if (task_is_traced(task) && !(flags & ENGINE_STOP))
 | |
| +		utrace_wakeup(task, utrace);
 | |
| +
 | |
| +	/*
 | |
| +	 * In theory spin_lock() doesn't imply rcu_read_lock().
 | |
| +	 * Once we clear ->utrace_flags this task_struct can go away
 | |
| +	 * because tracehook_prepare_release_task() path does not take
 | |
| +	 * utrace->lock when ->utrace_flags == 0.
 | |
| +	 */
 | |
| +	rcu_read_lock();
 | |
| +	task->utrace_flags = flags;
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +	rcu_read_unlock();
 | |
| +
 | |
| +	put_detached_list(&detached);
 | |
| +
 | |
| +	return !flags;
 | |
| +}
 | |
| +
 | |
| +void utrace_finish_stop(void)
 | |
| +{
 | |
| +	/*
 | |
| +	 * If we were task_is_traced() and then SIGKILL'ed, make
 | |
| +	 * sure we do nothing until the tracer drops utrace->lock.
 | |
| +	 */
 | |
| +	if (unlikely(__fatal_signal_pending(current))) {
 | |
| +		struct utrace *utrace = task_utrace_struct(current);
 | |
| +		spin_unlock_wait(&utrace->lock);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Perform %UTRACE_STOP, i.e. block in TASK_TRACED until woken up.
 | |
| + * @task == current, @utrace == current->utrace, which is not locked.
 | |
| + * We may be woken up by SIGKILL even though some utrace
 | |
| + * engine may still want us to stay stopped.
 | |
| + */
 | |
| +static void utrace_stop(struct task_struct *task, struct utrace *utrace,
 | |
| +			enum utrace_resume_action action)
 | |
| +{
 | |
| +relock:
 | |
| +	spin_lock(&utrace->lock);
 | |
| +
 | |
| +	if (action < utrace->resume) {
 | |
| +		/*
 | |
| +		 * Ensure a reporting pass when we're resumed.
 | |
| +		 */
 | |
| +		utrace->resume = action;
 | |
| +		if (action == UTRACE_INTERRUPT)
 | |
| +			set_thread_flag(TIF_SIGPENDING);
 | |
| +		else
 | |
| +			set_thread_flag(TIF_NOTIFY_RESUME);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * If the ENGINE_STOP bit is clear in utrace_flags, that means
 | |
| +	 * utrace_reset() ran after we processed some UTRACE_STOP return
 | |
| +	 * values from callbacks to get here.  If all engines have detached
 | |
| +	 * or resumed us, we don't stop.  This check doesn't require
 | |
| +	 * siglock, but it should follow the interrupt/report bookkeeping
 | |
| +	 * steps (this can matter for UTRACE_RESUME but not UTRACE_DETACH).
 | |
| +	 */
 | |
| +	if (unlikely(!(task->utrace_flags & ENGINE_STOP))) {
 | |
| +		utrace_reset(task, utrace);
 | |
| +		if (task->utrace_flags & ENGINE_STOP)
 | |
| +			goto relock;
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * The siglock protects us against signals.  As well as SIGKILL
 | |
| +	 * waking us up, we must synchronize with the signal bookkeeping
 | |
| +	 * for stop signals and SIGCONT.
 | |
| +	 */
 | |
| +	spin_lock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +	if (unlikely(__fatal_signal_pending(task))) {
 | |
| +		spin_unlock_irq(&task->sighand->siglock);
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	__set_current_state(TASK_TRACED);
 | |
| +
 | |
| +	/*
 | |
| +	 * If there is a group stop in progress,
 | |
| +	 * we must participate in the bookkeeping.
 | |
| +	 */
 | |
| +	if (unlikely(task->signal->group_stop_count) &&
 | |
| +			!--task->signal->group_stop_count)
 | |
| +		task->signal->flags = SIGNAL_STOP_STOPPED;
 | |
| +
 | |
| +	spin_unlock_irq(&task->sighand->siglock);
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	schedule();
 | |
| +
 | |
| +	utrace_finish_stop();
 | |
| +
 | |
| +	/*
 | |
| +	 * While in TASK_TRACED, we were considered "frozen enough".
 | |
| +	 * Now that we woke up, it's crucial that, if we're supposed to
 | |
| +	 * be frozen, we freeze now before running anything substantial.
 | |
| +	 */
 | |
| +	try_to_freeze();
 | |
| +
 | |
| +	/*
 | |
| +	 * While we were in TASK_TRACED, complete_signal() considered
 | |
| +	 * us "uninterested" in signal wakeups.  Now make sure our
 | |
| +	 * TIF_SIGPENDING state is correct for normal running.
 | |
| +	 */
 | |
| +	spin_lock_irq(&task->sighand->siglock);
 | |
| +	recalc_sigpending();
 | |
| +	spin_unlock_irq(&task->sighand->siglock);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called by release_task() with @reap set to true.
 | |
| + * Called by utrace_report_death() with @reap set to false.
 | |
| + * On reap, make report_reap callbacks and clean out @utrace
 | |
| + * unless still making callbacks.  On death, update bookkeeping
 | |
| + * and handle the reap work if release_task() came in first.
 | |
| + */
 | |
| +void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
 | |
| +		       bool reap)
 | |
| +{
 | |
| +	struct utrace_engine *engine, *next;
 | |
| +	struct list_head attached;
 | |
| +
 | |
| +	spin_lock(&utrace->lock);
 | |
| +
 | |
| +	if (reap) {
 | |
| +		/*
 | |
| +		 * If the target will do some final callbacks but hasn't
 | |
| +		 * finished them yet, we know because it clears these event
 | |
| +		 * bits after it's done.  Instead of cleaning up here and
 | |
| +		 * requiring utrace_report_death() to cope with it, we
 | |
| +		 * delay the REAP report and the teardown until after the
 | |
| +		 * target finishes its death reports.
 | |
| +		 */
 | |
| +		utrace->reap = 1;
 | |
| +
 | |
| +		if (target->utrace_flags & _UTRACE_DEATH_EVENTS) {
 | |
| +			spin_unlock(&utrace->lock);
 | |
| +			return;
 | |
| +		}
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * After we unlock with this flag clear, any competing
 | |
| +		 * utrace_control/utrace_set_events calls know that we've
 | |
| +		 * finished our callbacks and any detach bookkeeping.
 | |
| +		 */
 | |
| +		utrace->death = 0;
 | |
| +
 | |
| +		if (!utrace->reap) {
 | |
| +			/*
 | |
| +			 * We're just dead, not reaped yet.  This will
 | |
| +			 * reset @target->utrace_flags so the later call
 | |
| +			 * with @reap set won't hit the check above.
 | |
| +			 */
 | |
| +			utrace_reset(target, utrace);
 | |
| +			return;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * utrace_add_engine() checks ->utrace_flags != 0.  Since
 | |
| +	 * @utrace->reap is set, nobody can set or clear UTRACE_EVENT(REAP)
 | |
| +	 * in @engine->flags or change @engine->ops and nobody can change
 | |
| +	 * @utrace->attached after we drop the lock.
 | |
| +	 */
 | |
| +	target->utrace_flags = 0;
 | |
| +
 | |
| +	/*
 | |
| +	 * We clear out @utrace->attached before we drop the lock so
 | |
| +	 * that find_matching_engine() can't come across any old engine
 | |
| +	 * while we are busy tearing it down.
 | |
| +	 */
 | |
| +	list_replace_init(&utrace->attached, &attached);
 | |
| +	list_splice_tail_init(&utrace->attaching, &attached);
 | |
| +
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	list_for_each_entry_safe(engine, next, &attached, entry) {
 | |
| +		if (engine->flags & UTRACE_EVENT(REAP))
 | |
| +			engine->ops->report_reap(engine, target);
 | |
| +
 | |
| +		engine->ops = NULL;
 | |
| +		engine->flags = 0;
 | |
| +		list_del_init(&engine->entry);
 | |
| +
 | |
| +		utrace_engine_put(engine);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * You can't do anything to a dead task but detach it.
 | |
| + * If release_task() has been called, you can't do that.
 | |
| + *
 | |
| + * On the exit path, DEATH and QUIESCE event bits are set only
 | |
| + * before utrace_report_death() has taken the lock.  At that point,
 | |
| + * the death report will come soon, so disallow detach until it's
 | |
| + * done.  This prevents us from racing with it detaching itself.
 | |
| + *
 | |
| + * Called only when @target->exit_state is nonzero.
 | |
| + */
 | |
| +static inline int utrace_control_dead(struct task_struct *target,
 | |
| +				      struct utrace *utrace,
 | |
| +				      enum utrace_resume_action action)
 | |
| +{
 | |
| +	lockdep_assert_held(&utrace->lock);
 | |
| +
 | |
| +	if (action != UTRACE_DETACH || unlikely(utrace->reap))
 | |
| +		return -ESRCH;
 | |
| +
 | |
| +	if (unlikely(utrace->death))
 | |
| +		/*
 | |
| +		 * We have already started the death report.  We can't
 | |
| +		 * prevent the report_death and report_reap callbacks,
 | |
| +		 * so tell the caller they will happen.
 | |
| +		 */
 | |
| +		return -EALREADY;
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/**
 | |
| + * utrace_control - control a thread being traced by a tracing engine
 | |
| + * @target:		thread to affect
 | |
| + * @engine:		attached engine to affect
 | |
| + * @action:		&enum utrace_resume_action for thread to do
 | |
| + *
 | |
| + * This is how a tracing engine asks a traced thread to do something.
 | |
| + * This call is controlled by the @action argument, which has the
 | |
| + * same meaning as the &enum utrace_resume_action value returned by
 | |
| + * event reporting callbacks.
 | |
| + *
 | |
| + * If @target is already dead (@target->exit_state nonzero),
 | |
| + * all actions except %UTRACE_DETACH fail with -%ESRCH.
 | |
| + *
 | |
| + * The following sections describe each option for the @action argument.
 | |
| + *
 | |
| + * UTRACE_DETACH:
 | |
| + *
 | |
| + * After this, the @engine data structure is no longer accessible,
 | |
| + * and the thread might be reaped.  The thread will start running
 | |
| + * again if it was stopped and no longer has any attached engines
 | |
| + * that want it stopped.
 | |
| + *
 | |
| + * If the @report_reap callback may already have begun, this fails
 | |
| + * with -%ESRCH.  If the @report_death callback may already have
 | |
| + * begun, this fails with -%EALREADY.
 | |
| + *
 | |
| + * If @target is not already stopped, then a callback to this engine
 | |
| + * might be in progress or about to start on another CPU.  If so,
 | |
| + * then this returns -%EINPROGRESS; the detach happens as soon as
 | |
| + * the pending callback is finished.  To synchronize after an
 | |
| + * -%EINPROGRESS return, see utrace_barrier().
 | |
| + *
 | |
| + * If @target is properly stopped before utrace_control() is called,
 | |
| + * then after successful return it's guaranteed that no more callbacks
 | |
| + * to the @engine->ops vector will be made.
 | |
| + *
 | |
| + * The only exception is %SIGKILL (and exec or group-exit by another
 | |
| + * thread in the group), which can cause asynchronous @report_death
 | |
| + * and/or @report_reap callbacks even when %UTRACE_STOP was used.
 | |
| + * (In that event, this fails with -%ESRCH or -%EALREADY, see above.)
 | |
| + *
 | |
| + * UTRACE_STOP:
 | |
| + *
 | |
| + * This asks that @target stop running.  This returns 0 only if
 | |
| + * @target is already stopped, either for tracing or for job
 | |
| + * control.  Then @target will remain stopped until another
 | |
| + * utrace_control() call is made on @engine; @target can be woken
 | |
| + * only by %SIGKILL (or equivalent, such as exec or termination by
 | |
| + * another thread in the same thread group).
 | |
| + *
 | |
| + * This returns -%EINPROGRESS if @target is not already stopped.
 | |
| + * Then the effect is like %UTRACE_REPORT.  A @report_quiesce or
 | |
| + * @report_signal callback will be made soon.  Your callback can
 | |
| + * then return %UTRACE_STOP to keep @target stopped.
 | |
| + *
 | |
| + * This does not interrupt system calls in progress, including ones
 | |
| + * that sleep for a long time.  For that, use %UTRACE_INTERRUPT.
 | |
| + * To interrupt system calls and then keep @target stopped, your
 | |
| + * @report_signal callback can return %UTRACE_STOP.
 | |
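| + *
 | |
| + * As an illustrative sketch only (not code added by this patch), a
 | |
| + * caller might request a stop like this, where wait_for_stop_report()
 | |
| + * is a hypothetical helper the engine builds around its own
 | |
| + * @report_quiesce callback:
 | |
| + *
 | |
| + *	error = utrace_control(task, engine, UTRACE_STOP);
 | |
| + *	if (error == -EINPROGRESS)
 | |
| + *		error = wait_for_stop_report(engine);
 | |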
| + *
 | |
| + * UTRACE_RESUME:
 | |
| + *
 | |
| + * Just let @target continue running normally, reversing the effect
 | |
| + * of a previous %UTRACE_STOP.  If another engine is keeping @target
 | |
| + * stopped, then it remains stopped until all engines let it resume.
 | |
| + * If @target was not stopped, this has no effect.
 | |
| + *
 | |
| + * UTRACE_REPORT:
 | |
| + *
 | |
| + * This is like %UTRACE_RESUME, but also ensures that there will be
 | |
| + * a @report_quiesce or @report_signal callback made soon.  If
 | |
| + * @target had been stopped, then there will be a callback before it
 | |
| + * resumes running normally.  If another engine is keeping @target
 | |
| + * stopped, then there might be no callbacks until all engines let
 | |
| + * it resume.
 | |
| + *
 | |
| + * Since this is meaningless unless @report_quiesce callbacks will
 | |
| + * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
 | |
| + *
 | |
| + * UTRACE_INTERRUPT:
 | |
| + *
 | |
| + * This is like %UTRACE_REPORT, but ensures that @target will make a
 | |
| + * @report_signal callback before it resumes or delivers signals.
 | |
| + * If @target was in a system call or about to enter one, work in
 | |
| + * progress will be interrupted as if by %SIGSTOP.  If another
 | |
| + * engine is keeping @target stopped, then there might be no
 | |
| + * callbacks until all engines let it resume.
 | |
| + *
 | |
| + * This gives @engine an opportunity to introduce a forced signal
 | |
| + * disposition via its @report_signal callback.
 | |
| + *
 | |
| + * UTRACE_SINGLESTEP:
 | |
| + *
 | |
| + * It's invalid to use this unless arch_has_single_step() returned true.
 | |
| + * This is like %UTRACE_RESUME, but resumes for one user instruction only.
 | |
| + *
 | |
| + * Note that passing %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP to
 | |
| + * utrace_control() or returning it from an event callback alone does
 | |
| + * not necessarily ensure that stepping will be enabled.  If there are
 | |
| + * more callbacks made to any engine before returning to user mode,
 | |
| + * then the resume action is chosen only by the last set of callbacks.
 | |
| + * To be sure, enable %UTRACE_EVENT(%QUIESCE) and look for the
 | |
| + * @report_quiesce callback with a zero event mask, or the
 | |
| + * @report_signal callback with %UTRACE_SIGNAL_REPORT.
 | |
| + *
 | |
| + * Since this is not robust unless @report_quiesce callbacks will
 | |
| + * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
 | |
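| + *
 | |
| + * For example (a hypothetical engine callback, not part of this
 | |
| + * patch), an engine can re-assert stepping on that final pass, the
 | |
| + * one made with a zero event mask:
 | |
| + *
 | |
| + *	static u32 my_report_quiesce(u32 action,
 | |
| + *				     struct utrace_engine *engine,
 | |
| + *				     unsigned long event)
 | |
| + *	{
 | |
| + *		return event ? UTRACE_RESUME : UTRACE_SINGLESTEP;
 | |
| + *	}
 | |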
| + *
 | |
| + * UTRACE_BLOCKSTEP:
 | |
| + *
 | |
| + * It's invalid to use this unless arch_has_block_step() returned true.
 | |
| + * This is like %UTRACE_SINGLESTEP, but resumes for one whole basic
 | |
| + * block of user instructions.
 | |
| + *
 | |
| + * Since this is not robust unless @report_quiesce callbacks will
 | |
| + * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
 | |
| + *
 | |
| + * %UTRACE_BLOCKSTEP devolves to %UTRACE_SINGLESTEP when another
 | |
| + * tracing engine is using %UTRACE_SINGLESTEP at the same time.
 | |
| + */
 | |
| +int utrace_control(struct task_struct *target,
 | |
| +		   struct utrace_engine *engine,
 | |
| +		   enum utrace_resume_action action)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +	bool reset;
 | |
| +	int ret;
 | |
| +
 | |
| +	if (unlikely(action >= UTRACE_RESUME_MAX)) {
 | |
| +		WARN(1, "invalid action argument to utrace_control()!");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * This is a sanity check for a programming error in the caller.
 | |
| +	 * Their request can only work properly in all cases by relying on
 | |
| +	 * a follow-up callback, but they didn't set one up!  This check
 | |
| +	 * doesn't do locking, but it shouldn't matter.  The caller has to
 | |
| +	 * be synchronously sure the callback is set up to be operating the
 | |
| +	 * interface properly.
 | |
| +	 */
 | |
| +	if (action >= UTRACE_REPORT && action < UTRACE_RESUME &&
 | |
| +	    unlikely(!(engine->flags & UTRACE_EVENT(QUIESCE)))) {
 | |
| +		WARN(1, "utrace_control() with no QUIESCE callback in place!");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	utrace = get_utrace_lock(target, engine, true);
 | |
| +	if (unlikely(IS_ERR(utrace)))
 | |
| +		return PTR_ERR(utrace);
 | |
| +
 | |
| +	reset = task_is_traced(target);
 | |
| +	ret = 0;
 | |
| +
 | |
| +	/*
 | |
| +	 * ->exit_state can change under us, this doesn't matter.
 | |
| +	 * We do not care about ->exit_state in fact, but we do
 | |
| +	 * care about ->reap and ->death. If either flag is set,
 | |
| +	 * we must also see ->exit_state != 0.
 | |
| +	 */
 | |
| +	if (unlikely(target->exit_state)) {
 | |
| +		ret = utrace_control_dead(target, utrace, action);
 | |
| +		if (ret) {
 | |
| +			spin_unlock(&utrace->lock);
 | |
| +			return ret;
 | |
| +		}
 | |
| +		reset = true;
 | |
| +	}
 | |
| +
 | |
| +	switch (action) {
 | |
| +	case UTRACE_STOP:
 | |
| +		mark_engine_wants_stop(target, engine);
 | |
| +		if (!reset && !utrace_do_stop(target, utrace))
 | |
| +			ret = -EINPROGRESS;
 | |
| +		reset = false;
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_DETACH:
 | |
| +		if (engine_wants_stop(engine))
 | |
| +			target->utrace_flags &= ~ENGINE_STOP;
 | |
| +		mark_engine_detached(engine);
 | |
| +		reset = reset || utrace_do_stop(target, utrace);
 | |
| +		if (!reset) {
 | |
| +			/*
 | |
| +			 * As in utrace_set_events(), this barrier ensures
 | |
| +			 * that our engine->flags changes have hit before we
 | |
| +			 * examine utrace->reporting, pairing with the barrier
 | |
| +			 * in start_callback().  If @target has not yet hit
 | |
| +			 * finish_callback() to clear utrace->reporting, we
 | |
| +			 * might be in the middle of a callback to @engine.
 | |
| +			 */
 | |
| +			smp_mb();
 | |
| +			if (utrace->reporting == engine)
 | |
| +				ret = -EINPROGRESS;
 | |
| +		}
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_RESUME:
 | |
| +		/*
 | |
| +		 * This and all other cases imply resuming if stopped.
 | |
| +		 * There might not be another report before it just
 | |
| +		 * resumes, so make sure single-step is not left set.
 | |
| +		 */
 | |
| +		clear_engine_wants_stop(engine);
 | |
| +		if (likely(reset))
 | |
| +			user_disable_single_step(target);
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_BLOCKSTEP:
 | |
| +		/*
 | |
| +		 * Resume from stopped, step one block.
 | |
| +		 * We fall through to treat it like UTRACE_SINGLESTEP.
 | |
| +		 */
 | |
| +		if (unlikely(!arch_has_block_step())) {
 | |
| +			WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()");
 | |
| +			action = UTRACE_SINGLESTEP;
 | |
| +		}
 | |
| +
 | |
| +	case UTRACE_SINGLESTEP:
 | |
| +		/*
 | |
| +		 * Resume from stopped, step one instruction.
 | |
| +		 * We fall through to the UTRACE_REPORT case.
 | |
| +		 */
 | |
| +		if (unlikely(!arch_has_single_step())) {
 | |
| +			WARN(1,
 | |
| +			     "UTRACE_SINGLESTEP when !arch_has_single_step()");
 | |
| +			reset = false;
 | |
| +			ret = -EOPNOTSUPP;
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +	case UTRACE_REPORT:
 | |
| +		/*
 | |
| +		 * Make the thread call tracehook_notify_resume() soon.
 | |
| +		 * But don't bother if it's already been interrupted.
 | |
| +		 * In that case, utrace_get_signal() will be reporting soon.
 | |
| +		 */
 | |
| +		clear_engine_wants_stop(engine);
 | |
| +		if (action < utrace->resume) {
 | |
| +			utrace->resume = action;
 | |
| +			set_notify_resume(target);
 | |
| +		}
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_INTERRUPT:
 | |
| +		/*
 | |
| +		 * Make the thread call tracehook_get_signal() soon.
 | |
| +		 */
 | |
| +		clear_engine_wants_stop(engine);
 | |
| +		if (utrace->resume == UTRACE_INTERRUPT)
 | |
| +			break;
 | |
| +		utrace->resume = UTRACE_INTERRUPT;
 | |
| +
 | |
| +		/*
 | |
| +		 * If it's not already stopped, interrupt it now.  We need
 | |
| +		 * the siglock here in case it calls recalc_sigpending()
 | |
| +		 * and clears its own TIF_SIGPENDING.  By taking the lock,
 | |
| +		 * we've serialized any later recalc_sigpending() after our
 | |
| +		 * setting of utrace->resume to force it on.
 | |
| +		 */
 | |
| +		if (reset) {
 | |
| +			/*
 | |
| +			 * This is really just to keep the invariant that
 | |
| +			 * TIF_SIGPENDING is set with UTRACE_INTERRUPT.
 | |
| +			 * When it's stopped, we know it's always going
 | |
| +			 * through utrace_get_signal() and will recalculate.
 | |
| +			 */
 | |
| +			set_tsk_thread_flag(target, TIF_SIGPENDING);
 | |
| +		} else {
 | |
| +			struct sighand_struct *sighand;
 | |
| +			unsigned long irqflags;
 | |
| +			sighand = lock_task_sighand(target, &irqflags);
 | |
| +			if (likely(sighand)) {
 | |
| +				signal_wake_up(target, 0);
 | |
| +				unlock_task_sighand(target, &irqflags);
 | |
| +			}
 | |
| +		}
 | |
| +		break;
 | |
| +
 | |
| +	default:
 | |
| +		BUG();		/* We checked it on entry.  */
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Let the thread resume running.  If it's not stopped now,
 | |
| +	 * there is nothing more we need to do.
 | |
| +	 */
 | |
| +	if (reset)
 | |
| +		utrace_reset(target, utrace);
 | |
| +	else
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	return ret;
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(utrace_control);
 | |
| +
 | |
| +/**
 | |
| + * utrace_barrier - synchronize with simultaneous tracing callbacks
 | |
| + * @target:		thread to affect
 | |
| + * @engine:		engine to affect (can be detached)
 | |
| + *
 | |
| + * This blocks while @target might be in the midst of making a callback to
 | |
| + * @engine.  It can be interrupted by signals and will return -%ERESTARTSYS.
 | |
| + * A return value of zero means no callback from @target to @engine was
 | |
| + * in progress.  Any effect of its return value (such as %UTRACE_STOP) has
 | |
| + * already been applied to @engine.
 | |
| + *
 | |
| + * It's not necessary to keep the @target pointer alive for this call.
 | |
| + * It's only necessary to hold a ref on @engine.  This will return
 | |
| + * safely even if @target has been reaped and has no task refs.
 | |
| + *
 | |
| + * A successful return from utrace_barrier() guarantees its ordering
 | |
| + * with respect to utrace_set_events() and utrace_control() calls.  If
 | |
| + * @target was not properly stopped, event callbacks just disabled might
 | |
| + * still be in progress; utrace_barrier() waits until there is no chance
 | |
| + * an unwanted callback can be in progress.
 | |
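| + *
 | |
| + * For example (a sketch, not code added by this patch), a detach
 | |
| + * that must be complete before the caller tears down its own data
 | |
| + * can pair utrace_control() with a utrace_barrier() retry loop:
 | |
| + *
 | |
| + *	ret = utrace_control(task, engine, UTRACE_DETACH);
 | |
| + *	if (ret == -EINPROGRESS)
 | |
| + *		do
 | |
| + *			ret = utrace_barrier(task, engine);
 | |
| + *		while (ret == -ERESTARTSYS);
 | |
| + *	utrace_engine_put(engine);
 | |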
| + */
 | |
| +int utrace_barrier(struct task_struct *target, struct utrace_engine *engine)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +	int ret = -ERESTARTSYS;
 | |
| +
 | |
| +	if (unlikely(target == current))
 | |
| +		return 0;
 | |
| +
 | |
| +	do {
 | |
| +		utrace = get_utrace_lock(target, engine, false);
 | |
| +		if (unlikely(IS_ERR(utrace))) {
 | |
| +			ret = PTR_ERR(utrace);
 | |
| +			if (ret != -ERESTARTSYS)
 | |
| +				break;
 | |
| +		} else {
 | |
| +			/*
 | |
| +			 * All engine state changes are done while
 | |
| +			 * holding the lock, i.e. before we get here.
 | |
| +			 * Since we have the lock, we only need to
 | |
| +			 * worry about @target making a callback.
 | |
| +			 * When it has entered start_callback() but
 | |
| +			 * not yet gotten to finish_callback(), we
 | |
| +			 * will see utrace->reporting == @engine.
 | |
| +			 * When @target doesn't take the lock, it uses
 | |
| +			 * barriers to order setting utrace->reporting
 | |
| +			 * before it examines the engine state.
 | |
| +			 */
 | |
| +			if (utrace->reporting != engine)
 | |
| +				ret = 0;
 | |
| +			spin_unlock(&utrace->lock);
 | |
| +			if (!ret)
 | |
| +				break;
 | |
| +		}
 | |
| +		schedule_timeout_interruptible(1);
 | |
| +	} while (!signal_pending(current));
 | |
| +
 | |
| +	return ret;
 | |
| +}
 | |
| +EXPORT_SYMBOL_GPL(utrace_barrier);
 | |
| +
 | |
| +/*
 | |
| + * This is local state used for reporting loops, perhaps optimized away.
 | |
| + */
 | |
| +struct utrace_report {
 | |
| +	u32 result;
 | |
| +	enum utrace_resume_action action;
 | |
| +	enum utrace_resume_action resume_action;
 | |
| +	bool detaches;
 | |
| +	bool spurious;
 | |
| +};
 | |
| +
 | |
| +#define INIT_REPORT(var)			\
 | |
| +	struct utrace_report var = {		\
 | |
| +		.action = UTRACE_RESUME,	\
 | |
| +		.resume_action = UTRACE_RESUME,	\
 | |
| +		.spurious = true,		\
 | |
| +	}
 | |
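| +
 | |
| +/*
 | |
| + * Note that enum utrace_resume_action (see <linux/utrace.h>) is ordered
 | |
| + * so that a numerically smaller value is a stronger request, from
 | |
| + * UTRACE_STOP at the bottom up to UTRACE_RESUME and UTRACE_DETACH at
 | |
| + * the top.  The reporting code below keeps the minimum across all
 | |
| + * callback return values, so the strongest engine request wins.
 | |
| + */
 | |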
| +
 | |
| +/*
 | |
| + * We are now making the report, so clear the flag saying we need one.
 | |
| + * When there is a new attach, ->pending_attach is set just so we will
 | |
| + * know to do splice_attaching() here before the callback loop.
 | |
| + */
 | |
| +static enum utrace_resume_action start_report(struct utrace *utrace)
 | |
| +{
 | |
| +	enum utrace_resume_action resume = utrace->resume;
 | |
| +	if (utrace->pending_attach ||
 | |
| +	    (resume > UTRACE_INTERRUPT && resume < UTRACE_RESUME)) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		splice_attaching(utrace);
 | |
| +		resume = utrace->resume;
 | |
| +		if (resume > UTRACE_INTERRUPT)
 | |
| +			utrace->resume = UTRACE_RESUME;
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +	return resume;
 | |
| +}
 | |
| +
 | |
| +static inline void finish_report_reset(struct task_struct *task,
 | |
| +				       struct utrace *utrace,
 | |
| +				       struct utrace_report *report)
 | |
| +{
 | |
| +	if (unlikely(report->spurious || report->detaches)) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		if (utrace_reset(task, utrace))
 | |
| +			report->action = UTRACE_RESUME;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Complete a normal reporting pass, pairing with a start_report() call.
 | |
| + * This handles any UTRACE_DETACH or UTRACE_REPORT or UTRACE_INTERRUPT
 | |
| + * returns from engine callbacks.  If @will_not_stop is true and any
 | |
| + * engine's last callback used UTRACE_STOP, we do UTRACE_REPORT here to
 | |
| + * ensure we stop before user mode.  If there were no callbacks made, it
 | |
| + * will recompute @task->utrace_flags to avoid another false-positive.
 | |
| + */
 | |
| +static void finish_report(struct task_struct *task, struct utrace *utrace,
 | |
| +			  struct utrace_report *report, bool will_not_stop)
 | |
| +{
 | |
| +	enum utrace_resume_action resume = report->action;
 | |
| +
 | |
| +	if (resume == UTRACE_STOP)
 | |
| +		resume = will_not_stop ? UTRACE_REPORT : UTRACE_RESUME;
 | |
| +
 | |
| +	if (resume < utrace->resume) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		utrace->resume = resume;
 | |
| +		if (resume == UTRACE_INTERRUPT)
 | |
| +			set_tsk_thread_flag(task, TIF_SIGPENDING);
 | |
| +		else
 | |
| +			set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +
 | |
| +	finish_report_reset(task, utrace, report);
 | |
| +}
 | |
| +
 | |
| +static void finish_callback_report(struct task_struct *task,
 | |
| +				   struct utrace *utrace,
 | |
| +				   struct utrace_report *report,
 | |
| +				   struct utrace_engine *engine,
 | |
| +				   enum utrace_resume_action action)
 | |
| +{
 | |
| +	if (action == UTRACE_DETACH) {
 | |
| +		/*
 | |
| +		 * By holding the lock here, we make sure that
 | |
| +		 * utrace_barrier() (really get_utrace_lock()) sees the
 | |
| +		 * effect of this detach.  Otherwise utrace_barrier() could
 | |
| +		 * return 0 after this callback had returned UTRACE_DETACH.
 | |
| +		 * This way, a 0 return is an unambiguous indicator that any
 | |
| +		 * callback returning UTRACE_DETACH has indeed caused detach.
 | |
| +		 */
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		engine->ops = &utrace_detached_ops;
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * If utrace_control() was used, treat that like UTRACE_DETACH here.
 | |
| +	 */
 | |
| +	if (engine->ops == &utrace_detached_ops) {
 | |
| +		report->detaches = true;
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (action < report->action)
 | |
| +		report->action = action;
 | |
| +
 | |
| +	if (action != UTRACE_STOP) {
 | |
| +		if (action < report->resume_action)
 | |
| +			report->resume_action = action;
 | |
| +
 | |
| +		if (engine_wants_stop(engine)) {
 | |
| +			spin_lock(&utrace->lock);
 | |
| +			clear_engine_wants_stop(engine);
 | |
| +			spin_unlock(&utrace->lock);
 | |
| +		}
 | |
| +
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (!engine_wants_stop(engine)) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		/*
 | |
| +		 * If utrace_control() came in and detached us
 | |
| +		 * before we got the lock, we must not stop now.
 | |
| +		 */
 | |
| +		if (unlikely(engine->ops == &utrace_detached_ops))
 | |
| +			report->detaches = true;
 | |
| +		else
 | |
| +			mark_engine_wants_stop(task, engine);
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Apply the return value of one engine callback to @report.
 | |
| + * Returns true if @engine detached and should not get any more callbacks.
 | |
| + */
 | |
| +static bool finish_callback(struct task_struct *task, struct utrace *utrace,
 | |
| +			    struct utrace_report *report,
 | |
| +			    struct utrace_engine *engine,
 | |
| +			    u32 ret)
 | |
| +{
 | |
| +	report->result = ret & ~UTRACE_RESUME_MASK;
 | |
| +	finish_callback_report(task, utrace, report, engine,
 | |
| +			       utrace_resume_action(ret));
 | |
| +
 | |
| +	/*
 | |
| +	 * Now that we have applied the effect of the return value,
 | |
| +	 * clear this so that utrace_barrier() can stop waiting.
 | |
| +	 * A subsequent utrace_control() can stop or resume @engine
 | |
| +	 * and know this was ordered after its callback's action.
 | |
| +	 *
 | |
| +	 * We don't need any barriers here because utrace_barrier()
 | |
| +	 * takes utrace->lock.  If we touched engine->flags above,
 | |
| +	 * the lock guaranteed this change was before utrace_barrier()
 | |
| +	 * examined utrace->reporting.
 | |
| +	 */
 | |
| +	utrace->reporting = NULL;
 | |
| +
 | |
| +	/*
 | |
| +	 * We've just done an engine callback.  These are allowed to sleep,
 | |
| +	 * though all well-behaved ones restrict that to blocking kmalloc()
 | |
| +	 * or quickly-acquired mutex_lock() and the like.  This is a good
 | |
| +	 * place to make sure tracing engines don't introduce too much
 | |
| +	 * latency under voluntary preemption.
 | |
| +	 */
 | |
| +	might_sleep();
 | |
| +
 | |
| +	return engine->ops == &utrace_detached_ops;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Start the callbacks for @engine to consider @event (a bit mask).
 | |
| + * This makes the report_quiesce() callback first.  If @engine wants
 | |
| + * a specific callback for @event, we return the ops vector to use.
 | |
| + * If not, we return NULL.  The return value from the ops->callback
 | |
| + * function called should be passed to finish_callback().
 | |
| + */
 | |
| +static const struct utrace_engine_ops *start_callback(
 | |
| +	struct utrace *utrace, struct utrace_report *report,
 | |
| +	struct utrace_engine *engine, struct task_struct *task,
 | |
| +	unsigned long event)
 | |
| +{
 | |
| +	const struct utrace_engine_ops *ops;
 | |
| +	unsigned long want;
 | |
| +
 | |
| +	/*
 | |
| +	 * This barrier ensures that we've set utrace->reporting before
 | |
| +	 * we examine engine->flags or engine->ops.  utrace_barrier()
 | |
| +	 * relies on this ordering to indicate that the effect of any
 | |
| +	 * utrace_control() and utrace_set_events() calls is in place
 | |
| +	 * by the time utrace->reporting can be seen to be NULL.
 | |
| +	 */
 | |
| +	utrace->reporting = engine;
 | |
| +	smp_mb();
 | |
| +
 | |
| +	/*
 | |
| +	 * This pairs with the barrier in mark_engine_detached().
 | |
| +	 * It makes sure that we never see the old ops vector with
 | |
| +	 * the new flags, in case the original vector had no report_quiesce.
 | |
| +	 */
 | |
| +	want = engine->flags;
 | |
| +	smp_rmb();
 | |
| +	ops = engine->ops;
 | |
| +
 | |
| +	if ((want & UTRACE_EVENT(QUIESCE)) || ops == &utrace_detached_ops) {
 | |
| +		if (finish_callback(task, utrace, report, engine,
 | |
| +				    (*ops->report_quiesce)(report->action,
 | |
| +							   engine, event)))
 | |
| +			return NULL;
 | |
| +
 | |
| +		/*
 | |
| +		 * finish_callback() reset utrace->reporting after the
 | |
| +		 * quiesce callback.  Now we set it again (as above)
 | |
| +		 * before re-examining engine->flags, which could have
 | |
| +		 * been changed synchronously by ->report_quiesce or
 | |
| +		 * asynchronously by utrace_control() or utrace_set_events().
 | |
| +		 */
 | |
| +		utrace->reporting = engine;
 | |
| +		smp_mb();
 | |
| +		want = engine->flags;
 | |
| +	}
 | |
| +
 | |
| +	if (want & ENGINE_STOP)
 | |
| +		report->action = UTRACE_STOP;
 | |
| +
 | |
| +	if (want & (event ?: UTRACE_EVENT(QUIESCE))) {
 | |
| +		report->spurious = false;
 | |
| +		return ops;
 | |
| +	}
 | |
| +
 | |
| +	utrace->reporting = NULL;
 | |
| +	return NULL;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Do a normal reporting pass for engines interested in @event.
 | |
| + * @callback is the name of the member in the ops vector, and remaining
 | |
| + * args are the extras it takes after the standard three args.
 | |
| + */
 | |
| +#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...)     \
 | |
| +	do {								      \
 | |
| +		struct utrace_engine *engine;				      \
 | |
| +		const struct utrace_engine_ops *ops;			      \
 | |
| +		list_for_each_entry##rev(engine, &utrace->attached, entry) {  \
 | |
| +			ops = start_callback(utrace, report, engine, task,    \
 | |
| +					     event);			      \
 | |
| +			if (!ops)					      \
 | |
| +				continue;				      \
 | |
| +			finish_callback(task, utrace, report, engine,	      \
 | |
| +					(*ops->callback)(__VA_ARGS__));	      \
 | |
| +		}							      \
 | |
| +	} while (0)
 | |
| +#define REPORT(task, utrace, report, event, callback, ...)		      \
 | |
| +	do {								      \
 | |
| +		start_report(utrace);					      \
 | |
| +		REPORT_CALLBACKS(, task, utrace, report, event, callback,     \
 | |
| +				 (report)->action, engine, ## __VA_ARGS__);   \
 | |
| +		finish_report(task, utrace, report, true);		      \
 | |
| +	} while (0)
 | |
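| +
 | |
| +/*
 | |
| + * As a sketch of the expansion, the EXEC pass in utrace_report_exec()
 | |
| + * below comes out roughly as:
 | |
| + *
 | |
| + *	start_report(utrace);
 | |
| + *	list_for_each_entry(engine, &utrace->attached, entry) {
 | |
| + *		ops = start_callback(utrace, &report, engine, task,
 | |
| + *				     UTRACE_EVENT(EXEC));
 | |
| + *		if (ops)
 | |
| + *			finish_callback(task, utrace, &report, engine,
 | |
| + *					(*ops->report_exec)(report.action,
 | |
| + *							    engine, fmt,
 | |
| + *							    bprm, regs));
 | |
| + *	}
 | |
| + *	finish_report(task, utrace, &report, true);
 | |
| + */
 | |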
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(EXEC) flag is set.
 | |
| + */
 | |
| +void utrace_report_exec(struct linux_binfmt *fmt, struct linux_binprm *bprm,
 | |
| +			struct pt_regs *regs)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +
 | |
| +	REPORT(task, utrace, &report, UTRACE_EVENT(EXEC),
 | |
| +	       report_exec, fmt, bprm, regs);
 | |
| +}
 | |
| +
 | |
| +static u32 do_report_syscall_entry(struct pt_regs *regs,
 | |
| +				   struct task_struct *task,
 | |
| +				   struct utrace *utrace,
 | |
| +				   struct utrace_report *report,
 | |
| +				   u32 resume_report)
 | |
| +{
 | |
| +	start_report(utrace);
 | |
| +	REPORT_CALLBACKS(_reverse, task, utrace, report,
 | |
| +			 UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry,
 | |
| +			 resume_report | report->result | report->action,
 | |
| +			 engine, regs);
 | |
| +	finish_report(task, utrace, report, false);
 | |
| +
 | |
| +	if (report->action != UTRACE_STOP)
 | |
| +		return 0;
 | |
| +
 | |
| +	utrace_stop(task, utrace, report->resume_action);
 | |
| +
 | |
| +	if (fatal_signal_pending(task)) {
 | |
| +		/*
 | |
| +		 * We are continuing despite UTRACE_STOP because of a
 | |
| +		 * SIGKILL.  Don't let the system call actually proceed.
 | |
| +		 */
 | |
| +		report->result = UTRACE_SYSCALL_ABORT;
 | |
| +	} else if (utrace->resume <= UTRACE_REPORT) {
 | |
| +		/*
 | |
| +		 * If we've been asked for another report after our stop,
 | |
| +		 * go back to report (and maybe stop) again before we run
 | |
| +		 * the system call.  The second (and later) reports are
 | |
| +		 * marked with the UTRACE_SYSCALL_RESUMED flag so that
 | |
| +		 * engines know this is a second report at the same
 | |
| +		 * entry.  This gives them the chance to examine the
 | |
| +		 * registers anew after they might have been changed
 | |
| +		 * while we were stopped.
 | |
| +		 */
 | |
| +		report->detaches = false;
 | |
| +		report->spurious = true;
 | |
| +		report->action = report->resume_action = UTRACE_RESUME;
 | |
| +		return UTRACE_SYSCALL_RESUMED;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(SYSCALL_ENTRY) flag is set.
 | |
| + * Return true to prevent the system call.
 | |
| + */
 | |
| +bool utrace_report_syscall_entry(struct pt_regs *regs)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +	u32 resume_report = 0;
 | |
| +
 | |
| +	do {
 | |
| +		resume_report = do_report_syscall_entry(regs, task, utrace,
 | |
| +							&report, resume_report);
 | |
| +	} while (resume_report);
 | |
| +
 | |
| +	return utrace_syscall_action(report.result) == UTRACE_SYSCALL_ABORT;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(SYSCALL_EXIT) flag is set.
 | |
| + */
 | |
| +void utrace_report_syscall_exit(struct pt_regs *regs)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +
 | |
| +	REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT),
 | |
| +	       report_syscall_exit, regs);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(CLONE) flag is set.
 | |
| + * This notification call blocks the wake_up_new_task call on the child.
 | |
| + * So we must not quiesce here.  tracehook_report_clone_complete() will do
 | |
| + * a quiescence check momentarily.
 | |
| + */
 | |
| +void utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +
 | |
| +	/*
 | |
| +	 * We don't use the REPORT() macro here, because we need
 | |
| +	 * to clear utrace->cloning before finish_report().
 | |
| +	 * After finish_report(), utrace can be a stale pointer
 | |
| +	 * in cases when report.action is still UTRACE_RESUME.
 | |
| +	 */
 | |
| +	start_report(utrace);
 | |
| +	utrace->cloning = child;
 | |
| +
 | |
| +	REPORT_CALLBACKS(, task, utrace, &report,
 | |
| +			 UTRACE_EVENT(CLONE), report_clone,
 | |
| +			 report.action, engine, clone_flags, child);
 | |
| +
 | |
| +	utrace->cloning = NULL;
 | |
| +	finish_report(task, utrace, &report, !(clone_flags & CLONE_VFORK));
 | |
| +
 | |
| +	/*
 | |
| +	 * For a vfork, we will go into an uninterruptible block waiting
 | |
| +	 * for the child.  We need UTRACE_STOP to happen before this, not
 | |
| +	 * after.  For CLONE_VFORK, utrace_finish_vfork() will be called.
 | |
| +	 */
 | |
| +	if (report.action == UTRACE_STOP && (clone_flags & CLONE_VFORK)) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		utrace->vfork_stop = 1;
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * We're called after utrace_report_clone() for a CLONE_VFORK.
 | |
| + * If UTRACE_STOP was left from the clone report, we stop here.
 | |
| + * After this, we'll enter the uninterruptible wait_for_completion()
 | |
| + * waiting for the child.
 | |
| + */
 | |
| +void utrace_finish_vfork(struct task_struct *task)
 | |
| +{
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +
 | |
| +	if (utrace->vfork_stop) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		utrace->vfork_stop = 0;
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +		utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(JCTL) flag is set.
 | |
| + *
 | |
| + * Called with siglock held.
 | |
| + */
 | |
| +void utrace_report_jctl(int notify, int what)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +
 | |
| +	spin_unlock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +	REPORT(task, utrace, &report, UTRACE_EVENT(JCTL),
 | |
| +	       report_jctl, what, notify);
 | |
| +
 | |
| +	spin_lock_irq(&task->sighand->siglock);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(EXIT) flag is set.
 | |
| + */
 | |
| +void utrace_report_exit(long *exit_code)
 | |
| +{
 | |
| +	struct task_struct *task = current;
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +	long orig_code = *exit_code;
 | |
| +
 | |
| +	REPORT(task, utrace, &report, UTRACE_EVENT(EXIT),
 | |
| +	       report_exit, orig_code, exit_code);
 | |
| +
 | |
| +	if (report.action == UTRACE_STOP)
 | |
| +		utrace_stop(task, utrace, report.resume_action);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Called iff UTRACE_EVENT(DEATH) or UTRACE_EVENT(QUIESCE) flag is set.
 | |
| + *
 | |
| + * It is always possible that we are racing with release_task() here.
 | |
| + * For this reason, utrace_maybe_reap() checks for the event bits that
 | |
| + * get us here, and delays its cleanup for us to do.
 | |
| + */
 | |
| +void utrace_report_death(struct task_struct *task, struct utrace *utrace,
 | |
| +			 bool group_dead, int signal)
 | |
| +{
 | |
| +	INIT_REPORT(report);
 | |
| +
 | |
| +	BUG_ON(!task->exit_state);
 | |
| +
 | |
| +	/*
 | |
| +	 * We are presently considered "quiescent"--which is accurate
 | |
| +	 * inasmuch as we won't run any more user instructions ever again.
 | |
| +	 * But for utrace_control and utrace_set_events to be robust, they
 | |
| +	 * must be sure whether or not we will run any more callbacks.  If
 | |
| +	 * a call comes in before we do, taking the lock here synchronizes
 | |
| +	 * us so we don't run any callbacks just disabled.  Calls that come
 | |
| +	 * in while we're running the callbacks will see the exit.death
 | |
| +	 * flag and know that we are not yet fully quiescent for purposes
 | |
| +	 * of detach bookkeeping.
 | |
| +	 */
 | |
| +	spin_lock(&utrace->lock);
 | |
| +	BUG_ON(utrace->death);
 | |
| +	utrace->death = 1;
 | |
| +	utrace->resume = UTRACE_RESUME;
 | |
| +	splice_attaching(utrace);
 | |
| +	spin_unlock(&utrace->lock);
 | |
| +
 | |
| +	REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
 | |
| +			 report_death, engine, group_dead, signal);
 | |
| +
 | |
| +	utrace_maybe_reap(task, utrace, false);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Finish the last reporting pass before returning to user mode.
 | |
| + */
 | |
| +static void finish_resume_report(struct task_struct *task,
 | |
| +				 struct utrace *utrace,
 | |
| +				 struct utrace_report *report)
 | |
| +{
 | |
| +	finish_report_reset(task, utrace, report);
 | |
| +
 | |
| +	switch (report->action) {
 | |
| +	case UTRACE_STOP:
 | |
| +		utrace_stop(task, utrace, report->resume_action);
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_INTERRUPT:
 | |
| +		if (!signal_pending(task))
 | |
| +			set_tsk_thread_flag(task, TIF_SIGPENDING);
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_BLOCKSTEP:
 | |
| +		if (likely(arch_has_block_step())) {
 | |
| +			user_enable_block_step(task);
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +		/*
 | |
| +		 * This means some callback is to blame for failing
 | |
| +		 * to check arch_has_block_step() itself.  Warn and
 | |
| +		 * then fall through to treat it as SINGLESTEP.
 | |
| +		 */
 | |
| +		WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()");
 | |
| +
 | |
| +	case UTRACE_SINGLESTEP:
 | |
| +		if (likely(arch_has_single_step())) {
 | |
| +			user_enable_single_step(task);
 | |
| +		} else {
 | |
| +			/*
 | |
| +			 * This means some callback is to blame for failing
 | |
| +			 * to check arch_has_single_step() itself.  Spew
 | |
| +			 * about it so the loser will fix his module.
 | |
| +			 */
 | |
| +			WARN(1,
 | |
| +			     "UTRACE_SINGLESTEP when !arch_has_single_step()");
 | |
| +		}
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_REPORT:
 | |
| +	case UTRACE_RESUME:
 | |
| +	default:
 | |
| +		user_disable_single_step(task);
 | |
| +		break;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is called when TIF_NOTIFY_RESUME had been set (and is now clear).
 | |
| + * We are close to user mode, and this is the place to report or stop.
 | |
| + * When we return, we're going to user mode or into the signals code.
 | |
| + */
 | |
| +void utrace_resume(struct task_struct *task, struct pt_regs *regs)
 | |
| +{
 | |
| +	struct utrace *utrace = task_utrace_struct(task);
 | |
| +	INIT_REPORT(report);
 | |
| +	struct utrace_engine *engine;
 | |
| +
 | |
| +	/*
 | |
| +	 * Some machines get here with interrupts disabled.  The same arch
 | |
| +	 * code path leads to calling into get_signal_to_deliver(), which
 | |
| +	 * implicitly reenables them by virtue of spin_unlock_irq.
 | |
| +	 */
 | |
| +	local_irq_enable();
 | |
| +
 | |
| +	/*
 | |
| +	 * If this flag is still set it's because there was a signal
 | |
| +	 * handler setup done but no report_signal following it.  Clear
 | |
| +	 * the flag before we get to user so it doesn't confuse us later.
 | |
| +	 */
 | |
| +	if (unlikely(utrace->signal_handler)) {
 | |
| +		spin_lock(&utrace->lock);
 | |
| +		utrace->signal_handler = 0;
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Update our bookkeeping even if there are no callbacks made here.
 | |
| +	 */
 | |
| +	report.action = start_report(utrace);
 | |
| +
 | |
| +	switch (report.action) {
 | |
| +	case UTRACE_RESUME:
 | |
| +		/*
 | |
| +		 * Anything we might have done was already handled by
 | |
| +		 * utrace_get_signal(), or this is an entirely spurious
 | |
| +		 * call.  (The arch might use TIF_NOTIFY_RESUME for other
 | |
| +		 * purposes as well as calling us.)
 | |
| +		 */
 | |
| +		return;
 | |
| +	case UTRACE_REPORT:
 | |
| +		if (unlikely(!(task->utrace_flags & UTRACE_EVENT(QUIESCE))))
 | |
| +			break;
 | |
| +		/*
 | |
| +		 * Do a simple reporting pass, with no specific
 | |
| +		 * callback after report_quiesce.
 | |
| +		 */
 | |
| +		report.action = UTRACE_RESUME;
 | |
| +		list_for_each_entry(engine, &utrace->attached, entry)
 | |
| +			start_callback(utrace, &report, engine, task, 0);
 | |
| +		break;
 | |
| +	default:
 | |
| +		/*
 | |
| +		 * Even if this report was truly spurious, there is no need
 | |
| +		 * for utrace_reset() now.  TIF_NOTIFY_RESUME was already
 | |
| +		 * cleared--it doesn't stay spuriously set.
 | |
| +		 */
 | |
| +		report.spurious = false;
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Finish the report and either stop or get ready to resume.
 | |
| +	 * If utrace->resume was not UTRACE_REPORT, this applies its
 | |
| +	 * effect now (i.e. step or interrupt).
 | |
| +	 */
 | |
| +	finish_resume_report(task, utrace, &report);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Return true if current has forced signal_pending().
 | |
| + *
 | |
| + * This is called only when current->utrace_flags is nonzero, so we know
 | |
| + * that current->utrace must be set.  It's not inlined in tracehook.h
 | |
| + * just so that struct utrace can stay opaque outside this file.
 | |
| + */
 | |
| +bool utrace_interrupt_pending(void)
 | |
| +{
 | |
| +	return task_utrace_struct(current)->resume == UTRACE_INTERRUPT;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Take the siglock and push @info back on our queue.
 | |
| + * Returns with @task->sighand->siglock held.
 | |
| + */
 | |
| +static void push_back_signal(struct task_struct *task, siginfo_t *info)
 | |
| +	__acquires(task->sighand->siglock)
 | |
| +{
 | |
| +	struct sigqueue *q;
 | |
| +
 | |
| +	if (unlikely(!info->si_signo)) { /* Oh, a wise guy! */
 | |
| +		spin_lock_irq(&task->sighand->siglock);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	q = sigqueue_alloc();
 | |
| +	if (likely(q)) {
 | |
| +		q->flags = 0;
 | |
| +		copy_siginfo(&q->info, info);
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +	sigaddset(&task->pending.signal, info->si_signo);
 | |
| +	if (likely(q))
 | |
| +		list_add(&q->list, &task->pending.list);
 | |
| +
 | |
| +	set_tsk_thread_flag(task, TIF_SIGPENDING);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * This is the hook from the signals code, called with the siglock held.
 | |
| + * Here is the ideal place to stop.  We also dequeue and intercept signals.
 | |
| + */
 | |
| +int utrace_get_signal(struct task_struct *task, struct pt_regs *regs,
 | |
| +		      siginfo_t *info, struct k_sigaction *return_ka)
 | |
| +	__releases(task->sighand->siglock)
 | |
| +	__acquires(task->sighand->siglock)
 | |
| +{
 | |
| +	struct utrace *utrace;
 | |
| +	struct k_sigaction *ka;
 | |
| +	INIT_REPORT(report);
 | |
| +	struct utrace_engine *engine;
 | |
| +	const struct utrace_engine_ops *ops;
 | |
| +	unsigned long event, want;
 | |
| +	u32 ret;
 | |
| +	int signr;
 | |
| +
 | |
| +	utrace = task_utrace_struct(task);
 | |
| +	if (utrace->resume < UTRACE_RESUME ||
 | |
| +	    utrace->pending_attach || utrace->signal_handler) {
 | |
| +		enum utrace_resume_action resume;
 | |
| +
 | |
| +		/*
 | |
| +		 * We've been asked for an explicit report before we
 | |
| +		 * even check for pending signals.
 | |
| +		 */
 | |
| +
 | |
| +		spin_unlock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +		spin_lock(&utrace->lock);
 | |
| +
 | |
| +		splice_attaching(utrace);
 | |
| +
 | |
| +		report.result = utrace->signal_handler ?
 | |
| +			UTRACE_SIGNAL_HANDLER : UTRACE_SIGNAL_REPORT;
 | |
| +		utrace->signal_handler = 0;
 | |
| +
 | |
| +		resume = utrace->resume;
 | |
| +		utrace->resume = UTRACE_RESUME;
 | |
| +
 | |
| +		spin_unlock(&utrace->lock);
 | |
| +
 | |
| +		/*
 | |
| +		 * Make sure signal_pending() only returns true
 | |
| +		 * if there are real signals pending.
 | |
| +		 */
 | |
| +		if (signal_pending(task)) {
 | |
| +			spin_lock_irq(&task->sighand->siglock);
 | |
| +			recalc_sigpending();
 | |
| +			spin_unlock_irq(&task->sighand->siglock);
 | |
| +		}
 | |
| +
 | |
| +		if (resume > UTRACE_REPORT) {
 | |
| +			/*
 | |
| +			 * We only got here to process utrace->resume.
 | |
| +			 * Despite no callbacks, this report is not spurious.
 | |
| +			 */
 | |
| +			report.action = resume;
 | |
| +			report.spurious = false;
 | |
| +			finish_resume_report(task, utrace, &report);
 | |
| +			return -1;
 | |
| +		} else if (!(task->utrace_flags & UTRACE_EVENT(QUIESCE))) {
 | |
| +			/*
 | |
| +			 * We only got here to clear utrace->signal_handler.
 | |
| +			 */
 | |
| +			return -1;
 | |
| +		}
 | |
| +
 | |
| +		/*
 | |
| +		 * Do a reporting pass for no signal, just for EVENT(QUIESCE).
 | |
| +		 * The engine callbacks can fill in *info and *return_ka.
 | |
| +		 * We'll pass NULL for the @orig_ka argument to indicate
 | |
| +		 * that there was no original signal.
 | |
| +		 */
 | |
| +		event = 0;
 | |
| +		ka = NULL;
 | |
| +		memset(return_ka, 0, sizeof *return_ka);
 | |
| +	} else if (!(task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) ||
 | |
| +		   unlikely(task->signal->group_stop_count)) {
 | |
| +		/*
 | |
| +		 * If no engine is interested in intercepting signals or
 | |
| +		 * we must stop, let the caller just dequeue them normally
 | |
| +		 * or participate in group-stop.
 | |
| +		 */
 | |
| +		return 0;
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * Steal the next signal so we can let tracing engines
 | |
| +		 * examine it.  From the signal number and sigaction,
 | |
| +		 * determine what normal delivery would do.  If no
 | |
| +		 * engine perturbs it, we'll do that by returning the
 | |
| +		 * signal number after setting *return_ka.
 | |
| +		 */
 | |
| +		signr = dequeue_signal(task, &task->blocked, info);
 | |
| +		if (signr == 0)
 | |
| +			return signr;
 | |
| +		BUG_ON(signr != info->si_signo);
 | |
| +
 | |
| +		ka = &task->sighand->action[signr - 1];
 | |
| +		*return_ka = *ka;
 | |
| +
 | |
| +		/*
 | |
| +		 * We are never allowed to interfere with SIGKILL.
 | |
| +		 * Just punt after filling in *return_ka for our caller.
 | |
| +		 */
 | |
| +		if (signr == SIGKILL)
 | |
| +			return signr;
 | |
| +
 | |
| +		if (ka->sa.sa_handler == SIG_IGN) {
 | |
| +			event = UTRACE_EVENT(SIGNAL_IGN);
 | |
| +			report.result = UTRACE_SIGNAL_IGN;
 | |
| +		} else if (ka->sa.sa_handler != SIG_DFL) {
 | |
| +			event = UTRACE_EVENT(SIGNAL);
 | |
| +			report.result = UTRACE_SIGNAL_DELIVER;
 | |
| +		} else if (sig_kernel_coredump(signr)) {
 | |
| +			event = UTRACE_EVENT(SIGNAL_CORE);
 | |
| +			report.result = UTRACE_SIGNAL_CORE;
 | |
| +		} else if (sig_kernel_ignore(signr)) {
 | |
| +			event = UTRACE_EVENT(SIGNAL_IGN);
 | |
| +			report.result = UTRACE_SIGNAL_IGN;
 | |
| +		} else if (signr == SIGSTOP) {
 | |
| +			event = UTRACE_EVENT(SIGNAL_STOP);
 | |
| +			report.result = UTRACE_SIGNAL_STOP;
 | |
| +		} else if (sig_kernel_stop(signr)) {
 | |
| +			event = UTRACE_EVENT(SIGNAL_STOP);
 | |
| +			report.result = UTRACE_SIGNAL_TSTP;
 | |
| +		} else {
 | |
| +			event = UTRACE_EVENT(SIGNAL_TERM);
 | |
| +			report.result = UTRACE_SIGNAL_TERM;
 | |
| +		}
 | |
| +
 | |
| +		/*
 | |
| +		 * Now that we know what event type this signal is, we
 | |
| +		 * can short-circuit if no engines care about those.
 | |
| +		 */
 | |
| +		if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0)
 | |
| +			return signr;
 | |
| +
 | |
| +		/*
 | |
| +		 * We have some interested engines, so tell them about
 | |
| +		 * the signal and let them change its disposition.
 | |
| +		 */
 | |
| +		spin_unlock_irq(&task->sighand->siglock);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * This reporting pass chooses what signal disposition we'll act on.
 | |
| +	 */
 | |
| +	list_for_each_entry(engine, &utrace->attached, entry) {
 | |
| +		/*
 | |
| +		 * See start_callback() comment about this barrier.
 | |
| +		 */
 | |
| +		utrace->reporting = engine;
 | |
| +		smp_mb();
 | |
| +
 | |
| +		/*
 | |
| +		 * This pairs with the barrier in mark_engine_detached(),
 | |
| +		 * see start_callback() comments.
 | |
| +		 */
 | |
| +		want = engine->flags;
 | |
| +		smp_rmb();
 | |
| +		ops = engine->ops;
 | |
| +
 | |
| +		if ((want & (event | UTRACE_EVENT(QUIESCE))) == 0) {
 | |
| +			utrace->reporting = NULL;
 | |
| +			continue;
 | |
| +		}
 | |
| +
 | |
| +		if (ops->report_signal)
 | |
| +			ret = (*ops->report_signal)(
 | |
| +				report.result | report.action, engine,
 | |
| +				regs, info, ka, return_ka);
 | |
| +		else
 | |
| +			ret = (report.result | (*ops->report_quiesce)(
 | |
| +				       report.action, engine, event));
 | |
| +
 | |
| +		/*
 | |
| +		 * Avoid a tight loop reporting again and again if some
 | |
| +		 * engine is too stupid.
 | |
| +		 */
 | |
| +		switch (utrace_resume_action(ret)) {
 | |
| +		default:
 | |
| +			break;
 | |
| +		case UTRACE_INTERRUPT:
 | |
| +		case UTRACE_REPORT:
 | |
| +			ret = (ret & ~UTRACE_RESUME_MASK) | UTRACE_RESUME;
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +		finish_callback(task, utrace, &report, engine, ret);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * We express the chosen action to the signals code in terms
 | |
| +	 * of a representative signal whose default action does it.
 | |
| +	 * Our caller uses our return value (signr) to decide what to
 | |
| +	 * do, but uses info->si_signo as the signal number to report.
 | |
| +	 */
 | |
| +	switch (utrace_signal_action(report.result)) {
 | |
| +	case UTRACE_SIGNAL_TERM:
 | |
| +		signr = SIGTERM;
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_SIGNAL_CORE:
 | |
| +		signr = SIGQUIT;
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_SIGNAL_STOP:
 | |
| +		signr = SIGSTOP;
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_SIGNAL_TSTP:
 | |
| +		signr = SIGTSTP;
 | |
| +		break;
 | |
| +
 | |
| +	case UTRACE_SIGNAL_DELIVER:
 | |
| +		signr = info->si_signo;
 | |
| +
 | |
| +		if (return_ka->sa.sa_handler == SIG_DFL) {
 | |
| +			/*
 | |
| +			 * We'll do signr's normal default action.
 | |
| +			 * For ignore, we'll fall through below.
 | |
| +			 * For stop/death, we break out of the switch,
 | |
| +			 * retake the siglock below, and return it.
 | |
| +			 */
 | |
| +			if (likely(signr) && !sig_kernel_ignore(signr))
 | |
| +				break;
 | |
| +		} else if (return_ka->sa.sa_handler != SIG_IGN &&
 | |
| +			   likely(signr)) {
 | |
| +			/*
 | |
| +			 * Complete the bookkeeping after the report.
 | |
| +			 * The handler will run.  If an engine wanted to
 | |
| +			 * stop or step, then make sure we do another
 | |
| +			 * report after signal handler setup.
 | |
| +			 */
 | |
| +			if (report.action != UTRACE_RESUME)
 | |
| +				report.action = UTRACE_INTERRUPT;
 | |
| +			finish_report(task, utrace, &report, true);
 | |
| +
 | |
| +			if (unlikely(report.result & UTRACE_SIGNAL_HOLD))
 | |
| +				push_back_signal(task, info);
 | |
| +			else
 | |
| +				spin_lock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +			/*
 | |
| +			 * We do the SA_ONESHOT work here since the
 | |
| +			 * normal path will only touch *return_ka now.
 | |
| +			 */
 | |
| +			if (unlikely(return_ka->sa.sa_flags & SA_ONESHOT)) {
 | |
| +				return_ka->sa.sa_flags &= ~SA_ONESHOT;
 | |
| +				if (likely(valid_signal(signr))) {
 | |
| +					ka = &task->sighand->action[signr - 1];
 | |
| +					ka->sa.sa_handler = SIG_DFL;
 | |
| +				}
 | |
| +			}
 | |
| +
 | |
| +			return signr;
 | |
| +		}
 | |
| +
 | |
| +		/* Fall through for an ignored signal.  */
 | |
| +
 | |
| +	case UTRACE_SIGNAL_IGN:
 | |
| +	case UTRACE_SIGNAL_REPORT:
 | |
| +	default:
 | |
| +		/*
 | |
| +		 * If the signal is being ignored, then we are on the way
 | |
| +		 * directly back to user mode.  We can stop here, or step,
 | |
| +		 * as in utrace_resume(), above.  After we've dealt with that,
 | |
| +		 * our caller will relock and come back through here.
 | |
| +		 */
 | |
| +		finish_resume_report(task, utrace, &report);
 | |
| +
 | |
| +		if (unlikely(fatal_signal_pending(task))) {
 | |
| +			/*
 | |
| +			 * The only reason we woke up now was because of a
 | |
| +			 * SIGKILL.  Don't do normal dequeuing in case it
 | |
| +			 * might get a signal other than SIGKILL.  That would
 | |
| +			 * perturb the death state so it might differ from
 | |
| +			 * what the debugger would have allowed to happen.
 | |
| +			 * Instead, pluck out just the SIGKILL to be sure
 | |
| +			 * we'll die immediately with nothing else different
 | |
| +			 * from the quiescent state the debugger wanted us in.
 | |
| +			 */
 | |
| +			sigset_t sigkill_only;
 | |
| +			siginitsetinv(&sigkill_only, sigmask(SIGKILL));
 | |
| +			spin_lock_irq(&task->sighand->siglock);
 | |
| +			signr = dequeue_signal(task, &sigkill_only, info);
 | |
| +			BUG_ON(signr != SIGKILL);
 | |
| +			*return_ka = task->sighand->action[SIGKILL - 1];
 | |
| +			return signr;
 | |
| +		}
 | |
| +
 | |
| +		if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) {
 | |
| +			push_back_signal(task, info);
 | |
| +			spin_unlock_irq(&task->sighand->siglock);
 | |
| +		}
 | |
| +
 | |
| +		return -1;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Complete the bookkeeping after the report.
 | |
| +	 * This sets utrace->resume if UTRACE_STOP was used.
 | |
| +	 */
 | |
| +	finish_report(task, utrace, &report, true);
 | |
| +
 | |
| +	return_ka->sa.sa_handler = SIG_DFL;
 | |
| +
 | |
| +	/*
 | |
| +	 * If this signal is fatal, si_signo gets through as exit_code.
 | |
| +	 * We can't allow a completely bogus value there or else core
 | |
| +	 * kernel code can freak out.  (If an engine wants to control
 | |
| +	 * the exit_code value exactly, it can do so in report_exit.)
 | |
| +	 * We'll produce a big complaint in dmesg, but won't crash.
 | |
| +	 * That's nicer for debugging your utrace engine.
 | |
| +	 */
 | |
| +	if (unlikely(info->si_signo & 0x80)) {
 | |
| +		WARN(1, "utrace engine left bogus si_signo value!");
 | |
| +		info->si_signo = SIGTRAP;
 | |
| +	}
 | |
| +
 | |
| +	if (unlikely(report.result & UTRACE_SIGNAL_HOLD))
 | |
| +		push_back_signal(task, info);
 | |
| +	else
 | |
| +		spin_lock_irq(&task->sighand->siglock);
 | |
| +
 | |
| +	if (sig_kernel_stop(signr))
 | |
| +		task->signal->flags |= SIGNAL_STOP_DEQUEUED;
 | |
| +
 | |
| +	return signr;
 | |
| +}
 | |
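As an illustration of the engine side that feeds this switch, here is a
minimal sketch.  The callback name and policy are hypothetical, and the
signature is abridged from the report_signal hook in <linux/utrace.h>:
an engine returns a UTRACE_SIGNAL_* action OR'ed with a resume action,
and the switch above maps the signal action to a representative signr
while info->si_signo remains the number actually reported.

#include <linux/utrace.h>
#include <linux/signal.h>

/*
 * Hypothetical engine callback: make every delivered signal
 * terminate the thread instead of running its handler.  The
 * UTRACE_SIGNAL_TERM part is mapped to a representative SIGTERM
 * by utrace_get_signal() above; UTRACE_RESUME lets the thread
 * keep running until then.
 */
static u32 demo_report_signal(u32 action, struct utrace_engine *engine,
			      struct pt_regs *regs, siginfo_t *info,
			      const struct k_sigaction *orig_ka,
			      struct k_sigaction *return_ka)
{
	if (utrace_signal_action(action) == UTRACE_SIGNAL_DELIVER)
		return UTRACE_SIGNAL_TERM | UTRACE_RESUME;
	return utrace_signal_action(action) | UTRACE_RESUME;
}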
+
+/*
+ * This gets called after a signal handler has been set up.
+ * We set a flag so the next report knows it happened.
+ * If we're already stepping, make sure we do a report_signal.
+ * If not, make sure we get into utrace_resume() where we can
+ * clear the signal_handler flag before resuming.
+ */
+void utrace_signal_handler(struct task_struct *task, int stepping)
+{
+	struct utrace *utrace = task_utrace_struct(task);
+
+	spin_lock(&utrace->lock);
+
+	utrace->signal_handler = 1;
+	if (utrace->resume > UTRACE_INTERRUPT) {
+		if (stepping) {
+			utrace->resume = UTRACE_INTERRUPT;
+			set_tsk_thread_flag(task, TIF_SIGPENDING);
+		} else if (utrace->resume == UTRACE_RESUME) {
+			set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
+		}
+	}
+
+	spin_unlock(&utrace->lock);
+}
+
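The stepping case above is what lets a debugger land on the first
instruction of a just-installed handler.  A hedged sketch of how an
engine might arrange that, assuming the task and engine pointers came
from utrace_attach_task() (the helper name is hypothetical):

/*
 * Sketch: resume @task with single-step so that, once the handler
 * frame is set up, utrace_signal_handler() forces another report
 * and the engine regains control inside the handler.
 */
static int step_into_handler(struct task_struct *task,
			     struct utrace_engine *engine)
{
	return utrace_control(task, engine, UTRACE_SINGLESTEP);
}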
+/**
+ * utrace_prepare_examine - prepare to examine thread state
+ * @target:		thread of interest, a &struct task_struct pointer
+ * @engine:		engine pointer returned by utrace_attach_task()
+ * @exam:		temporary state, a &struct utrace_examiner pointer
+ *
+ * This call prepares to safely examine the thread @target using
+ * &struct user_regset calls, or direct access to thread-synchronous fields.
+ *
+ * When @target is current, this call is superfluous.  When @target is
+ * another thread, it must be held stopped via %UTRACE_STOP by @engine.
+ *
+ * This call may block the caller until @target stays stopped, so it must
+ * be called only after the caller is sure @target is about to unschedule.
+ * This means a zero return from a utrace_control() call on @engine giving
+ * %UTRACE_STOP, or a report_quiesce() or report_signal() callback to
+ * @engine that used %UTRACE_STOP in its return value.
+ *
+ * Returns -%ESRCH if @target is dead or -%EINVAL if %UTRACE_STOP was
+ * not used.  If @target has started running again despite %UTRACE_STOP
+ * (for %SIGKILL or a spurious wakeup), this call returns -%EAGAIN.
+ *
+ * When this call returns zero, it's safe to use &struct user_regset
+ * calls and task_user_regset_view() on @target and to examine some of
+ * its fields directly.  When the examination is complete, a
+ * utrace_finish_examine() call must follow to check whether it was
+ * completed safely.
+ */
+int utrace_prepare_examine(struct task_struct *target,
+			   struct utrace_engine *engine,
+			   struct utrace_examiner *exam)
+{
+	int ret = 0;
+
+	if (unlikely(target == current))
+		return 0;
+
+	rcu_read_lock();
+	if (unlikely(!engine_wants_stop(engine)))
+		ret = -EINVAL;
+	else if (unlikely(target->exit_state))
+		ret = -ESRCH;
+	else {
+		exam->state = target->state;
+		if (unlikely(exam->state == TASK_RUNNING))
+			ret = -EAGAIN;
+		else
+			get_task_struct(target);
+	}
+	rcu_read_unlock();
+
+	if (likely(!ret)) {
+		exam->ncsw = wait_task_inactive(target, exam->state);
+		put_task_struct(target);
+		if (unlikely(!exam->ncsw))
+			ret = -EAGAIN;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(utrace_prepare_examine);
+
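The precondition in the kernel-doc above (a zero return from
utrace_control() giving UTRACE_STOP) can be packaged as below.  This is
a sketch only; the helper name is hypothetical, and it assumes the
documented utrace_control() behavior of returning zero once the stop is
in force and -EINPROGRESS while it is still pending:

/*
 * Sketch: ensure @target is held stopped by @engine before an
 * examination begins.  On -EINPROGRESS the stop is pending; the
 * caller should retry after the next quiescent report.
 */
static int start_examination(struct task_struct *target,
			     struct utrace_engine *engine,
			     struct utrace_examiner *exam)
{
	int ret = utrace_control(target, engine, UTRACE_STOP);
	if (ret == -EINPROGRESS)
		return -EAGAIN;
	if (ret)
		return ret;
	return utrace_prepare_examine(target, engine, exam);
}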
+/**
+ * utrace_finish_examine - complete an examination of thread state
+ * @target:		thread of interest, a &struct task_struct pointer
+ * @engine:		engine pointer returned by utrace_attach_task()
+ * @exam:		pointer passed to utrace_prepare_examine() call
+ *
+ * This call completes an examination on the thread @target begun by a
+ * paired utrace_prepare_examine() call with the same arguments that
+ * returned success (zero).
+ *
+ * When @target is current, this call is superfluous.  When @target is
+ * another thread, this returns zero if @target has remained unscheduled
+ * since the paired utrace_prepare_examine() call returned zero.
+ *
+ * When this returns an error, any examination done since the paired
+ * utrace_prepare_examine() call is unreliable and the data extracted
+ * should be discarded.  The error is -%EINVAL if @engine is not
+ * keeping @target stopped, or -%EAGAIN if @target woke up unexpectedly.
+ */
+int utrace_finish_examine(struct task_struct *target,
+			  struct utrace_engine *engine,
+			  struct utrace_examiner *exam)
+{
+	int ret = 0;
+
+	if (unlikely(target == current))
+		return 0;
+
+	rcu_read_lock();
+	if (unlikely(!engine_wants_stop(engine)))
+		ret = -EINVAL;
+	else if (unlikely(target->state != exam->state))
+		ret = -EAGAIN;
+	else
+		get_task_struct(target);
+	rcu_read_unlock();
+
+	if (likely(!ret)) {
+		unsigned long ncsw = wait_task_inactive(target, exam->state);
+		if (unlikely(ncsw != exam->ncsw))
+			ret = -EAGAIN;
+		put_task_struct(target);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(utrace_finish_examine);
+
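Putting the pair together, an engine would bracket its register reads
with prepare/finish and retry on -EAGAIN.  This sketch is illustrative
only: the helper name is hypothetical, and it assumes regset index 0 is
the general-register set, which holds on most machines but is not
guaranteed by the API:

/*
 * Sketch: sample @target's general registers into @buf, retrying
 * until the read is known to have happened while @target stayed
 * quiescent under @engine's UTRACE_STOP.
 */
static int sample_gpregs(struct task_struct *target,
			 struct utrace_engine *engine,
			 void *buf, unsigned int size)
{
	const struct user_regset_view *view = task_user_regset_view(target);
	const struct user_regset *regset = &view->regsets[0];
	struct utrace_examiner exam;
	int ret;

	do {
		ret = utrace_prepare_examine(target, engine, &exam);
		if (ret)
			return ret;
		ret = regset->get(target, regset, 0, size, buf, NULL);
		if (ret)
			return ret;
		ret = utrace_finish_examine(target, engine, &exam);
	} while (ret == -EAGAIN);

	return ret;
}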
+/*
+ * This is declared in linux/regset.h and defined in machine-dependent
+ * code.  We put the export here to ensure no machine forgets it.
+ */
+EXPORT_SYMBOL_GPL(task_user_regset_view);
+
+/*
+ * Called with rcu_read_lock() held.
+ */
+void task_utrace_proc_status(struct seq_file *m, struct task_struct *p)
+{
+	seq_printf(m, "Utrace:\t%lx\n", p->utrace_flags);
+}
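For reference, this emits the task's utrace_flags in hex as a new line
in /proc/PID/status; for a task with no engines attached it reads:

	Utrace:	0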