Merge tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Dec 2014 03:58:13 +0000 (19:58 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Dec 2014 03:58:13 +0000 (19:58 -0800)
Pull tracing updates from Steven Rostedt:
 "There were a lot of clean ups and minor fixes.  One of those clean ups
  was to the trace_seq code.  It also removed the return values from the
  trace_seq_*() functions and uses trace_seq_has_overflowed() to see if
  the buffer filled up or not.  This is similar to work being done to
  the seq_file code as well in another tree.

  Some of the other goodies include:

   - Added some "!" (NOT) logic to the tracing filter.

   - Fixed the frame pointer logic to the x86_64 mcount trampolines

   - Added the logic for dynamic trampolines on !CONFIG_PREEMPT systems.
     That is, the ftrace trampoline can be dynamically allocated and be
     called directly by functions that only have a single hook to them"

* tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (55 commits)
  tracing: Truncated output is better than nothing
  tracing: Add additional marks to signal very large time deltas
  Documentation: describe trace_buf_size parameter more accurately
  tracing: Allow NOT to filter AND and OR clauses
  tracing: Add NOT to filtering logic
  ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
  ftrace/x86: Get rid of ftrace_caller_setup
  ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
  ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
  ftrace/x86: Simplify save_mcount_regs on getting RIP
  ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
  ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
  ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
  ftrace/x86: Have static tracing also use ftrace_caller_setup
  ftrace/x86: Have static function tracing always test for function graph
  kprobes: Add IPMODIFY flag to kprobe_ftrace_ops
  ftrace, kprobes: Support IPMODIFY flag to find IP modify conflict
  kprobes/ftrace: Recover original IP if pre_handler doesn't change it
  tracing/trivial: Fix typos and make an int into a bool
  tracing: Deletion of an unnecessary check before iput()
  ...

1  2 
Documentation/kernel-parameters.txt
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_syscalls.c

index d6eb3636fe5a4c12205e028da84f0e09b7d704ae,1d09eb37c562f94a9de306c25349f79b378314de..32ba9ea5934ac141a52dde25e86a1a51a9f766f4
@@@ -1015,14 -1015,10 +1015,14 @@@ bytes respectively. Such letter suffixe
                        Format: {"off" | "on" | "skip[mbr]"}
  
        efi=            [EFI]
 -                      Format: { "old_map" }
 +                      Format: { "old_map", "nochunk", "noruntime" }
                        old_map [X86-64]: switch to the old ioremap-based EFI
                        runtime services mapping. 32-bit still uses this one by
                        default.
 +                      nochunk: disable reading files in "chunks" in the EFI
 +                      boot stub, as chunking can cause problems with some
 +                      firmware implementations.
 +                      noruntime : disable EFI runtime services support
  
        efi_no_storage_paranoia [EFI; X86]
                        Using this parameter you can use more than 50% of
        i8042.noloop    [HW] Disable the AUX Loopback command while probing
                             for the AUX port
        i8042.nomux     [HW] Don't check presence of an active multiplexing
 -                           controller. Default: true.
 +                           controller
        i8042.nopnp     [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
                             controllers
        i8042.notimeout [HW] Ignore timeout condition signalled by controller
                        .cdrom .chs .ignore_cable are additional options
                        See Documentation/ide/ide.txt.
  
 +      ide-generic.probe-mask= [HW] (E)IDE subsystem
 +                      Format: <int>
 +                      Probe mask for legacy ISA IDE ports.  Depending on
 +                      platform up to 6 ports are supported, enabled by
 +                      setting corresponding bits in the mask to 1.  The
 +                      default value is 0x0, which has a special meaning.
 +                      On systems that have PCI, it triggers scanning the
 +                      PCI bus for the first and the second port, which
 +                      are then probed.  On systems without PCI the value
 +                      of 0x0 enables probing the two first ports as if it
 +                      was 0x3.
 +
        ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
                        Claim all unknown PCI IDE storage controllers.
  
        kmemleak=       [KNL] Boot-time kmemleak enable/disable
                        Valid arguments: on, off
                        Default: on
 +                      Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
 +                      the default is off.
  
        kmemcheck=      [X86] Boot-time kmemcheck enable/disable/one-shot mode
                        Valid arguments: 0, 1, 2
  
        nodsp           [SH] Disable hardware DSP at boot time.
  
 -      noefi           [X86] Disable EFI runtime services support.
 +      noefi           Disable EFI runtime services support.
  
        noexec          [IA-64]
  
                        timeout < 0: reboot immediately
                        Format: <timeout>
  
 +      panic_on_warn   panic() instead of WARN().  Useful to cause kdump
 +                      on a WARN().
 +
        crash_kexec_post_notifiers
                        Run kdump after running panic-notifiers and dumping
                        kmsg. This only for the users who doubt kdump always
                        quiescent states.  Units are jiffies, minimum
                        value is one, and maximum value is HZ.
  
 +      rcutree.kthread_prio=    [KNL,BOOT]
 +                      Set the SCHED_FIFO priority of the RCU
 +                      per-CPU kthreads (rcuc/N). This value is also
 +                      used for the priority of the RCU boost threads
 +                      (rcub/N). Valid values are 1-99 and the default
 +                      is 1 (the least-favored priority).
 +
        rcutree.rcu_nocb_leader_stride= [KNL]
                        Set the number of NOCB kthread groups, which
                        defaults to the square root of the number of
                        messages.  Disable with a value less than or equal
                        to zero.
  
 +      rcupdate.rcu_self_test= [KNL]
 +                      Run the RCU early boot self tests
 +
 +      rcupdate.rcu_self_test_bh= [KNL]
 +                      Run the RCU bh early boot self tests
 +
 +      rcupdate.rcu_self_test_sched= [KNL]
 +                      Run the RCU sched early boot self tests
 +
        rdinit=         [KNL]
                        Format: <full_path>
                        Run specified binary instead of /init from the ramdisk,
                        e.g. base its process migration decisions on it.
                        Default is on.
  
 +      topology_updates= [KNL, PPC, NUMA]
 +                      Format: {off}
 +                      Specify if the kernel should ignore (off)
 +                      topology updates sent by the hypervisor to this
 +                      LPAR.
 +
        tp720=          [HW,PS2]
  
        tpm_suspend_pcr=[HW,TPM]
                        are saved.
  
        trace_buf_size=nn[KMG]
-                       [FTRACE] will set tracing buffer size.
+                       [FTRACE] will set tracing buffer size on each cpu.
  
        trace_event=[event-list]
                        [FTRACE] Set and start specified trace events in order
  
        usb-storage.delay_use=
                        [UMS] The delay in seconds before a new device is
 -                      scanned for Logical Units (default 5).
 +                      scanned for Logical Units (default 1).
  
        usb-storage.quirks=
                        [UMS] A list of quirks entries to supplement or
index a56e07c8d15b8b730eb54f2020a018ff440eec6c,a28bdd17c853ed5cb9ce7fc1a843b82575579148..7a4104cb95cb28792364d60c0abb5826a3a0aad5
@@@ -34,21 -34,19 +34,19 @@@ static void update_pages_handler(struc
   */
  int ring_buffer_print_entry_header(struct trace_seq *s)
  {
-       int ret;
-       ret = trace_seq_puts(s, "# compressed entry header\n");
-       ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
-       ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
-       ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
-       ret = trace_seq_putc(s, '\n');
-       ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
-                              RINGBUF_TYPE_PADDING);
-       ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
-                              RINGBUF_TYPE_TIME_EXTEND);
-       ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
-                              RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
-       return ret;
+       trace_seq_puts(s, "# compressed entry header\n");
+       trace_seq_puts(s, "\ttype_len    :    5 bits\n");
+       trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
+       trace_seq_puts(s, "\tarray       :   32 bits\n");
+       trace_seq_putc(s, '\n');
+       trace_seq_printf(s, "\tpadding     : type == %d\n",
+                        RINGBUF_TYPE_PADDING);
+       trace_seq_printf(s, "\ttime_extend : type == %d\n",
+                        RINGBUF_TYPE_TIME_EXTEND);
+       trace_seq_printf(s, "\tdata max type_len  == %d\n",
+                        RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+       return !trace_seq_has_overflowed(s);
  }
  
  /*
@@@ -419,32 -417,31 +417,31 @@@ static inline int test_time_stamp(u64 d
  int ring_buffer_print_page_header(struct trace_seq *s)
  {
        struct buffer_data_page field;
-       int ret;
-       ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-                              "offset:0;\tsize:%u;\tsigned:%u;\n",
-                              (unsigned int)sizeof(field.time_stamp),
-                              (unsigned int)is_signed_type(u64));
-       ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                              (unsigned int)offsetof(typeof(field), commit),
-                              (unsigned int)sizeof(field.commit),
-                              (unsigned int)is_signed_type(long));
-       ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
-                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                              (unsigned int)offsetof(typeof(field), commit),
-                              1,
-                              (unsigned int)is_signed_type(long));
-       ret = trace_seq_printf(s, "\tfield: char data;\t"
-                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                              (unsigned int)offsetof(typeof(field), data),
-                              (unsigned int)BUF_PAGE_SIZE,
-                              (unsigned int)is_signed_type(char));
-       return ret;
+       trace_seq_printf(s, "\tfield: u64 timestamp;\t"
+                        "offset:0;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)sizeof(field.time_stamp),
+                        (unsigned int)is_signed_type(u64));
+       trace_seq_printf(s, "\tfield: local_t commit;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), commit),
+                        (unsigned int)sizeof(field.commit),
+                        (unsigned int)is_signed_type(long));
+       trace_seq_printf(s, "\tfield: int overwrite;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), commit),
+                        1,
+                        (unsigned int)is_signed_type(long));
+       trace_seq_printf(s, "\tfield: char data;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), data),
+                        (unsigned int)BUF_PAGE_SIZE,
+                        (unsigned int)is_signed_type(char));
+       return !trace_seq_has_overflowed(s);
  }
  
  struct rb_irq_work {
@@@ -538,18 -535,16 +535,18 @@@ static void rb_wake_up_waiters(struct i
   * ring_buffer_wait - wait for input to the ring buffer
   * @buffer: buffer to wait on
   * @cpu: the cpu buffer to wait on
 + * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
   *
   * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
   * as data is added to any of the @buffer's cpu buffers. Otherwise
   * it will wait for data to be added to a specific cpu buffer.
   */
 -int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
  {
 -      struct ring_buffer_per_cpu *cpu_buffer;
 +      struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
 +      int ret = 0;
  
        /*
         * Depending on what the caller is waiting for, either any
        }
  
  
 -      prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 +      while (true) {
 +              prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 +
 +              /*
 +               * The events can happen in critical sections where
 +               * checking a work queue can cause deadlocks.
 +               * After adding a task to the queue, this flag is set
 +               * only to notify events to try to wake up the queue
 +               * using irq_work.
 +               *
 +               * We don't clear it even if the buffer is no longer
 +               * empty. The flag only causes the next event to run
 +               * irq_work to do the work queue wake up. The worse
 +               * that can happen if we race with !trace_empty() is that
 +               * an event will cause an irq_work to try to wake up
 +               * an empty queue.
 +               *
 +               * There's no reason to protect this flag either, as
 +               * the work queue and irq_work logic will do the necessary
 +               * synchronization for the wake ups. The only thing
 +               * that is necessary is that the wake up happens after
 +               * a task has been queued. It's OK for spurious wake ups.
 +               */
 +              work->waiters_pending = true;
 +
 +              if (signal_pending(current)) {
 +                      ret = -EINTR;
 +                      break;
 +              }
 +
 +              if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 +                      break;
 +
 +              if (cpu != RING_BUFFER_ALL_CPUS &&
 +                  !ring_buffer_empty_cpu(buffer, cpu)) {
 +                      unsigned long flags;
 +                      bool pagebusy;
 +
 +                      if (!full)
 +                              break;
 +
 +                      raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 +                      pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 +                      raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 +
 +                      if (!pagebusy)
 +                              break;
 +              }
  
 -      /*
 -       * The events can happen in critical sections where
 -       * checking a work queue can cause deadlocks.
 -       * After adding a task to the queue, this flag is set
 -       * only to notify events to try to wake up the queue
 -       * using irq_work.
 -       *
 -       * We don't clear it even if the buffer is no longer
 -       * empty. The flag only causes the next event to run
 -       * irq_work to do the work queue wake up. The worse
 -       * that can happen if we race with !trace_empty() is that
 -       * an event will cause an irq_work to try to wake up
 -       * an empty queue.
 -       *
 -       * There's no reason to protect this flag either, as
 -       * the work queue and irq_work logic will do the necessary
 -       * synchronization for the wake ups. The only thing
 -       * that is necessary is that the wake up happens after
 -       * a task has been queued. It's OK for spurious wake ups.
 -       */
 -      work->waiters_pending = true;
 -
 -      if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
 -          (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
                schedule();
 +      }
  
        finish_wait(&work->waiters, &wait);
 -      return 0;
 +
 +      return ret;
  }
  
  /**
diff --combined kernel/trace/trace.c
index 426962b041838bc8706f8ed048689d443afd56c4,4ceb2546c7efb98d2509a79e92816bb8f7282ef7..ce11fa50a2f01de7cfa01eef4ffecd2c0e6c98a8
@@@ -155,10 -155,11 +155,11 @@@ __setup("ftrace_dump_on_oops", set_ftra
  
  static int __init stop_trace_on_warning(char *str)
  {
-       __disable_trace_on_warning = 1;
+       if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+               __disable_trace_on_warning = 1;
        return 1;
  }
- __setup("traceoff_on_warning=", stop_trace_on_warning);
+ __setup("traceoff_on_warning", stop_trace_on_warning);
  
  static int __init boot_alloc_snapshot(char *str)
  {
@@@ -1076,14 -1077,13 +1077,14 @@@ update_max_tr_single(struct trace_arra
  }
  #endif /* CONFIG_TRACER_MAX_TRACE */
  
 -static int wait_on_pipe(struct trace_iterator *iter)
 +static int wait_on_pipe(struct trace_iterator *iter, bool full)
  {
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
  
 -      return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 +      return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
 +                              full);
  }
  
  #ifdef CONFIG_FTRACE_STARTUP_TEST
@@@ -2158,9 -2158,7 +2159,7 @@@ __trace_array_vprintk(struct ring_buffe
                goto out;
        }
  
-       len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
-       if (len > TRACE_BUF_SIZE)
-               goto out;
+       len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
  
        local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
  
-       memcpy(&entry->buf, tbuffer, len);
-       entry->buf[len] = '\0';
+       memcpy(&entry->buf, tbuffer, len + 1);
        if (!call_filter_check_discard(call, entry, buffer, event)) {
                __buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(buffer, flags, 6, pc);
@@@ -2509,14 -2506,14 +2507,14 @@@ get_total_entries(struct trace_buffer *
  
  static void print_lat_help_header(struct seq_file *m)
  {
-       seq_puts(m, "#                  _------=> CPU#            \n");
-       seq_puts(m, "#                 / _-----=> irqs-off        \n");
-       seq_puts(m, "#                | / _----=> need-resched    \n");
-       seq_puts(m, "#                || / _---=> hardirq/softirq \n");
-       seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
-       seq_puts(m, "#                |||| /     delay             \n");
-       seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
-       seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
+       seq_puts(m, "#                  _------=> CPU#            \n"
+                   "#                 / _-----=> irqs-off        \n"
+                   "#                | / _----=> need-resched    \n"
+                   "#                || / _---=> hardirq/softirq \n"
+                   "#                ||| / _--=> preempt-depth   \n"
+                   "#                |||| /     delay            \n"
+                   "#  cmd     pid   ||||| time  |   caller      \n"
+                   "#     \\   /      |||||  \\    |   /         \n");
  }
  
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
  static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
  {
        print_event_info(buf, m);
-       seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
-       seq_puts(m, "#              | |       |          |         |\n");
+       seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
+                   "#              | |       |          |         |\n");
  }
  
  static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
  {
        print_event_info(buf, m);
-       seq_puts(m, "#                              _-----=> irqs-off\n");
-       seq_puts(m, "#                             / _----=> need-resched\n");
-       seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
-       seq_puts(m, "#                            || / _--=> preempt-depth\n");
-       seq_puts(m, "#                            ||| /     delay\n");
-       seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
-       seq_puts(m, "#              | |       |   ||||       |         |\n");
+       seq_puts(m, "#                              _-----=> irqs-off\n"
+                   "#                             / _----=> need-resched\n"
+                   "#                            | / _---=> hardirq/softirq\n"
+                   "#                            || / _--=> preempt-depth\n"
+                   "#                            ||| /     delay\n"
+                   "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
+                   "#              | |       |   ||||       |         |\n");
  }
  
  void
@@@ -2649,24 -2646,21 +2647,21 @@@ static enum print_line_t print_trace_fm
        event = ftrace_find_event(entry->type);
  
        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-               if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-                       if (!trace_print_lat_context(iter))
-                               goto partial;
-               } else {
-                       if (!trace_print_context(iter))
-                               goto partial;
-               }
+               if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+                       trace_print_lat_context(iter);
+               else
+                       trace_print_context(iter);
        }
  
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
        if (event)
                return event->funcs->trace(iter, sym_flags, event);
  
-       if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
-               goto partial;
+       trace_seq_printf(s, "Unknown type %d\n", entry->type);
  
-       return TRACE_TYPE_HANDLED;
- partial:
-       return TRACE_TYPE_PARTIAL_LINE;
+       return trace_handle_return(s);
  }
  
  static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
  
        entry = iter->ent;
  
-       if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-               if (!trace_seq_printf(s, "%d %d %llu ",
-                                     entry->pid, iter->cpu, iter->ts))
-                       goto partial;
-       }
+       if (trace_flags & TRACE_ITER_CONTEXT_INFO)
+               trace_seq_printf(s, "%d %d %llu ",
+                                entry->pid, iter->cpu, iter->ts);
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
  
        event = ftrace_find_event(entry->type);
        if (event)
                return event->funcs->raw(iter, 0, event);
  
-       if (!trace_seq_printf(s, "%d ?\n", entry->type))
-               goto partial;
+       trace_seq_printf(s, "%d ?\n", entry->type);
  
-       return TRACE_TYPE_HANDLED;
- partial:
-       return TRACE_TYPE_PARTIAL_LINE;
+       return trace_handle_return(s);
  }
  
  static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
        entry = iter->ent;
  
        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-               SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
-               SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
-               SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
+               SEQ_PUT_HEX_FIELD(s, entry->pid);
+               SEQ_PUT_HEX_FIELD(s, iter->cpu);
+               SEQ_PUT_HEX_FIELD(s, iter->ts);
+               if (trace_seq_has_overflowed(s))
+                       return TRACE_TYPE_PARTIAL_LINE;
        }
  
        event = ftrace_find_event(entry->type);
                        return ret;
        }
  
-       SEQ_PUT_FIELD_RET(s, newline);
+       SEQ_PUT_FIELD(s, newline);
  
-       return TRACE_TYPE_HANDLED;
+       return trace_handle_return(s);
  }
  
  static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
        entry = iter->ent;
  
        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-               SEQ_PUT_FIELD_RET(s, entry->pid);
-               SEQ_PUT_FIELD_RET(s, iter->cpu);
-               SEQ_PUT_FIELD_RET(s, iter->ts);
+               SEQ_PUT_FIELD(s, entry->pid);
+               SEQ_PUT_FIELD(s, iter->cpu);
+               SEQ_PUT_FIELD(s, iter->ts);
+               if (trace_seq_has_overflowed(s))
+                       return TRACE_TYPE_PARTIAL_LINE;
        }
  
        event = ftrace_find_event(entry->type);
@@@ -2779,10 -2775,12 +2776,12 @@@ enum print_line_t print_trace_line(stru
  {
        enum print_line_t ret;
  
-       if (iter->lost_events &&
-           !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
-                                iter->cpu, iter->lost_events))
-               return TRACE_TYPE_PARTIAL_LINE;
+       if (iter->lost_events) {
+               trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+                                iter->cpu, iter->lost_events);
+               if (trace_seq_has_overflowed(&iter->seq))
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
  
        if (iter->trace && iter->trace->print_line) {
                ret = iter->trace->print_line(iter);
@@@ -2860,44 -2858,44 +2859,44 @@@ static void test_ftrace_alive(struct se
  {
        if (!ftrace_is_dead())
                return;
-       seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-       seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+       seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
+                   "#          MAY BE MISSING FUNCTION EVENTS\n");
  }
  
  #ifdef CONFIG_TRACER_MAX_TRACE
  static void show_snapshot_main_help(struct seq_file *m)
  {
-       seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
-       seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
-       seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-       seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
-       seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
-       seq_printf(m, "#                       is not a '0' or '1')\n");
+       seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
+                   "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
+                   "#                      Takes a snapshot of the main buffer.\n"
+                   "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
+                   "#                      (Doesn't have to be '2' works with any number that\n"
+                   "#                       is not a '0' or '1')\n");
  }
  
  static void show_snapshot_percpu_help(struct seq_file *m)
  {
-       seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
+       seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
  #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-       seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
-       seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
+       seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
+                   "#                      Takes a snapshot of the main buffer for this cpu.\n");
  #else
-       seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
-       seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
+       seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
+                   "#                     Must use main snapshot file to allocate.\n");
  #endif
-       seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
-       seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
-       seq_printf(m, "#                       is not a '0' or '1')\n");
+       seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
+                   "#                      (Doesn't have to be '2' works with any number that\n"
+                   "#                       is not a '0' or '1')\n");
  }
  
  static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
  {
        if (iter->tr->allocated_snapshot)
-               seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+               seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
        else
-               seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+               seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
  
-       seq_printf(m, "# Snapshot commands:\n");
+       seq_puts(m, "# Snapshot commands:\n");
        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
                show_snapshot_main_help(m);
        else
@@@ -3251,7 -3249,7 +3250,7 @@@ static int t_show(struct seq_file *m, v
        if (!t)
                return 0;
  
-       seq_printf(m, "%s", t->name);
+       seq_puts(m, t->name);
        if (t->next)
                seq_putc(m, ' ');
        else
@@@ -4435,12 -4433,15 +4434,12 @@@ static int tracing_wait_pipe(struct fil
  
                mutex_unlock(&iter->mutex);
  
 -              ret = wait_on_pipe(iter);
 +              ret = wait_on_pipe(iter, false);
  
                mutex_lock(&iter->mutex);
  
                if (ret)
                        return ret;
 -
 -              if (signal_pending(current))
 -                      return -EINTR;
        }
  
        return 1;
@@@ -5370,12 -5371,16 +5369,12 @@@ tracing_buffers_read(struct file *filp
                                goto out_unlock;
                        }
                        mutex_unlock(&trace_types_lock);
 -                      ret = wait_on_pipe(iter);
 +                      ret = wait_on_pipe(iter, false);
                        mutex_lock(&trace_types_lock);
                        if (ret) {
                                size = ret;
                                goto out_unlock;
                        }
 -                      if (signal_pending(current)) {
 -                              size = -EINTR;
 -                              goto out_unlock;
 -                      }
                        goto again;
                }
                size = 0;
@@@ -5494,7 -5499,7 +5493,7 @@@ tracing_buffers_splice_read(struct fil
        };
        struct buffer_ref *ref;
        int entries, size, i;
 -      ssize_t ret;
 +      ssize_t ret = 0;
  
        mutex_lock(&trace_types_lock);
  
                int r;
  
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 -              if (!ref)
 +              if (!ref) {
 +                      ret = -ENOMEM;
                        break;
 +              }
  
                ref->ref = 1;
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (!ref->page) {
 +                      ret = -ENOMEM;
                        kfree(ref);
                        break;
                }
  
        /* did we read anything? */
        if (!spd.nr_pages) {
 +              if (ret)
 +                      goto out;
 +
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&trace_types_lock);
 -              ret = wait_on_pipe(iter);
 +              ret = wait_on_pipe(iter, true);
                mutex_lock(&trace_types_lock);
                if (ret)
                        goto out;
 -              if (signal_pending(current)) {
 -                      ret = -EINTR;
 -                      goto out;
 -              }
 +
                goto again;
        }
  
@@@ -5749,10 -5751,10 +5748,10 @@@ ftrace_snapshot_print(struct seq_file *
  
        seq_printf(m, "%ps:", (void *)ip);
  
-       seq_printf(m, "snapshot");
+       seq_puts(m, "snapshot");
  
        if (count == -1)
-               seq_printf(m, ":unlimited\n");
+               seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);
  
@@@ -6417,7 -6419,7 +6416,7 @@@ static int instance_mkdir (struct inod
        int ret;
  
        /* Paranoid: Make sure the parent is the "instances" directory */
 -      parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
 +      parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
        if (WARN_ON_ONCE(parent != trace_instance_dir))
                return -ENOENT;
  
@@@ -6444,7 -6446,7 +6443,7 @@@ static int instance_rmdir(struct inode 
        int ret;
  
        /* Paranoid: Make sure the parent is the "instances" directory */
 -      parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
 +      parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
        if (WARN_ON_ONCE(parent != trace_instance_dir))
                return -ENOENT;
  
index 1b0df1e504f02b5d8828ec9d5ad2768b2916e173,f9d0cbe014b7aa331f34dffbcc732c77412b73a6..139716bcef7a5eccaf620040e7433aca3597ee77
@@@ -461,7 -461,7 +461,7 @@@ static void remove_event_file_dir(struc
  
        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
 -              list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
 +              list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (child->d_inode)     /* probably unneeded */
                                child->d_inode->i_private = NULL;
                }
@@@ -918,7 -918,7 +918,7 @@@ static int f_show(struct seq_file *m, v
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", ftrace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
-               seq_printf(m, "format:\n");
+               seq_puts(m, "format:\n");
                return 0;
  
        case FORMAT_FIELD_SEPERATOR:
@@@ -1988,7 -1988,7 +1988,7 @@@ event_enable_print(struct seq_file *m, 
                   ftrace_event_name(data->file->event_call));
  
        if (data->count == -1)
-               seq_printf(m, ":unlimited\n");
+               seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", data->count);
  
index 29228c4d569692ea4234d880d13e94233b3e32f4,a72f3d8d813efb1f98697cdd35f4cc19ccefa236..dfe00a4f3f3e062ac461f7d1aecc76c5a8adf67c
@@@ -114,7 -114,7 +114,7 @@@ print_syscall_enter(struct trace_iterat
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
-       int i, ret, syscall;
+       int i, syscall;
  
        trace = (typeof(trace))ent;
        syscall = trace->nr;
                goto end;
        }
  
-       ret = trace_seq_printf(s, "%s(", entry->name);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
+       trace_seq_printf(s, "%s(", entry->name);
  
        for (i = 0; i < entry->nb_args; i++) {
+               if (trace_seq_has_overflowed(s))
+                       goto end;
                /* parameter types */
-               if (trace_flags & TRACE_ITER_VERBOSE) {
-                       ret = trace_seq_printf(s, "%s ", entry->types[i]);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
+               if (trace_flags & TRACE_ITER_VERBOSE)
+                       trace_seq_printf(s, "%s ", entry->types[i]);
                /* parameter values */
-               ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
-                                      trace->args[i],
-                                      i == entry->nb_args - 1 ? "" : ", ");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
+               trace_seq_printf(s, "%s: %lx%s", entry->args[i],
+                                trace->args[i],
+                                i == entry->nb_args - 1 ? "" : ", ");
        }
  
-       ret = trace_seq_putc(s, ')');
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
+       trace_seq_putc(s, ')');
  end:
-       ret =  trace_seq_putc(s, '\n');
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
+       trace_seq_putc(s, '\n');
  
-       return TRACE_TYPE_HANDLED;
+       return trace_handle_return(s);
  }
  
  static enum print_line_t
@@@ -168,7 -161,6 +161,6 @@@ print_syscall_exit(struct trace_iterato
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
-       int ret;
  
        trace = (typeof(trace))ent;
        syscall = trace->nr;
  
        if (!entry) {
                trace_seq_putc(s, '\n');
-               return TRACE_TYPE_HANDLED;
+               goto out;
        }
  
        if (entry->exit_event->event.type != ent->type) {
                return TRACE_TYPE_UNHANDLED;
        }
  
-       ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
+       trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                                trace->ret);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
  
-       return TRACE_TYPE_HANDLED;
+  out:
+       return trace_handle_return(s);
  }
  
  extern char *__bad_type_size(void);
@@@ -313,7 -304,7 +304,7 @@@ static void ftrace_syscall_enter(void *
        int size;
  
        syscall_nr = trace_get_syscall_nr(current, regs);
 -      if (syscall_nr < 0)
 +      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
  
        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@@ -360,7 -351,7 +351,7 @@@ static void ftrace_syscall_exit(void *d
        int syscall_nr;
  
        syscall_nr = trace_get_syscall_nr(current, regs);
 -      if (syscall_nr < 0)
 +      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
  
        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@@ -567,7 -558,7 +558,7 @@@ static void perf_syscall_enter(void *ig
        int size;
  
        syscall_nr = trace_get_syscall_nr(current, regs);
 -      if (syscall_nr < 0)
 +      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
@@@ -641,7 -632,7 +632,7 @@@ static void perf_syscall_exit(void *ign
        int size;
  
        syscall_nr = trace_get_syscall_nr(current, regs);
 -      if (syscall_nr < 0)
 +      if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;