Version:  2.0.40 2.2.26 2.4.37 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 4.10

Linux/kernel/trace/Kconfig

  1 #
  2 # Architectures that offer a FUNCTION_TRACER implementation should
  3 #  select HAVE_FUNCTION_TRACER:
  4 #
  5 
  6 config USER_STACKTRACE_SUPPORT
  7         bool
  8 
  9 config NOP_TRACER
 10         bool
 11 
 12 config HAVE_FTRACE_NMI_ENTER
 13         bool
 14         help
 15           See Documentation/trace/ftrace-design.txt
 16 
 17 config HAVE_FUNCTION_TRACER
 18         bool
 19         help
 20           See Documentation/trace/ftrace-design.txt
 21 
 22 config HAVE_FUNCTION_GRAPH_TRACER
 23         bool
 24         help
 25           See Documentation/trace/ftrace-design.txt
 26 
 27 config HAVE_DYNAMIC_FTRACE
 28         bool
 29         help
 30           See Documentation/trace/ftrace-design.txt
 31 
 32 config HAVE_DYNAMIC_FTRACE_WITH_REGS
 33         bool
 34 
 35 config HAVE_FTRACE_MCOUNT_RECORD
 36         bool
 37         help
 38           See Documentation/trace/ftrace-design.txt
 39 
 40 config HAVE_SYSCALL_TRACEPOINTS
 41         bool
 42         help
 43           See Documentation/trace/ftrace-design.txt
 44 
 45 config HAVE_FENTRY
 46         bool
 47         help
 48           Arch supports the gcc options -pg with -mfentry
 49 
 50 config HAVE_C_RECORDMCOUNT
 51         bool
 52         help
 53           C version of recordmcount available?
 54 
 55 config TRACER_MAX_TRACE
 56         bool
 57 
 58 config TRACE_CLOCK
 59         bool
 60 
 61 config RING_BUFFER
 62         bool
 63         select TRACE_CLOCK
 64         select IRQ_WORK
 65 
 66 config FTRACE_NMI_ENTER
 67        bool
 68        depends on HAVE_FTRACE_NMI_ENTER
 69        default y
 70 
 71 config EVENT_TRACING
 72         select CONTEXT_SWITCH_TRACER
 73         select GLOB
 74         bool
 75 
 76 config CONTEXT_SWITCH_TRACER
 77         bool
 78 
 79 config RING_BUFFER_ALLOW_SWAP
 80         bool
 81         help
 82          Allow the use of ring_buffer_swap_cpu.
 83          Adds a very slight overhead to tracing when enabled.
 84 
 85 # All tracer options should select GENERIC_TRACER. For those options that are
 86 # enabled by all tracers (context switch and event tracer) they select TRACING.
 87 # This allows those options to appear when no other tracer is selected. But the
 88 # options do not appear when something else selects it. We need the two options
 89 # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
 90 # hiding of the automatic options.
 91 
 92 config TRACING
 93         bool
 94         select DEBUG_FS
 95         select RING_BUFFER
 96         select STACKTRACE if STACKTRACE_SUPPORT
 97         select TRACEPOINTS
 98         select NOP_TRACER
 99         select BINARY_PRINTF
100         select EVENT_TRACING
101         select TRACE_CLOCK
102 
103 config GENERIC_TRACER
104         bool
105         select TRACING
106 
107 #
108 # Minimum requirements an architecture has to meet for us to
109 # be able to offer generic tracing facilities:
110 #
111 config TRACING_SUPPORT
112         bool
113         # PPC32 has no irqflags tracing support, but it can use most of the
114         # tracers anyway, they were tested to build and work. Note that new
115         # exceptions to this list aren't welcomed, better implement the
116         # irqflags tracing for your architecture.
117         depends on TRACE_IRQFLAGS_SUPPORT || PPC32
118         depends on STACKTRACE_SUPPORT
119         default y
120 
121 if TRACING_SUPPORT
122 
123 menuconfig FTRACE
124         bool "Tracers"
125         default y if DEBUG_KERNEL
126         help
127           Enable the kernel tracing infrastructure.
128 
129 if FTRACE
130 
131 config FUNCTION_TRACER
132         bool "Kernel Function Tracer"
133         depends on HAVE_FUNCTION_TRACER
134         select KALLSYMS
135         select GENERIC_TRACER
136         select CONTEXT_SWITCH_TRACER
137         select GLOB
138         help
139           Enable the kernel to trace every kernel function. This is done
140           by using a compiler feature to insert a small, 5-byte No-Operation
141           instruction at the beginning of every kernel function, which NOP
142           sequence is then dynamically patched into a tracer call when
143           tracing is enabled by the administrator. If it's runtime disabled
144           (the bootup default), then the overhead of the instructions is very
145           small and not measurable even in micro-benchmarks.
146 
147 config FUNCTION_GRAPH_TRACER
148         bool "Kernel Function Graph Tracer"
149         depends on HAVE_FUNCTION_GRAPH_TRACER
150         depends on FUNCTION_TRACER
151         depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
152         default y
153         help
154           Enable the kernel to trace a function at both its return
155           and its entry.
156           Its first purpose is to trace the duration of functions and
157           draw a call graph for each thread with some information like
158           the return value. This is done by setting the current return
159           address on the current task structure into a stack of calls.
160 
161 
162 config IRQSOFF_TRACER
163         bool "Interrupts-off Latency Tracer"
164         default n
165         depends on TRACE_IRQFLAGS_SUPPORT
166         depends on !ARCH_USES_GETTIMEOFFSET
167         select TRACE_IRQFLAGS
168         select GENERIC_TRACER
169         select TRACER_MAX_TRACE
170         select RING_BUFFER_ALLOW_SWAP
171         select TRACER_SNAPSHOT
172         select TRACER_SNAPSHOT_PER_CPU_SWAP
173         help
174           This option measures the time spent in irqs-off critical
175           sections, with microsecond accuracy.
176 
177           The default measurement method is a maximum search, which is
178           disabled by default and can be runtime (re-)started
179           via:
180 
181               echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
182 
183           (Note that kernel size and overhead increase with this option
184           enabled. This option and the preempt-off timing option can be
185           used together or separately.)
186 
187 config PREEMPT_TRACER
188         bool "Preemption-off Latency Tracer"
189         default n
190         depends on !ARCH_USES_GETTIMEOFFSET
191         depends on PREEMPT
192         select GENERIC_TRACER
193         select TRACER_MAX_TRACE
194         select RING_BUFFER_ALLOW_SWAP
195         select TRACER_SNAPSHOT
196         select TRACER_SNAPSHOT_PER_CPU_SWAP
197         help
198           This option measures the time spent in preemption-off critical
199           sections, with microsecond accuracy.
200 
201           The default measurement method is a maximum search, which is
202           disabled by default and can be runtime (re-)started
203           via:
204 
205               echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
206 
207           (Note that kernel size and overhead increase with this option
208           enabled. This option and the irqs-off timing option can be
209           used together or separately.)
210 
211 config SCHED_TRACER
212         bool "Scheduling Latency Tracer"
213         select GENERIC_TRACER
214         select CONTEXT_SWITCH_TRACER
215         select TRACER_MAX_TRACE
216         select TRACER_SNAPSHOT
217         help
218           This tracer tracks the latency of the highest priority task
219           to be scheduled in, starting from the point it has woken up.
220 
221 config HWLAT_TRACER
222         bool "Tracer to detect hardware latencies (like SMIs)"
223         select GENERIC_TRACER
224         help
 225          This tracer, when enabled, will create one or more kernel threads,
 226          depending on what the cpumask file is set to, with each thread
 227          spinning in a loop looking for interruptions caused by
228          something other than the kernel. For example, if a
229          System Management Interrupt (SMI) takes a noticeable amount of
230          time, this tracer will detect it. This is useful for testing
231          if a system is reliable for Real Time tasks.
232 
233          Some files are created in the tracing directory when this
234          is enabled:
235 
236            hwlat_detector/width   - time in usecs for how long to spin for
237            hwlat_detector/window  - time in usecs between the start of each
238                                      iteration
239 
240          A kernel thread is created that will spin with interrupts disabled
 241          for "width" microseconds in every "window" cycle. It will not spin
242          for "window - width" microseconds, where the system can
243          continue to operate.
244 
245          The output will appear in the trace and trace_pipe files.
246 
 247          When the tracer is not running, it has no effect on the system,
248          but when it is running, it can cause the system to be
249          periodically non responsive. Do not run this tracer on a
250          production system.
251 
252          To enable this tracer, echo in "hwlat" into the current_tracer
253          file. Every time a latency is greater than tracing_thresh, it will
254          be recorded into the ring buffer.
255 
256 config ENABLE_DEFAULT_TRACERS
257         bool "Trace process context switches and events"
258         depends on !GENERIC_TRACER
259         select TRACING
260         help
261           This tracer hooks to various trace points in the kernel,
262           allowing the user to pick and choose which trace point they
263           want to trace. It also includes the sched_switch tracer plugin.
264 
265 config FTRACE_SYSCALLS
266         bool "Trace syscalls"
267         depends on HAVE_SYSCALL_TRACEPOINTS
268         select GENERIC_TRACER
269         select KALLSYMS
270         help
271           Basic tracer to catch the syscall entry and exit events.
272 
273 config TRACER_SNAPSHOT
274         bool "Create a snapshot trace buffer"
275         select TRACER_MAX_TRACE
276         help
277           Allow tracing users to take snapshot of the current buffer using the
278           ftrace interface, e.g.:
279 
280               echo 1 > /sys/kernel/debug/tracing/snapshot
281               cat snapshot
282 
283 config TRACER_SNAPSHOT_PER_CPU_SWAP
284         bool "Allow snapshot to swap per CPU"
285         depends on TRACER_SNAPSHOT
286         select RING_BUFFER_ALLOW_SWAP
287         help
288           Allow doing a snapshot of a single CPU buffer instead of a
289           full swap (all buffers). If this is set, then the following is
290           allowed:
291 
292               echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
293 
294           After which, only the tracing buffer for CPU 2 was swapped with
295           the main tracing buffer, and the other CPU buffers remain the same.
296 
297           When this is enabled, this adds a little more overhead to the
298           trace recording, as it needs to add some checks to synchronize
299           recording with swaps. But this does not affect the performance
300           of the overall system. This is enabled by default when the preempt
301           or irq latency tracers are enabled, as those need to swap as well
302           and already adds the overhead (plus a lot more).
303 
304 config TRACE_BRANCH_PROFILING
305         bool
306         select GENERIC_TRACER
307 
308 choice
309         prompt "Branch Profiling"
310         default BRANCH_PROFILE_NONE
311         help
312          The branch profiling is a software profiler. It will add hooks
313          into the C conditionals to test which path a branch takes.
314 
315          The likely/unlikely profiler only looks at the conditions that
316          are annotated with a likely or unlikely macro.
317 
318          The "all branch" profiler will profile every if-statement in the
319          kernel. This profiler will also enable the likely/unlikely
320          profiler.
321 
322          Either of the above profilers adds a bit of overhead to the system.
323          If unsure, choose "No branch profiling".
324 
325 config BRANCH_PROFILE_NONE
326         bool "No branch profiling"
327         help
328           No branch profiling. Branch profiling adds a bit of overhead.
329           Only enable it if you want to analyse the branching behavior.
330           Otherwise keep it disabled.
331 
332 config PROFILE_ANNOTATED_BRANCHES
333         bool "Trace likely/unlikely profiler"
334         select TRACE_BRANCH_PROFILING
335         help
336           This tracer profiles all likely and unlikely macros
337           in the kernel. It will display the results in:
338 
339           /sys/kernel/debug/tracing/trace_stat/branch_annotated
340 
341           Note: this will add a significant overhead; only turn this
342           on if you need to profile the system's use of these macros.
343 
344 config PROFILE_ALL_BRANCHES
345         bool "Profile all if conditionals"
346         select TRACE_BRANCH_PROFILING
347         help
348           This tracer profiles all branch conditions. Every if ()
349           taken in the kernel is recorded whether it hit or miss.
350           The results will be displayed in:
351 
352           /sys/kernel/debug/tracing/trace_stat/branch_all
353 
354           This option also enables the likely/unlikely profiler.
355 
356           This configuration, when enabled, will impose a great overhead
357           on the system. This should only be enabled when the system
358           is to be analyzed in much detail.
359 endchoice
360 
361 config TRACING_BRANCHES
362         bool
363         help
364           Selected by tracers that will trace the likely and unlikely
365           conditions. This prevents the tracers themselves from being
366           profiled. Profiling the tracing infrastructure can only happen
367           when the likelys and unlikelys are not being traced.
368 
369 config BRANCH_TRACER
370         bool "Trace likely/unlikely instances"
371         depends on TRACE_BRANCH_PROFILING
372         select TRACING_BRANCHES
373         help
374           This traces the events of likely and unlikely condition
375           calls in the kernel.  The difference between this and the
376           "Trace likely/unlikely profiler" is that this is not a
377           histogram of the callers, but actually places the calling
378           events into a running trace buffer to see when and where the
379           events happened, as well as their results.
380 
381           Say N if unsure.
382 
383 config STACK_TRACER
384         bool "Trace max stack"
385         depends on HAVE_FUNCTION_TRACER
386         select FUNCTION_TRACER
387         select STACKTRACE
388         select KALLSYMS
389         help
390           This special tracer records the maximum stack footprint of the
391           kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
392 
393           This tracer works by hooking into every function call that the
394           kernel executes, and keeping a maximum stack depth value and
395           stack-trace saved.  If this is configured with DYNAMIC_FTRACE
396           then it will not have any overhead while the stack tracer
397           is disabled.
398 
399           To enable the stack tracer on bootup, pass in 'stacktrace'
400           on the kernel command line.
401 
402           The stack tracer can also be enabled or disabled via the
403           sysctl kernel.stack_tracer_enabled
404 
405           Say N if unsure.
406 
407 config BLK_DEV_IO_TRACE
408         bool "Support for tracing block IO actions"
409         depends on SYSFS
410         depends on BLOCK
411         select RELAY
412         select DEBUG_FS
413         select TRACEPOINTS
414         select GENERIC_TRACER
415         select STACKTRACE
416         help
417           Say Y here if you want to be able to trace the block layer actions
418           on a given queue. Tracing allows you to see any traffic happening
419           on a block device queue. For more information (and the userspace
420           support tools needed), fetch the blktrace tools from:
421 
422           git://git.kernel.dk/blktrace.git
423 
424           Tracing also is possible using the ftrace interface, e.g.:
425 
426             echo 1 > /sys/block/sda/sda1/trace/enable
427             echo blk > /sys/kernel/debug/tracing/current_tracer
428             cat /sys/kernel/debug/tracing/trace_pipe
429 
430           If unsure, say N.
431 
432 config KPROBE_EVENT
433         depends on KPROBES
434         depends on HAVE_REGS_AND_STACK_ACCESS_API
435         bool "Enable kprobes-based dynamic events"
436         select TRACING
437         select PROBE_EVENTS
438         default y
439         help
440           This allows the user to add tracing events (similar to tracepoints)
441           on the fly via the ftrace interface. See
442           Documentation/trace/kprobetrace.txt for more details.
443 
444           Those events can be inserted wherever kprobes can probe, and record
445           various register and memory values.
446 
447           This option is also required by perf-probe subcommand of perf tools.
448           If you want to use perf tools, this option is strongly recommended.
449 
450 config UPROBE_EVENT
451         bool "Enable uprobes-based dynamic events"
452         depends on ARCH_SUPPORTS_UPROBES
453         depends on MMU
454         depends on PERF_EVENTS
455         select UPROBES
456         select PROBE_EVENTS
457         select TRACING
458         default n
459         help
460           This allows the user to add tracing events on top of userspace
461           dynamic events (similar to tracepoints) on the fly via the trace
462           events interface. Those events can be inserted wherever uprobes
463           can probe, and record various registers.
464           This option is required if you plan to use perf-probe subcommand
465           of perf tools on user space applications.
466 
467 config BPF_EVENTS
468         depends on BPF_SYSCALL
469         depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
470         bool
471         default y
472         help
473           This allows the user to attach BPF programs to kprobe events.
474 
475 config PROBE_EVENTS
476         def_bool n
477 
478 config DYNAMIC_FTRACE
479         bool "enable/disable function tracing dynamically"
480         depends on FUNCTION_TRACER
481         depends on HAVE_DYNAMIC_FTRACE
482         default y
483         help
484           This option will modify all the calls to function tracing
485           dynamically (will patch them out of the binary image and
486           replace them with a No-Op instruction) on boot up. During
487           compile time, a table is made of all the locations that ftrace
488           can function trace, and this table is linked into the kernel
489           image. When this is enabled, functions can be individually
490           enabled, and the functions not enabled will not affect
491           performance of the system.
492 
493           See the files in /sys/kernel/debug/tracing:
494             available_filter_functions
495             set_ftrace_filter
496             set_ftrace_notrace
497 
498           This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
499           otherwise has native performance as long as no tracing is active.
500 
501 config DYNAMIC_FTRACE_WITH_REGS
502         def_bool y
503         depends on DYNAMIC_FTRACE
504         depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
505 
506 config FUNCTION_PROFILER
507         bool "Kernel function profiler"
508         depends on FUNCTION_TRACER
509         default n
510         help
511           This option enables the kernel function profiler. A file is created
512           in debugfs called function_profile_enabled which defaults to zero.
513           When a 1 is echoed into this file profiling begins, and when a
514           zero is entered, profiling stops. A "functions" file is created in
515           the trace_stats directory; this file shows the list of functions that
516           have been hit and their counters.
517 
518           If in doubt, say N.
519 
520 config FTRACE_MCOUNT_RECORD
521         def_bool y
522         depends on DYNAMIC_FTRACE
523         depends on HAVE_FTRACE_MCOUNT_RECORD
524 
525 config FTRACE_SELFTEST
526         bool
527 
528 config FTRACE_STARTUP_TEST
529         bool "Perform a startup test on ftrace"
530         depends on GENERIC_TRACER
531         select FTRACE_SELFTEST
532         help
533           This option performs a series of startup tests on ftrace. On bootup
534           a series of tests are made to verify that the tracer is
535           functioning properly. It will do tests on all the configured
536           tracers of ftrace.
537 
538 config EVENT_TRACE_TEST_SYSCALLS
539         bool "Run selftest on syscall events"
540         depends on FTRACE_STARTUP_TEST
541         help
542          This option will also enable testing every syscall event.
543          It only enables the event and disables it and runs various loads
544          with the event enabled. This adds a bit more time for kernel boot
545          up since it runs this on every system call defined.
546 
547          TBD - enable a way to actually call the syscalls as we test their
548                events
549 
550 config MMIOTRACE
551         bool "Memory mapped IO tracing"
552         depends on HAVE_MMIOTRACE_SUPPORT && PCI
553         select GENERIC_TRACER
554         help
555           Mmiotrace traces Memory Mapped I/O access and is meant for
556           debugging and reverse engineering. It is called from the ioremap
557           implementation and works via page faults. Tracing is disabled by
558           default and can be enabled at run-time.
559 
560           See Documentation/trace/mmiotrace.txt.
561           If you are not helping to develop drivers, say N.
562 
563 config TRACING_MAP
564         bool
565         depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
566         help
567           tracing_map is a special-purpose lock-free map for tracing,
568           separated out as a stand-alone facility in order to allow it
569           to be shared between multiple tracers.  It isn't meant to be
570           generally used outside of that context, and is normally
571           selected by tracers that use it.
572 
573 config HIST_TRIGGERS
574         bool "Histogram triggers"
575         depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
576         select TRACING_MAP
577         select TRACING
578         default n
579         help
580           Hist triggers allow one or more arbitrary trace event fields
581           to be aggregated into hash tables and dumped to stdout by
582           reading a debugfs/tracefs file.  They're useful for
583           gathering quick and dirty (though precise) summaries of
584           event activity as an initial guide for further investigation
585           using more advanced tools.
586 
587           See Documentation/trace/events.txt.
588           If in doubt, say N.
589 
590 config MMIOTRACE_TEST
591         tristate "Test module for mmiotrace"
592         depends on MMIOTRACE && m
593         help
594           This is a dumb module for testing mmiotrace. It is very dangerous
595           as it will write garbage to IO memory starting at a given address.
596           However, it should be safe to use on e.g. unused portion of VRAM.
597 
598           Say N, unless you absolutely know what you are doing.
599 
600 config TRACEPOINT_BENCHMARK
601         bool "Add tracepoint that benchmarks tracepoints"
602         help
603          This option creates the tracepoint "benchmark:benchmark_event".
604          When the tracepoint is enabled, it kicks off a kernel thread that
 605          goes into an infinite loop (calling cond_resched() to let other tasks
606          run), and calls the tracepoint. Each iteration will record the time
607          it took to write to the tracepoint and the next iteration that
608          data will be passed to the tracepoint itself. That is, the tracepoint
609          will report the time it took to do the previous tracepoint.
610          The string written to the tracepoint is a static string of 128 bytes
611          to keep the time the same. The initial string is simply a write of
612          "START". The second string records the cold cache time of the first
613          write which is not added to the rest of the calculations.
614 
615          As it is a tight loop, it benchmarks as hot cache. That's fine because
616          we care most about hot paths that are probably in cache already.
617 
618          An example of the output:
619 
620               START
621               first=3672 [COLD CACHED]
622               last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
623               last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
624               last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
625               last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
626               last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
627               last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
628 
629 
630 config RING_BUFFER_BENCHMARK
631         tristate "Ring buffer benchmark stress tester"
632         depends on RING_BUFFER
633         help
634           This option creates a test to stress the ring buffer and benchmark it.
635           It creates its own ring buffer such that it will not interfere with
636           any other users of the ring buffer (such as ftrace). It then creates
637           a producer and consumer that will run for 10 seconds and sleep for
638           10 seconds. Each interval it will print out the number of events
639           it recorded and give a rough estimate of how long each iteration took.
640 
641           It does not disable interrupts or raise its priority, so it may be
642           affected by processes that are running.
643 
644           If unsure, say N.
645 
646 config RING_BUFFER_STARTUP_TEST
647        bool "Ring buffer startup self test"
648        depends on RING_BUFFER
649        help
650          Run a simple self test on the ring buffer on boot up. Late in the
 651          kernel boot sequence, the test will start and kick off
652          a thread per cpu. Each thread will write various size events
653          into the ring buffer. Another thread is created to send IPIs
654          to each of the threads, where the IPI handler will also write
655          to the ring buffer, to test/stress the nesting ability.
656          If any anomalies are discovered, a warning will be displayed
657          and all ring buffers will be disabled.
658 
659          The test runs for 10 seconds. This will slow your boot time
660          by at least 10 more seconds.
661 
 662          At the end of the test, statistics and more checks are done.
663          It will output the stats of each per cpu buffer. What
664          was written, the sizes, what was read, what was lost, and
665          other similar details.
666 
667          If unsure, say N
668 
669 config TRACE_ENUM_MAP_FILE
670        bool "Show enum mappings for trace events"
671        depends on TRACING
672        help
673         The "print fmt" of the trace events will show the enum names instead
674         of their values. This can cause problems for user space tools that
675         use this string to parse the raw data as user space does not know
676         how to convert the string to its value.
677 
678         To fix this, there's a special macro in the kernel that can be used
679         to convert the enum into its value. If this macro is used, then the
680         print fmt strings will have the enums converted to their values.
681 
682         If something does not get converted properly, this option can be
683         used to show what enums the kernel tried to convert.
684 
685         This option is for debugging the enum conversions. A file is created
686         in the tracing directory called "enum_map" that will show the enum
687         names matched with their values and what trace event system they
 688         belong to.
689 
690         Normally, the mapping of the strings to values will be freed after
691         boot up or module load. With this option, they will not be freed, as
692         they are needed for the "enum_map" file. Enabling this option will
693         increase the memory footprint of the running kernel.
694 
695         If unsure, say N
696 
697 config TRACING_EVENTS_GPIO
698         bool "Trace gpio events"
699         depends on GPIOLIB
700         default y
701         help
702           Enable tracing events for gpio subsystem
703 
704 endif # FTRACE
705 
706 endif # TRACING_SUPPORT
707 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us