
Linux/kernel/seccomp.c

Diff markup

Differences between /kernel/seccomp.c (Version 4.8) and /kernel/seccomp.c (Version 3.6)
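
Both versions of the file implement the kernel half of the same userspace interface: a task builds a classic BPF program over struct seccomp_data and installs it with prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...). The short userspace sketch below is an illustration of that interface, not part of the kernel tree; it assumes a libc that exposes PR_SET_NO_NEW_PRIVS and the uapi headers <linux/filter.h> and <linux/seccomp.h>, and it makes getpid() fail with EPERM while allowing every other call. A real filter should also check seccomp_data.arch before trusting the syscall number.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter insns[] = {
		/* A = seccomp_data.nr (the syscall number). */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* If A == __NR_getpid fall through to ERRNO, else skip to ALLOW. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Needed unless the caller has CAP_SYS_ADMIN in its user namespace. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;

	/* The raw syscall now fails with EPERM instead of returning a pid. */
	errno = 0;
	printf("getpid() = %ld, errno = %d\n", (long)syscall(SYS_getpid), errno);
	return 0;
}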


  1 /*                                                  1 /*
  2  * linux/kernel/seccomp.c                           2  * linux/kernel/seccomp.c
  3  *                                                  3  *
  4  * Copyright 2004-2005  Andrea Arcangeli <andr      4  * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
  5  *                                                  5  *
  6  * Copyright (C) 2012 Google, Inc.                  6  * Copyright (C) 2012 Google, Inc.
  7  * Will Drewry <wad@chromium.org>                   7  * Will Drewry <wad@chromium.org>
  8  *                                                  8  *
  9  * This defines a simple but solid secure-comp      9  * This defines a simple but solid secure-computing facility.
 10  *                                                 10  *
 11  * Mode 1 uses a fixed list of allowed system      11  * Mode 1 uses a fixed list of allowed system calls.
 12  * Mode 2 allows user-defined system call filt     12  * Mode 2 allows user-defined system call filters in the form
 13  *        of Berkeley Packet Filters/Linux Soc     13  *        of Berkeley Packet Filters/Linux Socket Filters.
 14  */                                                14  */
 15                                                    15 
 16 #include <linux/atomic.h>                          16 #include <linux/atomic.h>
 17 #include <linux/audit.h>                           17 #include <linux/audit.h>
 18 #include <linux/compat.h>                          18 #include <linux/compat.h>
 19 #include <linux/sched.h>                           19 #include <linux/sched.h>
 20 #include <linux/seccomp.h>                         20 #include <linux/seccomp.h>
 21 #include <linux/slab.h>                        << 
 22 #include <linux/syscalls.h>                    << 
 23                                                    21 
 24 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER         !!  22 /* #define SECCOMP_DEBUG 1 */
 25 #include <asm/syscall.h>                       << 
 26 #endif                                         << 
 27                                                    23 
 28 #ifdef CONFIG_SECCOMP_FILTER                       24 #ifdef CONFIG_SECCOMP_FILTER
                                                   >>  25 #include <asm/syscall.h>
 29 #include <linux/filter.h>                          26 #include <linux/filter.h>
 30 #include <linux/pid.h>                         << 
 31 #include <linux/ptrace.h>                          27 #include <linux/ptrace.h>
 32 #include <linux/security.h>                        28 #include <linux/security.h>
                                                   >>  29 #include <linux/slab.h>
 33 #include <linux/tracehook.h>                       30 #include <linux/tracehook.h>
 34 #include <linux/uaccess.h>                         31 #include <linux/uaccess.h>
 35                                                    32 
 36 /**                                                33 /**
 37  * struct seccomp_filter - container for secco     34  * struct seccomp_filter - container for seccomp BPF programs
 38  *                                                 35  *
 39  * @usage: reference count to manage the objec     36  * @usage: reference count to manage the object lifetime.
 40  *         get/put helpers should be used when     37  *         get/put helpers should be used when accessing an instance
 41  *         outside of a lifetime-guarded secti     38  *         outside of a lifetime-guarded section.  In general, this
 42  *         is only needed for handling filters     39  *         is only needed for handling filters shared across tasks.
 43  * @prev: points to a previously installed, or     40  * @prev: points to a previously installed, or inherited, filter
 44  * @len: the number of instructions in the pro     41  * @len: the number of instructions in the program
 45  * @insnsi: the BPF program instructions to ev !!  42  * @insns: the BPF program instructions to evaluate
 46  *                                                 43  *
 47  * seccomp_filter objects are organized in a t     44  * seccomp_filter objects are organized in a tree linked via the @prev
 48  * pointer.  For any task, it appears to be a      45  * pointer.  For any task, it appears to be a singly-linked list starting
 49  * with current->seccomp.filter, the most rece     46  * with current->seccomp.filter, the most recently attached or inherited filter.
 50  * However, multiple filters may share a @prev     47  * However, multiple filters may share a @prev node, by way of fork(), which
 51  * results in a unidirectional tree existing i     48  * results in a unidirectional tree existing in memory.  This is similar to
 52  * how namespaces work.                            49  * how namespaces work.
 53  *                                                 50  *
 54  * seccomp_filter objects should never be modi     51  * seccomp_filter objects should never be modified after being attached
 55  * to a task_struct (other than @usage).           52  * to a task_struct (other than @usage).
 56  */                                                53  */
 57 struct seccomp_filter {                            54 struct seccomp_filter {
 58         atomic_t usage;                            55         atomic_t usage;
 59         struct seccomp_filter *prev;               56         struct seccomp_filter *prev;
 60         struct bpf_prog *prog;                 !!  57         unsigned short len;  /* Instruction count */
                                                   >>  58         struct sock_filter insns[];
 61 };                                                 59 };
 62                                                    60 
 63 /* Limit any path through the tree to 256KB wo     61 /* Limit any path through the tree to 256KB worth of instructions. */
 64 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof     62 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
 65                                                    63 
 66 /*                                             !!  64 /**
                                                   >>  65  * get_u32 - returns a u32 offset into data
                                                   >>  66  * @data: a unsigned 64 bit value
                                                   >>  67  * @index: 0 or 1 to return the first or second 32-bits
                                                   >>  68  *
                                                   >>  69  * This inline exists to hide the length of unsigned long.  If a 32-bit
                                                   >>  70  * unsigned long is passed in, it will be extended and the top 32-bits will be
                                                   >>  71  * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
                                                   >>  72  * properly returned.
                                                   >>  73  *
 67  * Endianness is explicitly ignored and left f     74  * Endianness is explicitly ignored and left for BPF program authors to manage
 68  * as per the specific architecture.               75  * as per the specific architecture.
 69  */                                                76  */
 70 static void populate_seccomp_data(struct secco !!  77 static inline u32 get_u32(u64 data, int index)
 71 {                                                  78 {
 72         struct task_struct *task = current;    !!  79         return ((u32 *)&data)[index];
 73         struct pt_regs *regs = task_pt_regs(ta !!  80 }
 74         unsigned long args[6];                 !!  81 
 75                                                !!  82 /* Helper for bpf_load below. */
 76         sd->nr = syscall_get_nr(task, regs);   !!  83 #define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
 77         sd->arch = syscall_get_arch();         !!  84 /**
 78         syscall_get_arguments(task, regs, 0, 6 !!  85  * bpf_load: checks and returns a pointer to the requested offset
 79         sd->args[0] = args[0];                 !!  86  * @off: offset into struct seccomp_data to load from
 80         sd->args[1] = args[1];                 !!  87  *
 81         sd->args[2] = args[2];                 !!  88  * Returns the requested 32-bits of data.
 82         sd->args[3] = args[3];                 !!  89  * seccomp_check_filter() should assure that @off is 32-bit aligned
 83         sd->args[4] = args[4];                 !!  90  * and not out of bounds.  Failure to do so is a BUG.
 84         sd->args[5] = args[5];                 !!  91  */
 85         sd->instruction_pointer = KSTK_EIP(tas !!  92 u32 seccomp_bpf_load(int off)
                                                   >>  93 {
                                                   >>  94         struct pt_regs *regs = task_pt_regs(current);
                                                   >>  95         if (off == BPF_DATA(nr))
                                                   >>  96                 return syscall_get_nr(current, regs);
                                                   >>  97         if (off == BPF_DATA(arch))
                                                   >>  98                 return syscall_get_arch(current, regs);
                                                   >>  99         if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
                                                   >> 100                 unsigned long value;
                                                   >> 101                 int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
                                                   >> 102                 int index = !!(off % sizeof(u64));
                                                   >> 103                 syscall_get_arguments(current, regs, arg, 1, &value);
                                                   >> 104                 return get_u32(value, index);
                                                   >> 105         }
                                                   >> 106         if (off == BPF_DATA(instruction_pointer))
                                                   >> 107                 return get_u32(KSTK_EIP(current), 0);
                                                   >> 108         if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
                                                   >> 109                 return get_u32(KSTK_EIP(current), 1);
                                                   >> 110         /* seccomp_check_filter should make this impossible. */
                                                   >> 111         BUG();
 86 }                                                 112 }
 87                                                   113 
 88 /**                                               114 /**
 89  *      seccomp_check_filter - verify seccomp     115  *      seccomp_check_filter - verify seccomp filter code
 90  *      @filter: filter to verify                 116  *      @filter: filter to verify
 91  *      @flen: length of filter                   117  *      @flen: length of filter
 92  *                                                118  *
 93  * Takes a previously checked filter (by bpf_c !! 119  * Takes a previously checked filter (by sk_chk_filter) and
 94  * redirects all filter code that loads struct    120  * redirects all filter code that loads struct sk_buff data
 95  * and related data through seccomp_bpf_load.     121  * and related data through seccomp_bpf_load.  It also
 96  * enforces length and alignment checking of t    122  * enforces length and alignment checking of those loads.
 97  *                                                123  *
 98  * Returns 0 if the rule set is legal or -EINV    124  * Returns 0 if the rule set is legal or -EINVAL if not.
 99  */                                               125  */
100 static int seccomp_check_filter(struct sock_fi    126 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
101 {                                                 127 {
102         int pc;                                   128         int pc;
103         for (pc = 0; pc < flen; pc++) {           129         for (pc = 0; pc < flen; pc++) {
104                 struct sock_filter *ftest = &f    130                 struct sock_filter *ftest = &filter[pc];
105                 u16 code = ftest->code;           131                 u16 code = ftest->code;
106                 u32 k = ftest->k;                 132                 u32 k = ftest->k;
107                                                   133 
108                 switch (code) {                   134                 switch (code) {
109                 case BPF_LD | BPF_W | BPF_ABS: !! 135                 case BPF_S_LD_W_ABS:
110                         ftest->code = BPF_LDX  !! 136                         ftest->code = BPF_S_ANC_SECCOMP_LD_W;
111                         /* 32-bit aligned and     137                         /* 32-bit aligned and not out of bounds. */
112                         if (k >= sizeof(struct    138                         if (k >= sizeof(struct seccomp_data) || k & 3)
113                                 return -EINVAL    139                                 return -EINVAL;
114                         continue;                 140                         continue;
115                 case BPF_LD | BPF_W | BPF_LEN: !! 141                 case BPF_S_LD_W_LEN:
116                         ftest->code = BPF_LD | !! 142                         ftest->code = BPF_S_LD_IMM;
117                         ftest->k = sizeof(stru    143                         ftest->k = sizeof(struct seccomp_data);
118                         continue;                 144                         continue;
119                 case BPF_LDX | BPF_W | BPF_LEN !! 145                 case BPF_S_LDX_W_LEN:
120                         ftest->code = BPF_LDX  !! 146                         ftest->code = BPF_S_LDX_IMM;
121                         ftest->k = sizeof(stru    147                         ftest->k = sizeof(struct seccomp_data);
122                         continue;                 148                         continue;
123                 /* Explicitly include allowed     149                 /* Explicitly include allowed calls. */
124                 case BPF_RET | BPF_K:          !! 150                 case BPF_S_RET_K:
125                 case BPF_RET | BPF_A:          !! 151                 case BPF_S_RET_A:
126                 case BPF_ALU | BPF_ADD | BPF_K !! 152                 case BPF_S_ALU_ADD_K:
127                 case BPF_ALU | BPF_ADD | BPF_X !! 153                 case BPF_S_ALU_ADD_X:
128                 case BPF_ALU | BPF_SUB | BPF_K !! 154                 case BPF_S_ALU_SUB_K:
129                 case BPF_ALU | BPF_SUB | BPF_X !! 155                 case BPF_S_ALU_SUB_X:
130                 case BPF_ALU | BPF_MUL | BPF_K !! 156                 case BPF_S_ALU_MUL_K:
131                 case BPF_ALU | BPF_MUL | BPF_X !! 157                 case BPF_S_ALU_MUL_X:
132                 case BPF_ALU | BPF_DIV | BPF_K !! 158                 case BPF_S_ALU_DIV_X:
133                 case BPF_ALU | BPF_DIV | BPF_X !! 159                 case BPF_S_ALU_AND_K:
134                 case BPF_ALU | BPF_AND | BPF_K !! 160                 case BPF_S_ALU_AND_X:
135                 case BPF_ALU | BPF_AND | BPF_X !! 161                 case BPF_S_ALU_OR_K:
136                 case BPF_ALU | BPF_OR | BPF_K: !! 162                 case BPF_S_ALU_OR_X:
137                 case BPF_ALU | BPF_OR | BPF_X: !! 163                 case BPF_S_ALU_LSH_K:
138                 case BPF_ALU | BPF_XOR | BPF_K !! 164                 case BPF_S_ALU_LSH_X:
139                 case BPF_ALU | BPF_XOR | BPF_X !! 165                 case BPF_S_ALU_RSH_K:
140                 case BPF_ALU | BPF_LSH | BPF_K !! 166                 case BPF_S_ALU_RSH_X:
141                 case BPF_ALU | BPF_LSH | BPF_X !! 167                 case BPF_S_ALU_NEG:
142                 case BPF_ALU | BPF_RSH | BPF_K !! 168                 case BPF_S_LD_IMM:
143                 case BPF_ALU | BPF_RSH | BPF_X !! 169                 case BPF_S_LDX_IMM:
144                 case BPF_ALU | BPF_NEG:        !! 170                 case BPF_S_MISC_TAX:
145                 case BPF_LD | BPF_IMM:         !! 171                 case BPF_S_MISC_TXA:
146                 case BPF_LDX | BPF_IMM:        !! 172                 case BPF_S_ALU_DIV_K:
147                 case BPF_MISC | BPF_TAX:       !! 173                 case BPF_S_LD_MEM:
148                 case BPF_MISC | BPF_TXA:       !! 174                 case BPF_S_LDX_MEM:
149                 case BPF_LD | BPF_MEM:         !! 175                 case BPF_S_ST:
150                 case BPF_LDX | BPF_MEM:        !! 176                 case BPF_S_STX:
151                 case BPF_ST:                   !! 177                 case BPF_S_JMP_JA:
152                 case BPF_STX:                  !! 178                 case BPF_S_JMP_JEQ_K:
153                 case BPF_JMP | BPF_JA:         !! 179                 case BPF_S_JMP_JEQ_X:
154                 case BPF_JMP | BPF_JEQ | BPF_K !! 180                 case BPF_S_JMP_JGE_K:
155                 case BPF_JMP | BPF_JEQ | BPF_X !! 181                 case BPF_S_JMP_JGE_X:
156                 case BPF_JMP | BPF_JGE | BPF_K !! 182                 case BPF_S_JMP_JGT_K:
157                 case BPF_JMP | BPF_JGE | BPF_X !! 183                 case BPF_S_JMP_JGT_X:
158                 case BPF_JMP | BPF_JGT | BPF_K !! 184                 case BPF_S_JMP_JSET_K:
159                 case BPF_JMP | BPF_JGT | BPF_X !! 185                 case BPF_S_JMP_JSET_X:
160                 case BPF_JMP | BPF_JSET | BPF_ << 
161                 case BPF_JMP | BPF_JSET | BPF_ << 
162                         continue;                 186                         continue;
163                 default:                          187                 default:
164                         return -EINVAL;           188                         return -EINVAL;
165                 }                                 189                 }
166         }                                         190         }
167         return 0;                                 191         return 0;
168 }                                                 192 }
169                                                   193 
170 /**                                               194 /**
171  * seccomp_run_filters - evaluates all seccomp    195  * seccomp_run_filters - evaluates all seccomp filters against @syscall
172  * @syscall: number of the current system call    196  * @syscall: number of the current system call
173  *                                                197  *
174  * Returns valid seccomp BPF response codes.      198  * Returns valid seccomp BPF response codes.
175  */                                               199  */
176 static u32 seccomp_run_filters(const struct se !! 200 static u32 seccomp_run_filters(int syscall)
177 {                                                 201 {
178         struct seccomp_data sd_local;          !! 202         struct seccomp_filter *f;
179         u32 ret = SECCOMP_RET_ALLOW;              203         u32 ret = SECCOMP_RET_ALLOW;
180         /* Make sure cross-thread synced filte << 
181         struct seccomp_filter *f =             << 
182                         lockless_dereference(c << 
183                                                   204 
184         /* Ensure unexpected behavior doesn't     205         /* Ensure unexpected behavior doesn't result in failing open. */
185         if (unlikely(WARN_ON(f == NULL)))      !! 206         if (WARN_ON(current->seccomp.filter == NULL))
186                 return SECCOMP_RET_KILL;          207                 return SECCOMP_RET_KILL;
187                                                   208 
188         if (!sd) {                             << 
189                 populate_seccomp_data(&sd_loca << 
190                 sd = &sd_local;                << 
191         }                                      << 
192                                                << 
193         /*                                        209         /*
194          * All filters in the list are evaluat    210          * All filters in the list are evaluated and the lowest BPF return
195          * value always takes priority (ignori    211          * value always takes priority (ignoring the DATA).
196          */                                       212          */
197         for (; f; f = f->prev) {               !! 213         for (f = current->seccomp.filter; f; f = f->prev) {
198                 u32 cur_ret = BPF_PROG_RUN(f-> !! 214                 u32 cur_ret = sk_run_filter(NULL, f->insns);
199                                                << 
200                 if ((cur_ret & SECCOMP_RET_ACT    215                 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
201                         ret = cur_ret;            216                         ret = cur_ret;
202         }                                         217         }
203         return ret;                               218         return ret;
204 }                                                 219 }
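                /*
                 * Illustration, not in the kernel source: how the "lowest
                 * return value wins" rule above composes across stacked
                 * filters.  The SECCOMP_RET_* actions are ordered
                 * KILL (0x00000000) < TRAP (0x00030000) < ERRNO (0x00050000) <
                 * TRACE (0x7ff00000) < ALLOW (0x7fff0000), so a stricter
                 * filter anywhere on the @prev chain overrides a more
                 * permissive one.  pick_stricter() is a hypothetical helper,
                 * not a kernel function; a sketch assuming the SECCOMP_RET_*
                 * values from <linux/seccomp.h>.
                 */
                static u32 pick_stricter(u32 a, u32 b)
                {
                	/* Compare only the action bits; the winner keeps its DATA. */
                	return (a & SECCOMP_RET_ACTION) < (b & SECCOMP_RET_ACTION) ? a : b;
                }
                /* e.g. pick_stricter(SECCOMP_RET_TRACE, SECCOMP_RET_ERRNO | EPERM)
                 *      == SECCOMP_RET_ERRNO | EPERM */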
205 #endif /* CONFIG_SECCOMP_FILTER */             << 
206                                                << 
207 static inline bool seccomp_may_assign_mode(uns << 
208 {                                              << 
209         assert_spin_locked(&current->sighand-> << 
210                                                << 
211         if (current->seccomp.mode && current-> << 
212                 return false;                  << 
213                                                << 
214         return true;                           << 
215 }                                              << 
216                                                << 
217 static inline void seccomp_assign_mode(struct  << 
218                                        unsigne << 
219 {                                              << 
220         assert_spin_locked(&task->sighand->sig << 
221                                                << 
222         task->seccomp.mode = seccomp_mode;     << 
223         /*                                     << 
224          * Make sure TIF_SECCOMP cannot be set << 
225          * filter) is set.                     << 
226          */                                    << 
227         smp_mb__before_atomic();               << 
228         set_tsk_thread_flag(task, TIF_SECCOMP) << 
229 }                                              << 
230                                                << 
231 #ifdef CONFIG_SECCOMP_FILTER                   << 
232 /* Returns 1 if the parent is an ancestor of t << 
233 static int is_ancestor(struct seccomp_filter * << 
234                        struct seccomp_filter * << 
235 {                                              << 
236         /* NULL is the root ancestor. */       << 
237         if (parent == NULL)                    << 
238                 return 1;                      << 
239         for (; child; child = child->prev)     << 
240                 if (child == parent)           << 
241                         return 1;              << 
242         return 0;                              << 
243 }                                              << 
244                                                << 
245 /**                                            << 
246  * seccomp_can_sync_threads: checks if all thr << 
247  *                                             << 
248  * Expects sighand and cred_guard_mutex locks  << 
249  *                                             << 
250  * Returns 0 on success, -ve on error, or the  << 
251  * either not in the correct seccomp mode or i << 
252  * seccomp filter.                             << 
253  */                                            << 
254 static inline pid_t seccomp_can_sync_threads(v << 
255 {                                              << 
256         struct task_struct *thread, *caller;   << 
257                                                << 
258         BUG_ON(!mutex_is_locked(&current->sign << 
259         assert_spin_locked(&current->sighand-> << 
260                                                << 
261         /* Validate all threads being eligible << 
262         caller = current;                      << 
263         for_each_thread(caller, thread) {      << 
264                 pid_t failed;                  << 
265                                                << 
266                 /* Skip current, since it is i << 
267                 if (thread == caller)          << 
268                         continue;              << 
269                                                << 
270                 if (thread->seccomp.mode == SE << 
271                     (thread->seccomp.mode == S << 
272                      is_ancestor(thread->secco << 
273                                  caller->secco << 
274                         continue;              << 
275                                                << 
276                 /* Return the first thread tha << 
277                 failed = task_pid_vnr(thread); << 
278                 /* If the pid cannot be resolv << 
279                 if (unlikely(WARN_ON(failed == << 
280                         failed = -ESRCH;       << 
281                 return failed;                 << 
282         }                                      << 
283                                                << 
284         return 0;                              << 
285 }                                              << 
286                                                << 
287 /**                                            << 
288  * seccomp_sync_threads: sets all threads to u << 
289  *                                             << 
290  * Expects sighand and cred_guard_mutex locks  << 
291  * seccomp_can_sync_threads() to have returned << 
292  * without dropping the locks.                 << 
293  *                                             << 
294  */                                            << 
295 static inline void seccomp_sync_threads(void)  << 
296 {                                              << 
297         struct task_struct *thread, *caller;   << 
298                                                << 
299         BUG_ON(!mutex_is_locked(&current->sign << 
300         assert_spin_locked(&current->sighand-> << 
301                                                << 
302         /* Synchronize all threads. */         << 
303         caller = current;                      << 
304         for_each_thread(caller, thread) {      << 
305                 /* Skip current, since it need << 
306                 if (thread == caller)          << 
307                         continue;              << 
308                                                << 
309                 /* Get a task reference for th << 
310                 get_seccomp_filter(caller);    << 
311                 /*                             << 
312                  * Drop the task reference to  << 
313                  * current's path will hold a  << 
314                  * allows a put before the ass << 
315                  */                            << 
316                 put_seccomp_filter(thread);    << 
317                 smp_store_release(&thread->sec << 
318                                   caller->secc << 
319                                                << 
320                 /*                             << 
321                  * Don't let an unprivileged t << 
322                  * the no_new_privs restrictio << 
323                  * a thread that sets it up, e << 
324                  * then dies.                  << 
325                  */                            << 
326                 if (task_no_new_privs(caller)) << 
327                         task_set_no_new_privs( << 
328                                                << 
329                 /*                             << 
330                  * Opt the other thread into s << 
331                  * As threads are considered t << 
332                  * equivalent (see ptrace_may_ << 
333                  * allow one thread to transit << 
334                  */                            << 
335                 if (thread->seccomp.mode == SE << 
336                         seccomp_assign_mode(th << 
337         }                                      << 
338 }                                              << 
339                                                   220 
340 /**                                               221 /**
341  * seccomp_prepare_filter: Prepares a seccomp  !! 222  * seccomp_attach_filter: Attaches a seccomp filter to current.
342  * @fprog: BPF program to install                 223  * @fprog: BPF program to install
343  *                                                224  *
344  * Returns filter on success or an ERR_PTR on  !! 225  * Returns 0 on success or an errno on failure.
345  */                                               226  */
346 static struct seccomp_filter *seccomp_prepare_ !! 227 static long seccomp_attach_filter(struct sock_fprog *fprog)
347 {                                                 228 {
348         struct seccomp_filter *sfilter;        !! 229         struct seccomp_filter *filter;
349         int ret;                               !! 230         unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
350         const bool save_orig = IS_ENABLED(CONF !! 231         unsigned long total_insns = fprog->len;
                                                   >> 232         long ret;
351                                                   233 
352         if (fprog->len == 0 || fprog->len > BP    234         if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
353                 return ERR_PTR(-EINVAL);       !! 235                 return -EINVAL;
354                                                   236 
355         BUG_ON(INT_MAX / fprog->len < sizeof(s !! 237         for (filter = current->seccomp.filter; filter; filter = filter->prev)
                                                   >> 238                 total_insns += filter->len + 4;  /* include a 4 instr penalty */
                                                   >> 239         if (total_insns > MAX_INSNS_PER_PATH)
                                                   >> 240                 return -ENOMEM;
356                                                   241 
357         /*                                        242         /*
358          * Installing a seccomp filter require !! 243          * Installing a seccomp filter requires that the task have
359          * CAP_SYS_ADMIN in its namespace or b    244          * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
360          * This avoids scenarios where unprivi    245          * This avoids scenarios where unprivileged tasks can affect the
361          * behavior of privileged children.       246          * behavior of privileged children.
362          */                                       247          */
363         if (!task_no_new_privs(current) &&     !! 248         if (!current->no_new_privs &&
364             security_capable_noaudit(current_c    249             security_capable_noaudit(current_cred(), current_user_ns(),
365                                      CAP_SYS_A    250                                      CAP_SYS_ADMIN) != 0)
366                 return ERR_PTR(-EACCES);       !! 251                 return -EACCES;
367                                                   252 
368         /* Allocate a new seccomp_filter */       253         /* Allocate a new seccomp_filter */
369         sfilter = kzalloc(sizeof(*sfilter), GF !! 254         filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
370         if (!sfilter)                          !! 255                          GFP_KERNEL|__GFP_NOWARN);
371                 return ERR_PTR(-ENOMEM);       !! 256         if (!filter)
372                                                !! 257                 return -ENOMEM;
373         ret = bpf_prog_create_from_user(&sfilt !! 258         atomic_set(&filter->usage, 1);
374                                         seccom !! 259         filter->len = fprog->len;
375         if (ret < 0) {                         !! 260 
376                 kfree(sfilter);                !! 261         /* Copy the instructions from fprog. */
377                 return ERR_PTR(ret);           !! 262         ret = -EFAULT;
378         }                                      !! 263         if (copy_from_user(filter->insns, fprog->filter, fp_size))
                                                   >> 264                 goto fail;
379                                                   265 
380         atomic_set(&sfilter->usage, 1);        !! 266         /* Check and rewrite the fprog via the skb checker */
                                                   >> 267         ret = sk_chk_filter(filter->insns, filter->len);
                                                   >> 268         if (ret)
                                                   >> 269                 goto fail;
                                                   >> 270 
                                                   >> 271         /* Check and rewrite the fprog for seccomp use */
                                                   >> 272         ret = seccomp_check_filter(filter->insns, filter->len);
                                                   >> 273         if (ret)
                                                   >> 274                 goto fail;
381                                                   275 
382         return sfilter;                        !! 276         /*
                                                   >> 277          * If there is an existing filter, make it the prev and don't drop its
                                                   >> 278          * task reference.
                                                   >> 279          */
                                                   >> 280         filter->prev = current->seccomp.filter;
                                                   >> 281         current->seccomp.filter = filter;
                                                   >> 282         return 0;
                                                   >> 283 fail:
                                                   >> 284         kfree(filter);
                                                   >> 285         return ret;
383 }                                                 286 }
384                                                   287 
385 /**                                               288 /**
386  * seccomp_prepare_user_filter - prepares a us !! 289  * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
387  * @user_filter: pointer to the user data cont    290  * @user_filter: pointer to the user data containing a sock_fprog.
388  *                                                291  *
389  * Returns 0 on success and non-zero otherwise    292  * Returns 0 on success and non-zero otherwise.
390  */                                               293  */
391 static struct seccomp_filter *                 !! 294 long seccomp_attach_user_filter(char __user *user_filter)
392 seccomp_prepare_user_filter(const char __user  << 
393 {                                                 295 {
394         struct sock_fprog fprog;                  296         struct sock_fprog fprog;
395         struct seccomp_filter *filter = ERR_PT !! 297         long ret = -EFAULT;
396                                                   298 
397 #ifdef CONFIG_COMPAT                              299 #ifdef CONFIG_COMPAT
398         if (in_compat_syscall()) {             !! 300         if (is_compat_task()) {
399                 struct compat_sock_fprog fprog    301                 struct compat_sock_fprog fprog32;
400                 if (copy_from_user(&fprog32, u    302                 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
401                         goto out;                 303                         goto out;
402                 fprog.len = fprog32.len;          304                 fprog.len = fprog32.len;
403                 fprog.filter = compat_ptr(fpro    305                 fprog.filter = compat_ptr(fprog32.filter);
404         } else /* falls through to the if belo    306         } else /* falls through to the if below. */
405 #endif                                            307 #endif
406         if (copy_from_user(&fprog, user_filter    308         if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
407                 goto out;                         309                 goto out;
408         filter = seccomp_prepare_filter(&fprog !! 310         ret = seccomp_attach_filter(&fprog);
409 out:                                              311 out:
410         return filter;                         !! 312         return ret;
411 }                                              << 
412                                                << 
413 /**                                            << 
414  * seccomp_attach_filter: validate and attach  << 
415  * @flags:  flags to change filter behavior    << 
416  * @filter: seccomp filter to add to the curre << 
417  *                                             << 
418  * Caller must be holding current->sighand->si << 
419  *                                             << 
420  * Returns 0 on success, -ve on error.         << 
421  */                                            << 
422 static long seccomp_attach_filter(unsigned int << 
423                                   struct secco << 
424 {                                              << 
425         unsigned long total_insns;             << 
426         struct seccomp_filter *walker;         << 
427                                                << 
428         assert_spin_locked(&current->sighand-> << 
429                                                << 
430         /* Validate resulting filter length. * << 
431         total_insns = filter->prog->len;       << 
432         for (walker = current->seccomp.filter; << 
433                 total_insns += walker->prog->l << 
434         if (total_insns > MAX_INSNS_PER_PATH)  << 
435                 return -ENOMEM;                << 
436                                                << 
437         /* If thread sync has been requested,  << 
438         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
439                 int ret;                       << 
440                                                << 
441                 ret = seccomp_can_sync_threads << 
442                 if (ret)                       << 
443                         return ret;            << 
444         }                                      << 
445                                                << 
446         /*                                     << 
447          * If there is an existing filter, mak << 
448          * task reference.                     << 
449          */                                    << 
450         filter->prev = current->seccomp.filter << 
451         current->seccomp.filter = filter;      << 
452                                                << 
453         /* Now that the new filter is in place << 
454         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
455                 seccomp_sync_threads();        << 
456                                                << 
457         return 0;                              << 
458 }                                                 313 }
459                                                   314 
460 /* get_seccomp_filter - increments the referen    315 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
461 void get_seccomp_filter(struct task_struct *ts    316 void get_seccomp_filter(struct task_struct *tsk)
462 {                                                 317 {
463         struct seccomp_filter *orig = tsk->sec    318         struct seccomp_filter *orig = tsk->seccomp.filter;
464         if (!orig)                                319         if (!orig)
465                 return;                           320                 return;
466         /* Reference count is bounded by the n    321         /* Reference count is bounded by the number of total processes. */
467         atomic_inc(&orig->usage);                 322         atomic_inc(&orig->usage);
468 }                                                 323 }
469                                                   324 
470 static inline void seccomp_filter_free(struct  << 
471 {                                              << 
472         if (filter) {                          << 
473                 bpf_prog_destroy(filter->prog) << 
474                 kfree(filter);                 << 
475         }                                      << 
476 }                                              << 
477                                                << 
478 /* put_seccomp_filter - decrements the ref cou    325 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
479 void put_seccomp_filter(struct task_struct *ts    326 void put_seccomp_filter(struct task_struct *tsk)
480 {                                                 327 {
481         struct seccomp_filter *orig = tsk->sec    328         struct seccomp_filter *orig = tsk->seccomp.filter;
482         /* Clean up single-reference branches     329         /* Clean up single-reference branches iteratively. */
483         while (orig && atomic_dec_and_test(&or    330         while (orig && atomic_dec_and_test(&orig->usage)) {
484                 struct seccomp_filter *freeme     331                 struct seccomp_filter *freeme = orig;
485                 orig = orig->prev;                332                 orig = orig->prev;
486                 seccomp_filter_free(freeme);   !! 333                 kfree(freeme);
487         }                                         334         }
488 }                                                 335 }
489                                                   336 
490 /**                                               337 /**
491  * seccomp_send_sigsys - signals the task to a    338  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
492  * @syscall: syscall number to send to userlan    339  * @syscall: syscall number to send to userland
493  * @reason: filter-supplied reason code to sen    340  * @reason: filter-supplied reason code to send to userland (via si_errno)
494  *                                                341  *
495  * Forces a SIGSYS with a code of SYS_SECCOMP     342  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
496  */                                               343  */
497 static void seccomp_send_sigsys(int syscall, i    344 static void seccomp_send_sigsys(int syscall, int reason)
498 {                                                 345 {
499         struct siginfo info;                      346         struct siginfo info;
500         memset(&info, 0, sizeof(info));           347         memset(&info, 0, sizeof(info));
501         info.si_signo = SIGSYS;                   348         info.si_signo = SIGSYS;
502         info.si_code = SYS_SECCOMP;               349         info.si_code = SYS_SECCOMP;
503         info.si_call_addr = (void __user *)KST    350         info.si_call_addr = (void __user *)KSTK_EIP(current);
504         info.si_errno = reason;                   351         info.si_errno = reason;
505         info.si_arch = syscall_get_arch();     !! 352         info.si_arch = syscall_get_arch(current, task_pt_regs(current));
506         info.si_syscall = syscall;                353         info.si_syscall = syscall;
507         force_sig_info(SIGSYS, &info, current)    354         force_sig_info(SIGSYS, &info, current);
508 }                                                 355 }
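                /*
                 * Illustration, not in the kernel source: what the siginfo
                 * built above looks like from userspace.  A sketch assuming a
                 * libc whose siginfo_t exposes the si_syscall, si_arch and
                 * si_call_addr accessors and defines SYS_SECCOMP; the handler
                 * must be installed with SA_SIGINFO, and the fprintf() is for
                 * illustration only (it is not async-signal-safe).
                 */
                #include <signal.h>
                #include <stdio.h>

                static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
                {
                	if (info->si_code != SYS_SECCOMP)
                		return;
                	/* si_errno carries the 16 bits of SECCOMP_RET_DATA chosen by the filter. */
                	fprintf(stderr, "SIGSYS: syscall %d, arch %#x, reason %d, at %p\n",
                		info->si_syscall, info->si_arch, info->si_errno,
                		info->si_call_addr);
                }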
509 #endif  /* CONFIG_SECCOMP_FILTER */               356 #endif  /* CONFIG_SECCOMP_FILTER */
510                                                   357 
511 /*                                                358 /*
512  * Secure computing mode 1 allows only read/wr    359  * Secure computing mode 1 allows only read/write/exit/sigreturn.
513  * To be fully secure this must be combined wi    360  * To be fully secure this must be combined with rlimit
514  * to limit the stack allocations too.            361  * to limit the stack allocations too.
515  */                                               362  */
516 static const int mode1_syscalls[] = {          !! 363 static int mode1_syscalls[] = {
517         __NR_seccomp_read, __NR_seccomp_write,    364         __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
518         0, /* null terminated */                  365         0, /* null terminated */
519 };                                                366 };
520                                                   367 
521 static void __secure_computing_strict(int this << 
522 {                                              << 
523         const int *syscall_whitelist = mode1_s << 
524 #ifdef CONFIG_COMPAT                              368 #ifdef CONFIG_COMPAT
525         if (in_compat_syscall())               !! 369 static int mode1_syscalls_32[] = {
526                 syscall_whitelist = get_compat !! 370         __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
527 #endif                                         !! 371         0, /* null terminated */
528         do {                                   !! 372 };
529                 if (*syscall_whitelist == this << 
530                         return;                << 
531         } while (*++syscall_whitelist);        << 
532                                                << 
533 #ifdef SECCOMP_DEBUG                           << 
534         dump_stack();                          << 
535 #endif                                            373 #endif
536         audit_seccomp(this_syscall, SIGKILL, S << 
537         do_exit(SIGKILL);                      << 
538 }                                              << 
539                                                   374 
540 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER        !! 375 int __secure_computing(int this_syscall)
541 void secure_computing_strict(int this_syscall) << 
542 {                                                 376 {
543         int mode = current->seccomp.mode;         377         int mode = current->seccomp.mode;
                                                   >> 378         int exit_sig = 0;
                                                   >> 379         int *syscall;
                                                   >> 380         u32 ret;
544                                                   381 
545         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTO !! 382         switch (mode) {
546             unlikely(current->ptrace & PT_SUSP !! 383         case SECCOMP_MODE_STRICT:
547                 return;                        !! 384                 syscall = mode1_syscalls;
548                                                !! 385 #ifdef CONFIG_COMPAT
549         if (mode == SECCOMP_MODE_DISABLED)     !! 386                 if (is_compat_task())
550                 return;                        !! 387                         syscall = mode1_syscalls_32;
551         else if (mode == SECCOMP_MODE_STRICT)  !! 388 #endif
552                 __secure_computing_strict(this !! 389                 do {
553         else                                   !! 390                         if (*syscall == this_syscall)
554                 BUG();                         !! 391                                 return 0;
555 }                                              !! 392                 } while (*++syscall);
556 #else                                          !! 393                 exit_sig = SIGKILL;
557                                                !! 394                 ret = SECCOMP_RET_KILL;
                                                   >> 395                 break;
558 #ifdef CONFIG_SECCOMP_FILTER                      396 #ifdef CONFIG_SECCOMP_FILTER
559 static int __seccomp_filter(int this_syscall,  !! 397         case SECCOMP_MODE_FILTER: {
560                             const bool recheck !! 398                 int data;
561 {                                              !! 399                 ret = seccomp_run_filters(this_syscall);
562         u32 filter_ret, action;                !! 400                 data = ret & SECCOMP_RET_DATA;
563         int data;                              !! 401                 ret &= SECCOMP_RET_ACTION;
564                                                !! 402                 switch (ret) {
565         /*                                     !! 403                 case SECCOMP_RET_ERRNO:
566          * Make sure that any changes to mode  !! 404                         /* Set the low-order 16-bits as a errno. */
567          * been seen after TIF_SECCOMP was see !! 405                         syscall_set_return_value(current, task_pt_regs(current),
568          */                                    !! 406                                                  -data, 0);
569         rmb();                                 << 
570                                                << 
571         filter_ret = seccomp_run_filters(sd);  << 
572         data = filter_ret & SECCOMP_RET_DATA;  << 
573         action = filter_ret & SECCOMP_RET_ACTI << 
574                                                << 
575         switch (action) {                      << 
576         case SECCOMP_RET_ERRNO:                << 
577                 /* Set low-order bits as an er << 
578                 if (data > MAX_ERRNO)          << 
579                         data = MAX_ERRNO;      << 
580                 syscall_set_return_value(curre << 
581                                          -data << 
582                 goto skip;                     << 
583                                                << 
584         case SECCOMP_RET_TRAP:                 << 
585                 /* Show the handler the origin << 
586                 syscall_rollback(current, task << 
587                 /* Let the filter pass back 16 << 
588                 seccomp_send_sigsys(this_sysca << 
589                 goto skip;                     << 
590                                                << 
591         case SECCOMP_RET_TRACE:                << 
592                 /* We've been put in this stat << 
593                 if (recheck_after_trace)       << 
594                         return 0;              << 
595                                                << 
596                 /* ENOSYS these calls if there << 
597                 if (!ptrace_event_enabled(curr << 
598                         syscall_set_return_val << 
599                                                << 
600                                                << 
601                         goto skip;                407                         goto skip;
602                 }                              !! 408                 case SECCOMP_RET_TRAP:
603                                                !! 409                         /* Show the handler the original registers. */
604                 /* Allow the BPF to provide th !! 410                         syscall_rollback(current, task_pt_regs(current));
605                 ptrace_event(PTRACE_EVENT_SECC !! 411                         /* Let the filter pass back 16 bits of data. */
606                 /*                             !! 412                         seccomp_send_sigsys(this_syscall, data);
607                  * The delivery of a fatal sig << 
608                  * notification may silently s << 
609                  * which could leave us with a << 
610                  * syscall that the tracer wou << 
611                  * changed. Since the process  << 
612                  * force the syscall to be ski << 
613                  * kill the process and correc << 
614                  * notifications.              << 
615                  */                            << 
616                 if (fatal_signal_pending(curre << 
617                         goto skip;             << 
618                 /* Check if the tracer forced  << 
619                 this_syscall = syscall_get_nr( << 
620                 if (this_syscall < 0)          << 
621                         goto skip;                413                         goto skip;
622                                                !! 414                 case SECCOMP_RET_TRACE:
623                 /*                             !! 415                         /* Skip these calls if there is no tracer. */
624                  * Recheck the syscall, since  !! 416                         if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
625                  * intentionally uses a NULL s !! 417                                 goto skip;
626                  * a reload of all registers.  !! 418                         /* Allow the BPF to provide the event message */
627                  * a skip would have already b !! 419                         ptrace_event(PTRACE_EVENT_SECCOMP, data);
628                  */                            !! 420                         /*
629                 if (__seccomp_filter(this_sysc !! 421                          * The delivery of a fatal signal during event
630                         return -1;             !! 422                          * notification may silently skip tracer notification.
631                                                !! 423                          * Terminating the task now avoids executing a system
632                 return 0;                      !! 424                          * call that may not be intended.
633                                                !! 425                          */
634         case SECCOMP_RET_ALLOW:                !! 426                         if (fatal_signal_pending(current))
635                 return 0;                      !! 427                                 break;
636                                                !! 428                         return 0;
637         case SECCOMP_RET_KILL:                 !! 429                 case SECCOMP_RET_ALLOW:
638         default:                               !! 430                         return 0;
639                 audit_seccomp(this_syscall, SI !! 431                 case SECCOMP_RET_KILL:
640                 do_exit(SIGSYS);               !! 432                 default:
                                                   >> 433                         break;
                                                   >> 434                 }
                                                   >> 435                 exit_sig = SIGSYS;
                                                   >> 436                 break;
641         }                                         437         }
642                                                << 
643         unreachable();                         << 
644                                                << 
645 skip:                                          << 
646         audit_seccomp(this_syscall, 0, action) << 
647         return -1;                             << 
648 }                                              << 
649 #else                                          << 
650 static int __seccomp_filter(int this_syscall,  << 
651                             const bool recheck << 
652 {                                              << 
653         BUG();                                 << 
654 }                                              << 
655 #endif                                            438 #endif
656                                                << 
657 int __secure_computing(const struct seccomp_da << 
658 {                                              << 
659         int mode = current->seccomp.mode;      << 
660         int this_syscall;                      << 
661                                                << 
662         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&        << 
663             unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) << 
664                 return 0;                      << 
665                                                << 
666         this_syscall = sd ? sd->nr :           << 
667                 syscall_get_nr(current, task_pt_regs(current));  << 
668                                                << 
669         switch (mode) {                        << 
670         case SECCOMP_MODE_STRICT:              << 
671                 __secure_computing_strict(this_syscall);  /* may call do_exit */  << 
672                 return 0;                      << 
673         case SECCOMP_MODE_FILTER:              << 
674                 return __seccomp_filter(this_syscall, sd, false);  << 
675         default:                                  439         default:
676                 BUG();                            440                 BUG();
677         }                                         441         }
678 }                                              << 
679 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */   << 
680                                                << 
681 long prctl_get_seccomp(void)                   << 
682 {                                              << 
683         return current->seccomp.mode;          << 
684 }                                              << 
685                                                   442 
686 /**                                            !! 443 #ifdef SECCOMP_DEBUG
687  * seccomp_set_mode_strict: internal function for setting strict seccomp  !! 444         dump_stack();
688  *                                             << 
689  * Once current->seccomp.mode is non-zero, it may not be changed.  << 
690  *                                             << 
691  * Returns 0 on success or -EINVAL on failure. << 
692  */                                            << 
693 static long seccomp_set_mode_strict(void)      << 
694 {                                              << 
695         const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;  << 
696         long ret = -EINVAL;                    << 
697                                                << 
698         spin_lock_irq(&current->sighand->siglock);   << 
699                                                << 
700         if (!seccomp_may_assign_mode(seccomp_mode))  << 
701                 goto out;                      << 
702                                                << 
703 #ifdef TIF_NOTSC                               << 
704         disable_TSC();                         << 
705 #endif                                            445 #endif
706         seccomp_assign_mode(current, seccomp_mode);  !! 446         audit_seccomp(this_syscall, exit_sig, ret);
707         ret = 0;                               !! 447         do_exit(exit_sig);
708                                                << 
709 out:                                           << 
710         spin_unlock_irq(&current->sighand->siglock);  << 
711                                                << 
712         return ret;                            << 
713 }                                              << 
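
From userspace, strict mode is reached through prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) or seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL). The sketch below is an illustration of the prctl path, not part of the kernel sources.

/* Editor-added userspace sketch: entering SECCOMP_MODE_STRICT. */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
                perror("PR_SET_SECCOMP");
                return 1;
        }
        /* Only read(2), write(2), _exit(2) and sigreturn(2) remain allowed;
         * any other syscall now terminates the task with SIGKILL. */
        write(1, "strict mode on\n", 15);
        _exit(0);
}
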
714                                                << 
715 #ifdef CONFIG_SECCOMP_FILTER                      448 #ifdef CONFIG_SECCOMP_FILTER
716 /**                                            !! 449 skip:
717  * seccomp_set_mode_filter: internal function for setting seccomp filter  !! 450         audit_seccomp(this_syscall, exit_sig, ret);
718  * @flags:  flags to change filter behavior    << 
719  * @filter: struct sock_fprog containing filter  << 
720  *                                             << 
721  * This function may be called repeatedly to install additional filters.    << 
722  * Every filter successfully installed will be evaluated (in reverse order) << 
723  * for each system call the task makes.        << 
724  *                                             << 
725  * Once current->seccomp.mode is non-zero, it may not be changed.  << 
726  *                                             << 
727  * Returns 0 on success or -EINVAL on failure. << 
728  */                                            << 
729 static long seccomp_set_mode_filter(unsigned int flags,        << 
730                                     const char __user *filter) << 
731 {                                              << 
732         const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;  << 
733         struct seccomp_filter *prepared = NULL;                  << 
734         long ret = -EINVAL;                    << 
735                                                << 
736         /* Validate flags. */                  << 
737         if (flags & ~SECCOMP_FILTER_FLAG_MASK) << 
738                 return -EINVAL;                << 
739                                                << 
740         /* Prepare the new filter before holding any locks. */  << 
741         prepared = seccomp_prepare_user_filter(filter);         << 
742         if (IS_ERR(prepared))                  << 
743                 return PTR_ERR(prepared);      << 
744                                                << 
745         /*                                     << 
746          * Make sure we cannot change seccomp or nnp state via TSYNC  << 
747          * while another thread is in the middle of calling exec.     << 
748          */                                    << 
749         if (flags & SECCOMP_FILTER_FLAG_TSYNC &&                      << 
750             mutex_lock_killable(&current->signal->cred_guard_mutex))  << 
751                 goto out_free;                 << 
752                                                << 
753         spin_lock_irq(&current->sighand->siglock);   << 
754                                                << 
755         if (!seccomp_may_assign_mode(seccomp_mode))  << 
756                 goto out;                      << 
757                                                << 
758         ret = seccomp_attach_filter(flags, prepared);  << 
759         if (ret)                               << 
760                 goto out;                      << 
761         /* Do not free the successfully attached filter. */  << 
762         prepared = NULL;                       << 
763                                                << 
764         seccomp_assign_mode(current, seccomp_mode);  << 
765 out:                                           << 
766         spin_unlock_irq(&current->sighand->siglock);  << 
767         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
768                 mutex_unlock(&current->signal->cred_guard_mutex);  << 
769 out_free:                                      << 
770         seccomp_filter_free(prepared);         << 
771         return ret;                            << 
772 }                                              << 
773 #else                                          << 
774 static inline long seccomp_set_mode_filter(unsigned int flags,        << 
775                                            const char __user *filter) << 
776 {                                              << 
777         return -EINVAL;                        << 
778 }                                              << 
779 #endif                                            451 #endif
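
The filter attached here is a classic-BPF program that inspects struct seccomp_data. The sketch below is an illustration with a hypothetical policy (x86-64 only, not from the kernel tree): it makes chmod(2) fail with EPERM, allows everything else, and passes SECCOMP_FILTER_FLAG_TSYNC so the cred_guard_mutex/TSYNC handling above applies the filter to every thread.

/* Editor-added userspace sketch: building and attaching a cBPF filter. */
#include <errno.h>
#include <stddef.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int install_chmod_filter(void)
{
        struct sock_filter insns[] = {
                /* Kill the task outright on an unexpected architecture. */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, arch)),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                /* Fail chmod(2) with EPERM, allow everything else. */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chmod, 0, 1),
                BPF_STMT(BPF_RET | BPF_K,
                         SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        /* Required unless the caller holds CAP_SYS_ADMIN in its namespace. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        /* seccomp(2) has no glibc wrapper here, so use syscall(2) directly. */
        return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                       SECCOMP_FILTER_FLAG_TSYNC, &prog);
}
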
780                                                !! 452         return -1;
781 /* Common entry point for both prctl and syscall. */        << 
782 static long do_seccomp(unsigned int op, unsigned int flags, << 
783                        const char __user *uargs)           << 
784 {                                              << 
785         switch (op) {                          << 
786         case SECCOMP_SET_MODE_STRICT:          << 
787                 if (flags != 0 || uargs != NULL)   << 
788                         return -EINVAL;        << 
789                 return seccomp_set_mode_strict();  << 
790         case SECCOMP_SET_MODE_FILTER:          << 
791                 return seccomp_set_mode_filter(flags, uargs);  << 
792         default:                               << 
793                 return -EINVAL;                << 
794         }                                      << 
795 }                                                 453 }
796                                                   454 
797 SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,  !! 455 long prctl_get_seccomp(void)
798                          const char __user *, uargs)            << 
799 {                                                 456 {
800         return do_seccomp(op, flags, uargs);   !! 457         return current->seccomp.mode;
801 }                                                 458 }
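
As the SECCOMP_SET_MODE_STRICT branch of do_seccomp() shows, the raw seccomp(2) syscall insists on zeroed spare arguments. A minimal illustrative call follows; glibc offers no seccomp() wrapper in this era, so syscall(2) is used, assuming kernel headers new enough to define __NR_seccomp.

/* Editor-added userspace sketch: strict mode through the seccomp(2) syscall.
 * Both unused arguments must be 0/NULL or do_seccomp() returns -EINVAL. */
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/seccomp.h>

static int seccomp_strict(void)
{
        return syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
}
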
802                                                   459 
803 /**                                               460 /**
804  * prctl_set_seccomp: configures current->secc    461  * prctl_set_seccomp: configures current->seccomp.mode
805  * @seccomp_mode: requested mode to use           462  * @seccomp_mode: requested mode to use
806  * @filter: optional struct sock_fprog for use    463  * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
807  *                                                464  *
                                                   >> 465  * This function may be called repeatedly with a @seccomp_mode of
                                                   >> 466  * SECCOMP_MODE_FILTER to install additional filters.  Every filter
                                                   >> 467  * successfully installed will be evaluated (in reverse order) for each system
                                                   >> 468  * call the task makes.
                                                   >> 469  *
                                                   >> 470  * Once current->seccomp.mode is non-zero, it may not be changed.
                                                   >> 471  *
808  * Returns 0 on success or -EINVAL on failure.    472  * Returns 0 on success or -EINVAL on failure.
809  */                                               473  */
810 long prctl_set_seccomp(unsigned long seccomp_m    474 long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
811 {                                                 475 {
812         unsigned int op;                       !! 476         long ret = -EINVAL;
813         char __user *uargs;                    !! 477 
                                                   >> 478         if (current->seccomp.mode &&
                                                   >> 479             current->seccomp.mode != seccomp_mode)
                                                   >> 480                 goto out;
814                                                   481 
815         switch (seccomp_mode) {                   482         switch (seccomp_mode) {
816         case SECCOMP_MODE_STRICT:                 483         case SECCOMP_MODE_STRICT:
817                 op = SECCOMP_SET_MODE_STRICT;  !! 484                 ret = 0;
818                 /*                             !! 485 #ifdef TIF_NOTSC
819                  * Setting strict mode through prctl just ignored the filter,  !! 486                 disable_TSC();
820                  * so make sure it is always NULL here to pass the internal    !! 487 #endif
821                  * check in do_seccomp().      << 
822                  */                            << 
823                 uargs = NULL;                  << 
824                 break;                            488                 break;
                                                   >> 489 #ifdef CONFIG_SECCOMP_FILTER
825         case SECCOMP_MODE_FILTER:                 490         case SECCOMP_MODE_FILTER:
826                 op = SECCOMP_SET_MODE_FILTER;  !! 491                 ret = seccomp_attach_user_filter(filter);
827                 uargs = filter;                !! 492                 if (ret)
                                                   >> 493                         goto out;
828                 break;                            494                 break;
                                                   >> 495 #endif
829         default:                                  496         default:
830                 return -EINVAL;                << 
831         }                                      << 
832                                                << 
833         /* prctl interface doesn't have flags, so they are always zero. */  << 
834         return do_seccomp(op, 0, uargs);       << 
835 }                                              << 
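
prctl_set_seccomp() funnels into the same do_seccomp() helper with flags pinned to zero, so per-attach flags such as SECCOMP_FILTER_FLAG_TSYNC are only reachable through seccomp(2). The sketch below illustrates the prctl path, reusing a struct sock_fprog built as in the earlier hypothetical filter example.

/* Editor-added userspace sketch: attaching a filter via prctl(2).
 * "prog" is assumed to be a populated struct sock_fprog (see earlier sketch),
 * and PR_SET_NO_NEW_PRIVS is assumed to have been set already. */
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_via_prctl(const struct sock_fprog *prog)
{
        /* Equivalent to seccomp(SECCOMP_SET_MODE_FILTER, 0, prog). */
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog);
}
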
836                                                << 
837 #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)     << 
838 long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,  << 
839                         void __user *data)     << 
840 {                                              << 
841         struct seccomp_filter *filter;         << 
842         struct sock_fprog_kern *fprog;         << 
843         long ret;                              << 
844         unsigned long count = 0;               << 
845                                                << 
846         if (!capable(CAP_SYS_ADMIN) ||         << 
847             current->seccomp.mode != SECCOMP_MODE_DISABLED) {  << 
848                 return -EACCES;                << 
849         }                                      << 
850                                                << 
851         spin_lock_irq(&task->sighand->siglock);           << 
852         if (task->seccomp.mode != SECCOMP_MODE_FILTER) {  << 
853                 ret = -EINVAL;                 << 
854                 goto out;                         497                 goto out;
855         }                                         498         }
856                                                   499 
857         filter = task->seccomp.filter;         !! 500         current->seccomp.mode = seccomp_mode;
858         while (filter) {                       !! 501         set_thread_flag(TIF_SECCOMP);
859                 filter = filter->prev;         << 
860                 count++;                       << 
861         }                                      << 
862                                                << 
863         if (filter_off >= count) {             << 
864                 ret = -ENOENT;                 << 
865                 goto out;                      << 
866         }                                      << 
867         count -= filter_off;                   << 
868                                                << 
869         filter = task->seccomp.filter;         << 
870         while (filter && count > 1) {          << 
871                 filter = filter->prev;         << 
872                 count--;                       << 
873         }                                      << 
874                                                << 
875         if (WARN_ON(count != 1 || !filter)) {  << 
876                 /* The filter tree shouldn't shrink while we're using it. */  << 
877                 ret = -ENOENT;                 << 
878                 goto out;                      << 
879         }                                      << 
880                                                << 
881         fprog = filter->prog->orig_prog;       << 
882         if (!fprog) {                          << 
883                 /* This must be a new non-cBPF filter, since we save  << 
884                  * every cBPF filter's orig_prog above when           << 
885                  * CONFIG_CHECKPOINT_RESTORE is enabled.              << 
886                  */                            << 
887                 ret = -EMEDIUMTYPE;            << 
888                 goto out;                      << 
889         }                                      << 
890                                                << 
891         ret = fprog->len;                      << 
892         if (!data)                             << 
893                 goto out;                      << 
894                                                << 
895         get_seccomp_filter(task);              << 
896         spin_unlock_irq(&task->sighand->siglock);  << 
897                                                << 
898         if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))  << 
899                 ret = -EFAULT;                 << 
900                                                << 
901         put_seccomp_filter(task);              << 
902         return ret;                            << 
903                                                << 
904 out:                                              502 out:
905         spin_unlock_irq(&task->sighand->siglock);  << 
906         return ret;                               503         return ret;
907 }                                                 504 }
908 #endif                                         << 
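
seccomp_get_filter() backs the PTRACE_SECCOMP_GET_FILTER request used by checkpoint/restore tools such as CRIU: with a NULL buffer it reports the instruction count of the filter at the given index, and with a buffer it copies the saved classic-BPF program out. The sketch below is an illustration of the dumping side; it assumes the target is already in ptrace-stop, that the tracer is sufficiently privileged, and that the glibc headers expose PTRACE_SECCOMP_GET_FILTER.

/* Editor-added userspace sketch: dumping a tracee's filter for C/R. */
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/filter.h>

static struct sock_filter *dump_filter(pid_t pid, unsigned long index,
                                       long *len)
{
        struct sock_filter *insns;

        /* First call: NULL buffer, returns the length in BPF instructions. */
        *len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, (void *)index, NULL);
        if (*len <= 0)
                return NULL;

        insns = calloc(*len, sizeof(*insns));
        if (insns &&
            ptrace(PTRACE_SECCOMP_GET_FILTER, pid, (void *)index, insns) < 0) {
                free(insns);
                return NULL;
        }
        return insns;
}
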
909                                                   505 
