
Linux/kernel/seccomp.c

Diff markup

Differences between /kernel/seccomp.c (Version 4.10, left column) and /kernel/seccomp.c (Version 3.6, right column). Lines marked "<<" appear only in 4.10, lines marked ">>" appear only in 3.6, and lines marked "!!" differ between the two versions; code longer than the column width is truncated in this view.


  1 /*                                                  1 /*
  2  * linux/kernel/seccomp.c                           2  * linux/kernel/seccomp.c
  3  *                                                  3  *
  4  * Copyright 2004-2005  Andrea Arcangeli <andr      4  * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
  5  *                                                  5  *
  6  * Copyright (C) 2012 Google, Inc.                  6  * Copyright (C) 2012 Google, Inc.
  7  * Will Drewry <wad@chromium.org>                   7  * Will Drewry <wad@chromium.org>
  8  *                                                  8  *
  9  * This defines a simple but solid secure-comp      9  * This defines a simple but solid secure-computing facility.
 10  *                                                 10  *
 11  * Mode 1 uses a fixed list of allowed system      11  * Mode 1 uses a fixed list of allowed system calls.
 12  * Mode 2 allows user-defined system call filt     12  * Mode 2 allows user-defined system call filters in the form
 13  *        of Berkeley Packet Filters/Linux Soc     13  *        of Berkeley Packet Filters/Linux Socket Filters.
 14  */                                                14  */
 15                                                    15 
 16 #include <linux/atomic.h>                          16 #include <linux/atomic.h>
 17 #include <linux/audit.h>                           17 #include <linux/audit.h>
 18 #include <linux/compat.h>                          18 #include <linux/compat.h>
 19 #include <linux/sched.h>                           19 #include <linux/sched.h>
 20 #include <linux/seccomp.h>                         20 #include <linux/seccomp.h>
 21 #include <linux/slab.h>                        << 
 22 #include <linux/syscalls.h>                    << 
 23                                                    21 
 24 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER         !!  22 /* #define SECCOMP_DEBUG 1 */
 25 #include <asm/syscall.h>                       << 
 26 #endif                                         << 
 27                                                    23 
 28 #ifdef CONFIG_SECCOMP_FILTER                       24 #ifdef CONFIG_SECCOMP_FILTER
                                                   >>  25 #include <asm/syscall.h>
 29 #include <linux/filter.h>                          26 #include <linux/filter.h>
 30 #include <linux/pid.h>                         << 
 31 #include <linux/ptrace.h>                          27 #include <linux/ptrace.h>
 32 #include <linux/security.h>                        28 #include <linux/security.h>
                                                   >>  29 #include <linux/slab.h>
 33 #include <linux/tracehook.h>                       30 #include <linux/tracehook.h>
 34 #include <linux/uaccess.h>                         31 #include <linux/uaccess.h>
 35                                                    32 
 36 /**                                                33 /**
 37  * struct seccomp_filter - container for secco     34  * struct seccomp_filter - container for seccomp BPF programs
 38  *                                                 35  *
 39  * @usage: reference count to manage the objec     36  * @usage: reference count to manage the object lifetime.
 40  *         get/put helpers should be used when     37  *         get/put helpers should be used when accessing an instance
 41  *         outside of a lifetime-guarded secti     38  *         outside of a lifetime-guarded section.  In general, this
 42  *         is only needed for handling filters     39  *         is only needed for handling filters shared across tasks.
 43  * @prev: points to a previously installed, or     40  * @prev: points to a previously installed, or inherited, filter
 44  * @prog: the BPF program to evaluate          !!  41  * @len: the number of instructions in the program
                                                   >>  42  * @insns: the BPF program instructions to evaluate
 45  *                                                 43  *
 46  * seccomp_filter objects are organized in a t     44  * seccomp_filter objects are organized in a tree linked via the @prev
 47  * pointer.  For any task, it appears to be a      45  * pointer.  For any task, it appears to be a singly-linked list starting
 48  * with current->seccomp.filter, the most rece     46  * with current->seccomp.filter, the most recently attached or inherited filter.
 49  * However, multiple filters may share a @prev     47  * However, multiple filters may share a @prev node, by way of fork(), which
 50  * results in a unidirectional tree existing i     48  * results in a unidirectional tree existing in memory.  This is similar to
 51  * how namespaces work.                            49  * how namespaces work.
 52  *                                                 50  *
 53  * seccomp_filter objects should never be modi     51  * seccomp_filter objects should never be modified after being attached
 54  * to a task_struct (other than @usage).           52  * to a task_struct (other than @usage).
 55  */                                                53  */
 56 struct seccomp_filter {                            54 struct seccomp_filter {
 57         atomic_t usage;                            55         atomic_t usage;
 58         struct seccomp_filter *prev;               56         struct seccomp_filter *prev;
 59         struct bpf_prog *prog;                 !!  57         unsigned short len;  /* Instruction count */
                                                   >>  58         struct sock_filter insns[];
 60 };                                                 59 };
 61                                                    60 
 62 /* Limit any path through the tree to 256KB wo     61 /* Limit any path through the tree to 256KB worth of instructions. */
 63 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof     62 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
 64                                                    63 
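As a quick sanity check on the limit above (an editorial sketch, not code from either kernel version): a classic BPF instruction, struct sock_filter, is 8 bytes, so the 256KB budget comes out to 32768 instructions along any chain of filters reachable through @prev, and the attach path additionally charges a 4-instruction penalty per already-installed filter.

    #include <stdio.h>
    #include <linux/filter.h>   /* struct sock_filter: 8 bytes per instruction */

    int main(void)
    {
        /* (1 << 18) bytes == 256KB; mirrors the MAX_INSNS_PER_PATH definition. */
        unsigned long max_insns = (1UL << 18) / sizeof(struct sock_filter);

        printf("max instructions per filter path: %lu\n", max_insns);  /* 32768 */
        return 0;
    }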
 65 /*                                             !!  64 /**
                                                   >>  65  * get_u32 - returns a u32 offset into data
                                                   >>  66  * @data: a unsigned 64 bit value
                                                   >>  67  * @index: 0 or 1 to return the first or second 32-bits
                                                   >>  68  *
                                                   >>  69  * This inline exists to hide the length of unsigned long.  If a 32-bit
                                                   >>  70  * unsigned long is passed in, it will be extended and the top 32-bits will be
                                                   >>  71  * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
                                                   >>  72  * properly returned.
                                                   >>  73  *
 66  * Endianness is explicitly ignored and left f     74  * Endianness is explicitly ignored and left for BPF program authors to manage
 67  * as per the specific architecture.               75  * as per the specific architecture.
 68  */                                                76  */
 69 static void populate_seccomp_data(struct secco !!  77 static inline u32 get_u32(u64 data, int index)
 70 {                                                  78 {
 71         struct task_struct *task = current;    !!  79         return ((u32 *)&data)[index];
 72         struct pt_regs *regs = task_pt_regs(ta !!  80 }
 73         unsigned long args[6];                 !!  81 
 74                                                !!  82 /* Helper for bpf_load below. */
 75         sd->nr = syscall_get_nr(task, regs);   !!  83 #define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
 76         sd->arch = syscall_get_arch();         !!  84 /**
 77         syscall_get_arguments(task, regs, 0, 6 !!  85  * bpf_load: checks and returns a pointer to the requested offset
 78         sd->args[0] = args[0];                 !!  86  * @off: offset into struct seccomp_data to load from
 79         sd->args[1] = args[1];                 !!  87  *
 80         sd->args[2] = args[2];                 !!  88  * Returns the requested 32-bits of data.
 81         sd->args[3] = args[3];                 !!  89  * seccomp_check_filter() should assure that @off is 32-bit aligned
 82         sd->args[4] = args[4];                 !!  90  * and not out of bounds.  Failure to do so is a BUG.
 83         sd->args[5] = args[5];                 !!  91  */
 84         sd->instruction_pointer = KSTK_EIP(tas !!  92 u32 seccomp_bpf_load(int off)
                                                   >>  93 {
                                                   >>  94         struct pt_regs *regs = task_pt_regs(current);
                                                   >>  95         if (off == BPF_DATA(nr))
                                                   >>  96                 return syscall_get_nr(current, regs);
                                                   >>  97         if (off == BPF_DATA(arch))
                                                   >>  98                 return syscall_get_arch(current, regs);
                                                   >>  99         if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
                                                   >> 100                 unsigned long value;
                                                   >> 101                 int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
                                                   >> 102                 int index = !!(off % sizeof(u64));
                                                   >> 103                 syscall_get_arguments(current, regs, arg, 1, &value);
                                                   >> 104                 return get_u32(value, index);
                                                   >> 105         }
                                                   >> 106         if (off == BPF_DATA(instruction_pointer))
                                                   >> 107                 return get_u32(KSTK_EIP(current), 0);
                                                   >> 108         if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
                                                   >> 109                 return get_u32(KSTK_EIP(current), 1);
                                                   >> 110         /* seccomp_check_filter should make this impossible. */
                                                   >> 111         BUG();
 85 }                                                 112 }
 86                                                   113 
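Both versions expect the same classic BPF program shape from userspace: absolute 32-bit loads whose offsets index struct seccomp_data, which the kernel then redirects (through seccomp_bpf_load() in 3.6, or the translated program in 4.10). A minimal sketch of such a filter, assuming x86-64; example_filter is an illustrative name, not something defined in this file.

    #include <stddef.h>
    #include <linux/audit.h>     /* AUDIT_ARCH_X86_64 */
    #include <linux/filter.h>    /* struct sock_filter, BPF_STMT, BPF_JUMP */
    #include <linux/seccomp.h>   /* struct seccomp_data, SECCOMP_RET_* */

    /* Illustrative policy: kill on a foreign architecture, otherwise load the
     * syscall number (the kind of BPF_ABS load seccomp rewrites) and allow. */
    static struct sock_filter example_filter[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    };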
 87 /**                                               114 /**
 88  *      seccomp_check_filter - verify seccomp     115  *      seccomp_check_filter - verify seccomp filter code
 89  *      @filter: filter to verify                 116  *      @filter: filter to verify
 90  *      @flen: length of filter                   117  *      @flen: length of filter
 91  *                                                118  *
 92  * Takes a previously checked filter (by bpf_c !! 119  * Takes a previously checked filter (by sk_chk_filter) and
 93  * redirects all filter code that loads struct    120  * redirects all filter code that loads struct sk_buff data
 94  * and related data through seccomp_bpf_load.     121  * and related data through seccomp_bpf_load.  It also
 95  * enforces length and alignment checking of t    122  * enforces length and alignment checking of those loads.
 96  *                                                123  *
 97  * Returns 0 if the rule set is legal or -EINV    124  * Returns 0 if the rule set is legal or -EINVAL if not.
 98  */                                               125  */
 99 static int seccomp_check_filter(struct sock_fi    126 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
100 {                                                 127 {
101         int pc;                                   128         int pc;
102         for (pc = 0; pc < flen; pc++) {           129         for (pc = 0; pc < flen; pc++) {
103                 struct sock_filter *ftest = &f    130                 struct sock_filter *ftest = &filter[pc];
104                 u16 code = ftest->code;           131                 u16 code = ftest->code;
105                 u32 k = ftest->k;                 132                 u32 k = ftest->k;
106                                                   133 
107                 switch (code) {                   134                 switch (code) {
108                 case BPF_LD | BPF_W | BPF_ABS: !! 135                 case BPF_S_LD_W_ABS:
109                         ftest->code = BPF_LDX  !! 136                         ftest->code = BPF_S_ANC_SECCOMP_LD_W;
110                         /* 32-bit aligned and     137                         /* 32-bit aligned and not out of bounds. */
111                         if (k >= sizeof(struct    138                         if (k >= sizeof(struct seccomp_data) || k & 3)
112                                 return -EINVAL    139                                 return -EINVAL;
113                         continue;                 140                         continue;
114                 case BPF_LD | BPF_W | BPF_LEN: !! 141                 case BPF_S_LD_W_LEN:
115                         ftest->code = BPF_LD | !! 142                         ftest->code = BPF_S_LD_IMM;
116                         ftest->k = sizeof(stru    143                         ftest->k = sizeof(struct seccomp_data);
117                         continue;                 144                         continue;
118                 case BPF_LDX | BPF_W | BPF_LEN !! 145                 case BPF_S_LDX_W_LEN:
119                         ftest->code = BPF_LDX  !! 146                         ftest->code = BPF_S_LDX_IMM;
120                         ftest->k = sizeof(stru    147                         ftest->k = sizeof(struct seccomp_data);
121                         continue;                 148                         continue;
122                 /* Explicitly include allowed     149                 /* Explicitly include allowed calls. */
123                 case BPF_RET | BPF_K:          !! 150                 case BPF_S_RET_K:
124                 case BPF_RET | BPF_A:          !! 151                 case BPF_S_RET_A:
125                 case BPF_ALU | BPF_ADD | BPF_K !! 152                 case BPF_S_ALU_ADD_K:
126                 case BPF_ALU | BPF_ADD | BPF_X !! 153                 case BPF_S_ALU_ADD_X:
127                 case BPF_ALU | BPF_SUB | BPF_K !! 154                 case BPF_S_ALU_SUB_K:
128                 case BPF_ALU | BPF_SUB | BPF_X !! 155                 case BPF_S_ALU_SUB_X:
129                 case BPF_ALU | BPF_MUL | BPF_K !! 156                 case BPF_S_ALU_MUL_K:
130                 case BPF_ALU | BPF_MUL | BPF_X !! 157                 case BPF_S_ALU_MUL_X:
131                 case BPF_ALU | BPF_DIV | BPF_K !! 158                 case BPF_S_ALU_DIV_X:
132                 case BPF_ALU | BPF_DIV | BPF_X !! 159                 case BPF_S_ALU_AND_K:
133                 case BPF_ALU | BPF_AND | BPF_K !! 160                 case BPF_S_ALU_AND_X:
134                 case BPF_ALU | BPF_AND | BPF_X !! 161                 case BPF_S_ALU_OR_K:
135                 case BPF_ALU | BPF_OR | BPF_K: !! 162                 case BPF_S_ALU_OR_X:
136                 case BPF_ALU | BPF_OR | BPF_X: !! 163                 case BPF_S_ALU_LSH_K:
137                 case BPF_ALU | BPF_XOR | BPF_K !! 164                 case BPF_S_ALU_LSH_X:
138                 case BPF_ALU | BPF_XOR | BPF_X !! 165                 case BPF_S_ALU_RSH_K:
139                 case BPF_ALU | BPF_LSH | BPF_K !! 166                 case BPF_S_ALU_RSH_X:
140                 case BPF_ALU | BPF_LSH | BPF_X !! 167                 case BPF_S_ALU_NEG:
141                 case BPF_ALU | BPF_RSH | BPF_K !! 168                 case BPF_S_LD_IMM:
142                 case BPF_ALU | BPF_RSH | BPF_X !! 169                 case BPF_S_LDX_IMM:
143                 case BPF_ALU | BPF_NEG:        !! 170                 case BPF_S_MISC_TAX:
144                 case BPF_LD | BPF_IMM:         !! 171                 case BPF_S_MISC_TXA:
145                 case BPF_LDX | BPF_IMM:        !! 172                 case BPF_S_ALU_DIV_K:
146                 case BPF_MISC | BPF_TAX:       !! 173                 case BPF_S_LD_MEM:
147                 case BPF_MISC | BPF_TXA:       !! 174                 case BPF_S_LDX_MEM:
148                 case BPF_LD | BPF_MEM:         !! 175                 case BPF_S_ST:
149                 case BPF_LDX | BPF_MEM:        !! 176                 case BPF_S_STX:
150                 case BPF_ST:                   !! 177                 case BPF_S_JMP_JA:
151                 case BPF_STX:                  !! 178                 case BPF_S_JMP_JEQ_K:
152                 case BPF_JMP | BPF_JA:         !! 179                 case BPF_S_JMP_JEQ_X:
153                 case BPF_JMP | BPF_JEQ | BPF_K !! 180                 case BPF_S_JMP_JGE_K:
154                 case BPF_JMP | BPF_JEQ | BPF_X !! 181                 case BPF_S_JMP_JGE_X:
155                 case BPF_JMP | BPF_JGE | BPF_K !! 182                 case BPF_S_JMP_JGT_K:
156                 case BPF_JMP | BPF_JGE | BPF_X !! 183                 case BPF_S_JMP_JGT_X:
157                 case BPF_JMP | BPF_JGT | BPF_K !! 184                 case BPF_S_JMP_JSET_K:
158                 case BPF_JMP | BPF_JGT | BPF_X !! 185                 case BPF_S_JMP_JSET_X:
159                 case BPF_JMP | BPF_JSET | BPF_ << 
160                 case BPF_JMP | BPF_JSET | BPF_ << 
161                         continue;                 186                         continue;
162                 default:                          187                 default:
163                         return -EINVAL;           188                         return -EINVAL;
164                 }                                 189                 }
165         }                                         190         }
166         return 0;                                 191         return 0;
167 }                                                 192 }
168                                                   193 
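Because the checker above only admits 4-byte-aligned, in-bounds 32-bit loads, a filter that needs to examine a 64-bit syscall argument has to compare it one word at a time. A sketch of that idiom (little-endian layout assumed; as the comments note, endianness is left to the filter author):

    #include <stddef.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    #define ARG1_LO offsetof(struct seccomp_data, args[1])
    #define ARG1_HI (offsetof(struct seccomp_data, args[1]) + sizeof(__u32))

    /* Illustrative fragment: allow only when args[1] == 0x1234, else trap. */
    static struct sock_filter check_arg1[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, ARG1_LO),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x1234, 0, 3),   /* low 32 bits  */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, ARG1_HI),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),        /* high 32 bits */
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRAP),
    };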
169 /**                                               194 /**
170  * seccomp_run_filters - evaluates all seccomp !! 195  * seccomp_run_filters - evaluates all seccomp filters against @syscall
171  * @sd: optional seccomp data to be passed to  !! 196  * @syscall: number of the current system call
172  *                                                197  *
173  * Returns valid seccomp BPF response codes.      198  * Returns valid seccomp BPF response codes.
174  */                                               199  */
175 static u32 seccomp_run_filters(const struct se !! 200 static u32 seccomp_run_filters(int syscall)
176 {                                                 201 {
177         struct seccomp_data sd_local;          !! 202         struct seccomp_filter *f;
178         u32 ret = SECCOMP_RET_ALLOW;              203         u32 ret = SECCOMP_RET_ALLOW;
179         /* Make sure cross-thread synced filte << 
180         struct seccomp_filter *f =             << 
181                         lockless_dereference(c << 
182                                                   204 
183         /* Ensure unexpected behavior doesn't     205         /* Ensure unexpected behavior doesn't result in failing open. */
184         if (unlikely(WARN_ON(f == NULL)))      !! 206         if (WARN_ON(current->seccomp.filter == NULL))
185                 return SECCOMP_RET_KILL;          207                 return SECCOMP_RET_KILL;
186                                                   208 
187         if (!sd) {                             << 
188                 populate_seccomp_data(&sd_loca << 
189                 sd = &sd_local;                << 
190         }                                      << 
191                                                << 
192         /*                                        209         /*
193          * All filters in the list are evaluat    210          * All filters in the list are evaluated and the lowest BPF return
194          * value always takes priority (ignori    211          * value always takes priority (ignoring the DATA).
195          */                                       212          */
196         for (; f; f = f->prev) {               !! 213         for (f = current->seccomp.filter; f; f = f->prev) {
197                 u32 cur_ret = BPF_PROG_RUN(f-> !! 214                 u32 cur_ret = sk_run_filter(NULL, f->insns);
198                                                << 
199                 if ((cur_ret & SECCOMP_RET_ACT    215                 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
200                         ret = cur_ret;            216                         ret = cur_ret;
201         }                                         217         }
202         return ret;                               218         return ret;
203 }                                                 219 }
204 #endif /* CONFIG_SECCOMP_FILTER */             << 
205                                                << 
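The "lowest return value takes priority" rule in seccomp_run_filters() works because of how the SECCOMP_RET_* actions are numbered in <linux/seccomp.h>; only the action half of the 32-bit word is compared, while the 16 data bits are carried along unchanged. A short illustrative sketch using the constants as defined for these kernel versions:

    #include <errno.h>
    #include <linux/seccomp.h>

    /* Actions, most to least severe (numerically ascending):
     *   SECCOMP_RET_KILL   0x00000000
     *   SECCOMP_RET_TRAP   0x00030000
     *   SECCOMP_RET_ERRNO  0x00050000
     *   SECCOMP_RET_TRACE  0x7ff00000
     *   SECCOMP_RET_ALLOW  0x7fff0000
     * If one filter in the chain returns ERRNO and another ALLOW, the task
     * observes the ERRNO result. */
    static inline __u32 stricter(__u32 a, __u32 b)
    {
        return ((a & SECCOMP_RET_ACTION) < (b & SECCOMP_RET_ACTION)) ? a : b;
    }

    /* Example return value a filter might use: fail the syscall with EPERM. */
    #define DENY_EPERM (SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA))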
206 static inline bool seccomp_may_assign_mode(uns << 
207 {                                              << 
208         assert_spin_locked(&current->sighand-> << 
209                                                << 
210         if (current->seccomp.mode && current-> << 
211                 return false;                  << 
212                                                << 
213         return true;                           << 
214 }                                              << 
215                                                << 
216 static inline void seccomp_assign_mode(struct  << 
217                                        unsigne << 
218 {                                              << 
219         assert_spin_locked(&task->sighand->sig << 
220                                                << 
221         task->seccomp.mode = seccomp_mode;     << 
222         /*                                     << 
223          * Make sure TIF_SECCOMP cannot be set << 
224          * filter) is set.                     << 
225          */                                    << 
226         smp_mb__before_atomic();               << 
227         set_tsk_thread_flag(task, TIF_SECCOMP) << 
228 }                                              << 
229                                                << 
230 #ifdef CONFIG_SECCOMP_FILTER                   << 
231 /* Returns 1 if the parent is an ancestor of t << 
232 static int is_ancestor(struct seccomp_filter * << 
233                        struct seccomp_filter * << 
234 {                                              << 
235         /* NULL is the root ancestor. */       << 
236         if (parent == NULL)                    << 
237                 return 1;                      << 
238         for (; child; child = child->prev)     << 
239                 if (child == parent)           << 
240                         return 1;              << 
241         return 0;                              << 
242 }                                              << 
243                                                << 
244 /**                                            << 
245  * seccomp_can_sync_threads: checks if all thr << 
246  *                                             << 
247  * Expects sighand and cred_guard_mutex locks  << 
248  *                                             << 
249  * Returns 0 on success, -ve on error, or the  << 
250  * either not in the correct seccomp mode or i << 
251  * seccomp filter.                             << 
252  */                                            << 
253 static inline pid_t seccomp_can_sync_threads(v << 
254 {                                              << 
255         struct task_struct *thread, *caller;   << 
256                                                << 
257         BUG_ON(!mutex_is_locked(&current->sign << 
258         assert_spin_locked(&current->sighand-> << 
259                                                << 
260         /* Validate all threads being eligible << 
261         caller = current;                      << 
262         for_each_thread(caller, thread) {      << 
263                 pid_t failed;                  << 
264                                                << 
265                 /* Skip current, since it is i << 
266                 if (thread == caller)          << 
267                         continue;              << 
268                                                << 
269                 if (thread->seccomp.mode == SE << 
270                     (thread->seccomp.mode == S << 
271                      is_ancestor(thread->secco << 
272                                  caller->secco << 
273                         continue;              << 
274                                                << 
275                 /* Return the first thread tha << 
276                 failed = task_pid_vnr(thread); << 
277                 /* If the pid cannot be resolv << 
278                 if (unlikely(WARN_ON(failed == << 
279                         failed = -ESRCH;       << 
280                 return failed;                 << 
281         }                                      << 
282                                                << 
283         return 0;                              << 
284 }                                              << 
285                                                << 
286 /**                                            << 
287  * seccomp_sync_threads: sets all threads to u << 
288  *                                             << 
289  * Expects sighand and cred_guard_mutex locks  << 
290  * seccomp_can_sync_threads() to have returned << 
291  * without dropping the locks.                 << 
292  *                                             << 
293  */                                            << 
294 static inline void seccomp_sync_threads(void)  << 
295 {                                              << 
296         struct task_struct *thread, *caller;   << 
297                                                << 
298         BUG_ON(!mutex_is_locked(&current->sign << 
299         assert_spin_locked(&current->sighand-> << 
300                                                << 
301         /* Synchronize all threads. */         << 
302         caller = current;                      << 
303         for_each_thread(caller, thread) {      << 
304                 /* Skip current, since it need << 
305                 if (thread == caller)          << 
306                         continue;              << 
307                                                << 
308                 /* Get a task reference for th << 
309                 get_seccomp_filter(caller);    << 
310                 /*                             << 
311                  * Drop the task reference to  << 
312                  * current's path will hold a  << 
313                  * allows a put before the ass << 
314                  */                            << 
315                 put_seccomp_filter(thread);    << 
316                 smp_store_release(&thread->sec << 
317                                   caller->secc << 
318                                                << 
319                 /*                             << 
320                  * Don't let an unprivileged t << 
321                  * the no_new_privs restrictio << 
322                  * a thread that sets it up, e << 
323                  * then dies.                  << 
324                  */                            << 
325                 if (task_no_new_privs(caller)) << 
326                         task_set_no_new_privs( << 
327                                                << 
328                 /*                             << 
329                  * Opt the other thread into s << 
330                  * As threads are considered t << 
331                  * equivalent (see ptrace_may_ << 
332                  * allow one thread to transit << 
333                  */                            << 
334                 if (thread->seccomp.mode == SE << 
335                         seccomp_assign_mode(th << 
336         }                                      << 
337 }                                              << 
338                                                   220 
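From userspace, the thread-sync path above is reached through the seccomp(2) syscall with SECCOMP_FILTER_FLAG_TSYNC rather than through prctl(2). A hedged usage sketch, assuming headers new enough to expose __NR_seccomp and the SECCOMP_SET_MODE_FILTER/SECCOMP_FILTER_FLAG_TSYNC constants; install_for_all_threads() and prog are illustrative names.

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns 0 on success; on a TSYNC conflict the kernel returns the TID of
     * the first thread that could not be synchronized, matching
     * seccomp_can_sync_threads() above. */
    static long install_for_all_threads(const struct sock_fprog *prog)
    {
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                       SECCOMP_FILTER_FLAG_TSYNC, prog);
    }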
339 /**                                               221 /**
340  * seccomp_prepare_filter: Prepares a seccomp  !! 222  * seccomp_attach_filter: Attaches a seccomp filter to current.
341  * @fprog: BPF program to install                 223  * @fprog: BPF program to install
342  *                                                224  *
343  * Returns filter on success or an ERR_PTR on  !! 225  * Returns 0 on success or an errno on failure.
344  */                                               226  */
345 static struct seccomp_filter *seccomp_prepare_ !! 227 static long seccomp_attach_filter(struct sock_fprog *fprog)
346 {                                                 228 {
347         struct seccomp_filter *sfilter;        !! 229         struct seccomp_filter *filter;
348         int ret;                               !! 230         unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
349         const bool save_orig = IS_ENABLED(CONF !! 231         unsigned long total_insns = fprog->len;
                                                   >> 232         long ret;
350                                                   233 
351         if (fprog->len == 0 || fprog->len > BP    234         if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
352                 return ERR_PTR(-EINVAL);       !! 235                 return -EINVAL;
353                                                   236 
354         BUG_ON(INT_MAX / fprog->len < sizeof(s !! 237         for (filter = current->seccomp.filter; filter; filter = filter->prev)
                                                   >> 238                 total_insns += filter->len + 4;  /* include a 4 instr penalty */
                                                   >> 239         if (total_insns > MAX_INSNS_PER_PATH)
                                                   >> 240                 return -ENOMEM;
355                                                   241 
356         /*                                        242         /*
357          * Installing a seccomp filter require !! 243          * Installing a seccomp filter requires that the task have
358          * CAP_SYS_ADMIN in its namespace or b    244          * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
359          * This avoids scenarios where unprivi    245          * This avoids scenarios where unprivileged tasks can affect the
360          * behavior of privileged children.       246          * behavior of privileged children.
361          */                                       247          */
362         if (!task_no_new_privs(current) &&     !! 248         if (!current->no_new_privs &&
363             security_capable_noaudit(current_c    249             security_capable_noaudit(current_cred(), current_user_ns(),
364                                      CAP_SYS_A    250                                      CAP_SYS_ADMIN) != 0)
365                 return ERR_PTR(-EACCES);       !! 251                 return -EACCES;
366                                                   252 
367         /* Allocate a new seccomp_filter */       253         /* Allocate a new seccomp_filter */
368         sfilter = kzalloc(sizeof(*sfilter), GF !! 254         filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
369         if (!sfilter)                          !! 255                          GFP_KERNEL|__GFP_NOWARN);
370                 return ERR_PTR(-ENOMEM);       !! 256         if (!filter)
371                                                !! 257                 return -ENOMEM;
372         ret = bpf_prog_create_from_user(&sfilt !! 258         atomic_set(&filter->usage, 1);
373                                         seccom !! 259         filter->len = fprog->len;
374         if (ret < 0) {                         !! 260 
375                 kfree(sfilter);                !! 261         /* Copy the instructions from fprog. */
376                 return ERR_PTR(ret);           !! 262         ret = -EFAULT;
377         }                                      !! 263         if (copy_from_user(filter->insns, fprog->filter, fp_size))
                                                   >> 264                 goto fail;
378                                                   265 
379         atomic_set(&sfilter->usage, 1);        !! 266         /* Check and rewrite the fprog via the skb checker */
                                                   >> 267         ret = sk_chk_filter(filter->insns, filter->len);
                                                   >> 268         if (ret)
                                                   >> 269                 goto fail;
                                                   >> 270 
                                                   >> 271         /* Check and rewrite the fprog for seccomp use */
                                                   >> 272         ret = seccomp_check_filter(filter->insns, filter->len);
                                                   >> 273         if (ret)
                                                   >> 274                 goto fail;
380                                                   275 
381         return sfilter;                        !! 276         /*
                                                   >> 277          * If there is an existing filter, make it the prev and don't drop its
                                                   >> 278          * task reference.
                                                   >> 279          */
                                                   >> 280         filter->prev = current->seccomp.filter;
                                                   >> 281         current->seccomp.filter = filter;
                                                   >> 282         return 0;
                                                   >> 283 fail:
                                                   >> 284         kfree(filter);
                                                   >> 285         return ret;
382 }                                                 286 }
383                                                   287 
384 /**                                               288 /**
385  * seccomp_prepare_user_filter - prepares a us !! 289  * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
386  * @user_filter: pointer to the user data cont    290  * @user_filter: pointer to the user data containing a sock_fprog.
387  *                                                291  *
388  * Returns 0 on success and non-zero otherwise    292  * Returns 0 on success and non-zero otherwise.
389  */                                               293  */
390 static struct seccomp_filter *                 !! 294 long seccomp_attach_user_filter(char __user *user_filter)
391 seccomp_prepare_user_filter(const char __user  << 
392 {                                                 295 {
393         struct sock_fprog fprog;                  296         struct sock_fprog fprog;
394         struct seccomp_filter *filter = ERR_PT !! 297         long ret = -EFAULT;
395                                                   298 
396 #ifdef CONFIG_COMPAT                              299 #ifdef CONFIG_COMPAT
397         if (in_compat_syscall()) {             !! 300         if (is_compat_task()) {
398                 struct compat_sock_fprog fprog    301                 struct compat_sock_fprog fprog32;
399                 if (copy_from_user(&fprog32, u    302                 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
400                         goto out;                 303                         goto out;
401                 fprog.len = fprog32.len;          304                 fprog.len = fprog32.len;
402                 fprog.filter = compat_ptr(fpro    305                 fprog.filter = compat_ptr(fprog32.filter);
403         } else /* falls through to the if belo    306         } else /* falls through to the if below. */
404 #endif                                            307 #endif
405         if (copy_from_user(&fprog, user_filter    308         if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
406                 goto out;                         309                 goto out;
407         filter = seccomp_prepare_filter(&fprog !! 310         ret = seccomp_attach_filter(&fprog);
408 out:                                              311 out:
409         return filter;                         !! 312         return ret;
410 }                                              << 
411                                                << 
412 /**                                            << 
413  * seccomp_attach_filter: validate and attach  << 
414  * @flags:  flags to change filter behavior    << 
415  * @filter: seccomp filter to add to the curre << 
416  *                                             << 
417  * Caller must be holding current->sighand->si << 
418  *                                             << 
419  * Returns 0 on success, -ve on error.         << 
420  */                                            << 
421 static long seccomp_attach_filter(unsigned int << 
422                                   struct secco << 
423 {                                              << 
424         unsigned long total_insns;             << 
425         struct seccomp_filter *walker;         << 
426                                                << 
427         assert_spin_locked(&current->sighand-> << 
428                                                << 
429         /* Validate resulting filter length. * << 
430         total_insns = filter->prog->len;       << 
431         for (walker = current->seccomp.filter; << 
432                 total_insns += walker->prog->l << 
433         if (total_insns > MAX_INSNS_PER_PATH)  << 
434                 return -ENOMEM;                << 
435                                                << 
436         /* If thread sync has been requested,  << 
437         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
438                 int ret;                       << 
439                                                << 
440                 ret = seccomp_can_sync_threads << 
441                 if (ret)                       << 
442                         return ret;            << 
443         }                                      << 
444                                                << 
445         /*                                     << 
446          * If there is an existing filter, mak << 
447          * task reference.                     << 
448          */                                    << 
449         filter->prev = current->seccomp.filter << 
450         current->seccomp.filter = filter;      << 
451                                                << 
452         /* Now that the new filter is in place << 
453         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
454                 seccomp_sync_threads();        << 
455                                                << 
456         return 0;                              << 
457 }                                                 313 }
458                                                   314 
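For comparison with the kernel plumbing above, the original userspace entry point is prctl(2), which ends up in seccomp_prepare_user_filter() and seccomp_attach_filter(). A minimal sketch; attach_example_filter() is an illustrative helper, and the instruction array is assumed to be built as shown earlier.

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    static int attach_example_filter(struct sock_filter *insns, unsigned short len)
    {
        struct sock_fprog prog = { .len = len, .filter = insns };

        /* Required unless the task holds CAP_SYS_ADMIN in its user namespace. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
            perror("PR_SET_NO_NEW_PRIVS");
            return -1;
        }
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
            perror("PR_SET_SECCOMP");
            return -1;
        }
        return 0;
    }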
459 /* get_seccomp_filter - increments the referen    315 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
460 void get_seccomp_filter(struct task_struct *ts    316 void get_seccomp_filter(struct task_struct *tsk)
461 {                                                 317 {
462         struct seccomp_filter *orig = tsk->sec    318         struct seccomp_filter *orig = tsk->seccomp.filter;
463         if (!orig)                                319         if (!orig)
464                 return;                           320                 return;
465         /* Reference count is bounded by the n    321         /* Reference count is bounded by the number of total processes. */
466         atomic_inc(&orig->usage);                 322         atomic_inc(&orig->usage);
467 }                                                 323 }
468                                                   324 
469 static inline void seccomp_filter_free(struct  << 
470 {                                              << 
471         if (filter) {                          << 
472                 bpf_prog_destroy(filter->prog) << 
473                 kfree(filter);                 << 
474         }                                      << 
475 }                                              << 
476                                                << 
477 /* put_seccomp_filter - decrements the ref cou    325 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
478 void put_seccomp_filter(struct task_struct *ts    326 void put_seccomp_filter(struct task_struct *tsk)
479 {                                                 327 {
480         struct seccomp_filter *orig = tsk->sec    328         struct seccomp_filter *orig = tsk->seccomp.filter;
481         /* Clean up single-reference branches     329         /* Clean up single-reference branches iteratively. */
482         while (orig && atomic_dec_and_test(&or    330         while (orig && atomic_dec_and_test(&orig->usage)) {
483                 struct seccomp_filter *freeme     331                 struct seccomp_filter *freeme = orig;
484                 orig = orig->prev;                332                 orig = orig->prev;
485                 seccomp_filter_free(freeme);   !! 333                 kfree(freeme);
486         }                                         334         }
487 }                                                 335 }
488                                                   336 
489 /**                                               337 /**
490  * seccomp_send_sigsys - signals the task to a    338  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
491  * @syscall: syscall number to send to userlan    339  * @syscall: syscall number to send to userland
492  * @reason: filter-supplied reason code to sen    340  * @reason: filter-supplied reason code to send to userland (via si_errno)
493  *                                                341  *
494  * Forces a SIGSYS with a code of SYS_SECCOMP     342  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
495  */                                               343  */
496 static void seccomp_send_sigsys(int syscall, i    344 static void seccomp_send_sigsys(int syscall, int reason)
497 {                                                 345 {
498         struct siginfo info;                      346         struct siginfo info;
499         memset(&info, 0, sizeof(info));           347         memset(&info, 0, sizeof(info));
500         info.si_signo = SIGSYS;                   348         info.si_signo = SIGSYS;
501         info.si_code = SYS_SECCOMP;               349         info.si_code = SYS_SECCOMP;
502         info.si_call_addr = (void __user *)KST    350         info.si_call_addr = (void __user *)KSTK_EIP(current);
503         info.si_errno = reason;                   351         info.si_errno = reason;
504         info.si_arch = syscall_get_arch();     !! 352         info.si_arch = syscall_get_arch(current, task_pt_regs(current));
505         info.si_syscall = syscall;                353         info.si_syscall = syscall;
506         force_sig_info(SIGSYS, &info, current)    354         force_sig_info(SIGSYS, &info, current);
507 }                                                 355 }
508 #endif  /* CONFIG_SECCOMP_FILTER */               356 #endif  /* CONFIG_SECCOMP_FILTER */
509                                                   357 
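A filter that returns SECCOMP_RET_TRAP relies on the task catching the SIGSYS forced above; the si_syscall, si_errno (the filter-supplied reason), si_arch and si_call_addr fields mirror what seccomp_send_sigsys() fills in. A hedged userspace sketch, assuming a libc that exposes these siginfo fields:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative handler: report the trapped syscall and the 16-bit reason
     * the filter passed back via SECCOMP_RET_DATA (delivered as si_errno).
     * fprintf() is not async-signal-safe; acceptable only for a demo. */
    static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
    {
        (void)sig;
        (void)ucontext;
        fprintf(stderr, "SIGSYS: syscall %d, reason %d, arch 0x%x\n",
                info->si_syscall, info->si_errno, info->si_arch);
    }

    static void install_sigsys_handler(void)
    {
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_sigaction = sigsys_handler;
        act.sa_flags = SA_SIGINFO;
        sigaction(SIGSYS, &act, NULL);
    }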
510 /*                                                358 /*
511  * Secure computing mode 1 allows only read/wr    359  * Secure computing mode 1 allows only read/write/exit/sigreturn.
512  * To be fully secure this must be combined wi    360  * To be fully secure this must be combined with rlimit
513  * to limit the stack allocations too.            361  * to limit the stack allocations too.
514  */                                               362  */
515 static const int mode1_syscalls[] = {          !! 363 static int mode1_syscalls[] = {
516         __NR_seccomp_read, __NR_seccomp_write,    364         __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
517         0, /* null terminated */                  365         0, /* null terminated */
518 };                                                366 };
519                                                   367 
520 static void __secure_computing_strict(int this << 
521 {                                              << 
522         const int *syscall_whitelist = mode1_s << 
523 #ifdef CONFIG_COMPAT                              368 #ifdef CONFIG_COMPAT
524         if (in_compat_syscall())               !! 369 static int mode1_syscalls_32[] = {
525                 syscall_whitelist = get_compat !! 370         __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
526 #endif                                         !! 371         0, /* null terminated */
527         do {                                   !! 372 };
528                 if (*syscall_whitelist == this << 
529                         return;                << 
530         } while (*++syscall_whitelist);        << 
531                                                << 
532 #ifdef SECCOMP_DEBUG                           << 
533         dump_stack();                          << 
534 #endif                                            373 #endif
535         audit_seccomp(this_syscall, SIGKILL, S << 
536         do_exit(SIGKILL);                      << 
537 }                                              << 
538                                                   374 
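Mode 1 needs no filter at all: once enabled it cannot be cleared, and any call outside the fixed whitelist terminates the task with SIGKILL, as __secure_computing_strict() shows. A minimal usage sketch:

    #include <linux/seccomp.h>   /* SECCOMP_MODE_STRICT */
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0))
            return 1;
        /* Only read(), write(), _exit() and sigreturn() are allowed now; any
         * other syscall, even getpid(), is fatal. */
        write(STDOUT_FILENO, "sandboxed\n", 10);
        syscall(SYS_exit, 0);   /* raw exit(2); glibc's _exit() uses exit_group(2) */
        return 0;               /* not reached */
    }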
539 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER        !! 375 int __secure_computing(int this_syscall)
540 void secure_computing_strict(int this_syscall) << 
541 {                                                 376 {
542         int mode = current->seccomp.mode;         377         int mode = current->seccomp.mode;
                                                   >> 378         int exit_sig = 0;
                                                   >> 379         int *syscall;
                                                   >> 380         u32 ret;
543                                                   381 
544         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTO !! 382         switch (mode) {
545             unlikely(current->ptrace & PT_SUSP !! 383         case SECCOMP_MODE_STRICT:
546                 return;                        !! 384                 syscall = mode1_syscalls;
547                                                !! 385 #ifdef CONFIG_COMPAT
548         if (mode == SECCOMP_MODE_DISABLED)     !! 386                 if (is_compat_task())
549                 return;                        !! 387                         syscall = mode1_syscalls_32;
550         else if (mode == SECCOMP_MODE_STRICT)  !! 388 #endif
551                 __secure_computing_strict(this !! 389                 do {
552         else                                   !! 390                         if (*syscall == this_syscall)
553                 BUG();                         !! 391                                 return 0;
554 }                                              !! 392                 } while (*++syscall);
555 #else                                          !! 393                 exit_sig = SIGKILL;
556                                                !! 394                 ret = SECCOMP_RET_KILL;
                                                   >> 395                 break;
557 #ifdef CONFIG_SECCOMP_FILTER                      396 #ifdef CONFIG_SECCOMP_FILTER
558 static int __seccomp_filter(int this_syscall,  !! 397         case SECCOMP_MODE_FILTER: {
559                             const bool recheck !! 398                 int data;
560 {                                              !! 399                 ret = seccomp_run_filters(this_syscall);
561         u32 filter_ret, action;                !! 400                 data = ret & SECCOMP_RET_DATA;
562         int data;                              !! 401                 ret &= SECCOMP_RET_ACTION;
563                                                !! 402                 switch (ret) {
564         /*                                     !! 403                 case SECCOMP_RET_ERRNO:
565          * Make sure that any changes to mode  !! 404                         /* Set the low-order 16-bits as a errno. */
566          * been seen after TIF_SECCOMP was see !! 405                         syscall_set_return_value(current, task_pt_regs(current),
567          */                                    !! 406                                                  -data, 0);
568         rmb();                                 << 
569                                                << 
570         filter_ret = seccomp_run_filters(sd);  << 
571         data = filter_ret & SECCOMP_RET_DATA;  << 
572         action = filter_ret & SECCOMP_RET_ACTI << 
573                                                << 
574         switch (action) {                      << 
575         case SECCOMP_RET_ERRNO:                << 
576                 /* Set low-order bits as an er << 
577                 if (data > MAX_ERRNO)          << 
578                         data = MAX_ERRNO;      << 
579                 syscall_set_return_value(curre << 
580                                          -data << 
581                 goto skip;                     << 
582                                                << 
583         case SECCOMP_RET_TRAP:                 << 
584                 /* Show the handler the origin << 
585                 syscall_rollback(current, task << 
586                 /* Let the filter pass back 16 << 
587                 seccomp_send_sigsys(this_sysca << 
588                 goto skip;                     << 
589                                                << 
590         case SECCOMP_RET_TRACE:                << 
591                 /* We've been put in this stat << 
592                 if (recheck_after_trace)       << 
593                         return 0;              << 
594                                                << 
595                 /* ENOSYS these calls if there << 
596                 if (!ptrace_event_enabled(curr << 
597                         syscall_set_return_val << 
598                                                << 
599                                                << 
600                         goto skip;                407                         goto skip;
601                 }                              !! 408                 case SECCOMP_RET_TRAP:
602                                                !! 409                         /* Show the handler the original registers. */
603                 /* Allow the BPF to provide th !! 410                         syscall_rollback(current, task_pt_regs(current));
604                 ptrace_event(PTRACE_EVENT_SECC !! 411                         /* Let the filter pass back 16 bits of data. */
605                 /*                             !! 412                         seccomp_send_sigsys(this_syscall, data);
606                  * The delivery of a fatal sig << 
607                  * notification may silently s << 
608                  * which could leave us with a << 
609                  * syscall that the tracer wou << 
610                  * changed. Since the process  << 
611                  * force the syscall to be ski << 
612                  * kill the process and correc << 
613                  * notifications.              << 
614                  */                            << 
615                 if (fatal_signal_pending(curre << 
616                         goto skip;             << 
617                 /* Check if the tracer forced  << 
618                 this_syscall = syscall_get_nr( << 
619                 if (this_syscall < 0)          << 
620                         goto skip;                413                         goto skip;
621                                                !! 414                 case SECCOMP_RET_TRACE:
622                 /*                             !! 415                         /* Skip these calls if there is no tracer. */
623                  * Recheck the syscall, since  !! 416                         if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
624                  * intentionally uses a NULL s !! 417                                 goto skip;
625                  * a reload of all registers.  !! 418                         /* Allow the BPF to provide the event message */
626                  * a skip would have already b !! 419                         ptrace_event(PTRACE_EVENT_SECCOMP, data);
627                  */                            !! 420                         /*
628                 if (__seccomp_filter(this_sysc !! 421                          * The delivery of a fatal signal during event
629                         return -1;             !! 422                          * notification may silently skip tracer notification.
630                                                !! 423                          * Terminating the task now avoids executing a system
631                 return 0;                      !! 424                          * call that may not be intended.
632                                                !! 425                          */
633         case SECCOMP_RET_ALLOW:                !! 426                         if (fatal_signal_pending(current))
634                 return 0;                      !! 427                                 break;
635                                                !! 428                         return 0;
636         case SECCOMP_RET_KILL:                 !! 429                 case SECCOMP_RET_ALLOW:
637         default:                               !! 430                         return 0;
638                 audit_seccomp(this_syscall, SI !! 431                 case SECCOMP_RET_KILL:
639                 do_exit(SIGSYS);               !! 432                 default:
                                                   >> 433                         break;
                                                   >> 434                 }
                                                   >> 435                 exit_sig = SIGSYS;
                                                   >> 436                 break;
640         }                                         437         }
641                                                << 
642         unreachable();                         << 
643                                                << 
644 skip:                                          << 
645         audit_seccomp(this_syscall, 0, action) << 
646         return -1;                             << 
647 }                                              << 
648 #else                                          << 
649 static int __seccomp_filter(int this_syscall,  << 
650                             const bool recheck << 
651 {                                              << 
652         BUG();                                 << 
653 }                                              << 
654 #endif                                            438 #endif
655                                                << 
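SECCOMP_RET_TRACE only does something when a tracer has set PTRACE_O_TRACESECCOMP; otherwise the 4.10 handler above forces the syscall to fail with ENOSYS (3.6 simply skipped it). A hedged sketch of the tracer side, assuming a libc whose <sys/ptrace.h> exposes the seccomp ptrace constants and a tracee that is already attached; trace_seccomp_events() is an illustrative name.

    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Illustrative loop: resume the tracee and, on each PTRACE_EVENT_SECCOMP
     * stop, fetch the data word the filter returned with SECCOMP_RET_TRACE. */
    static void trace_seccomp_events(pid_t child)
    {
        int status;
        unsigned long msg;

        ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESECCOMP);
        for (;;) {
            ptrace(PTRACE_CONT, child, 0, 0);
            if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
                break;
            if ((status >> 8) == (SIGTRAP | (PTRACE_EVENT_SECCOMP << 8))) {
                ptrace(PTRACE_GETEVENTMSG, child, 0, &msg);
                /* msg now holds the filter's SECCOMP_RET_DATA value. */
            }
        }
    }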
656 int __secure_computing(const struct seccomp_da << 
657 {                                              << 
658         int mode = current->seccomp.mode;      << 
659         int this_syscall;                      << 
660                                                << 
661         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTO << 
662             unlikely(current->ptrace & PT_SUSP << 
663                 return 0;                      << 
664                                                << 
665         this_syscall = sd ? sd->nr :           << 
666                 syscall_get_nr(current, task_p << 
667                                                << 
668         switch (mode) {                        << 
669         case SECCOMP_MODE_STRICT:              << 
670                 __secure_computing_strict(this << 
671                 return 0;                      << 
672         case SECCOMP_MODE_FILTER:              << 
673                 return __seccomp_filter(this_s << 
674         default:                                  439         default:
675                 BUG();                            440                 BUG();
676         }                                         441         }
677 }                                              << 
678 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */   << 
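The SECCOMP_RET_TRACE handling above notifies an attached tracer via ptrace_event(PTRACE_EVENT_SECCOMP, data) and, in this version, re-runs the filters against whatever syscall the tracer left behind. A minimal tracer-side sketch (userspace, not part of this file; the child is assumed to have called PTRACE_TRACEME, raised SIGSTOP, and installed a filter that returns SECCOMP_RET_TRACE), with error handling elided:

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Fallbacks in case the installed ptrace headers predate these. */
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP	0x00000080
#endif
#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP	7
#endif

static void trace_seccomp_events(pid_t child)
{
	int status;
	unsigned long msg;

	waitpid(child, &status, 0);		/* child stopped itself with SIGSTOP */
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACESECCOMP);
	ptrace(PTRACE_CONT, child, NULL, NULL);

	while (waitpid(child, &status, 0) > 0 && !WIFEXITED(status)) {
		if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_SECCOMP << 8))) {
			/* Low 16 bits of the filter's return value (SECCOMP_RET_DATA). */
			ptrace(PTRACE_GETEVENTMSG, child, NULL, &msg);
			printf("PTRACE_EVENT_SECCOMP, data=%lu\n", msg);
		}
		ptrace(PTRACE_CONT, child, NULL, NULL);
	}
}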
679                                                << 
680 long prctl_get_seccomp(void)                   << 
681 {                                              << 
682         return current->seccomp.mode;          << 
683 }                                              << 
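For reference, a userspace sketch (not part of this file) of the corresponding query. Note that in strict mode prctl(2) is itself not on the allowed list, so a mode 1 task asking this question would be killed with SIGKILL rather than see 1 returned:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int mode = prctl(PR_GET_SECCOMP);	/* 0 (disabled), 2 (filter), or -1 with errno set */

	printf("seccomp mode: %d\n", mode);
	return 0;
}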
684                                                   442 
685 /**                                            !! 443 #ifdef SECCOMP_DEBUG
686  * seccomp_set_mode_strict: internal function  !! 444         dump_stack();
687  *                                             << 
688  * Once current->seccomp.mode is non-zero, it  << 
689  *                                             << 
690  * Returns 0 on success or -EINVAL on failure. << 
691  */                                            << 
692 static long seccomp_set_mode_strict(void)      << 
693 {                                              << 
694         const unsigned long seccomp_mode = SEC << 
695         long ret = -EINVAL;                    << 
696                                                << 
697         spin_lock_irq(&current->sighand->siglo << 
698                                                << 
699         if (!seccomp_may_assign_mode(seccomp_m << 
700                 goto out;                      << 
701                                                << 
702 #ifdef TIF_NOTSC                               << 
703         disable_TSC();                         << 
704 #endif                                            445 #endif
705         seccomp_assign_mode(current, seccomp_m !! 446         audit_seccomp(this_syscall, exit_sig, ret);
706         ret = 0;                               !! 447         do_exit(exit_sig);
707                                                << 
708 out:                                           << 
709         spin_unlock_irq(&current->sighand->sig << 
710                                                << 
711         return ret;                            << 
712 }                                              << 
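A userspace sketch (not part of this file) of entering strict mode through the prctl(2) path. SYS_exit is invoked directly because glibc's _exit() prefers exit_group(2), which is not on the mode 1 whitelist:

#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
		return 1;
	/*
	 * From here on only read(), write(), exit() and sigreturn() remain
	 * usable; any other system call kills the task with SIGKILL.
	 */
	write(STDOUT_FILENO, "strict\n", 7);
	syscall(SYS_exit, 0);
	return 0;	/* not reached */
}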
713                                                << 
714 #ifdef CONFIG_SECCOMP_FILTER                      448 #ifdef CONFIG_SECCOMP_FILTER
715 /**                                            !! 449 skip:
716  * seccomp_set_mode_filter: internal function  !! 450         audit_seccomp(this_syscall, exit_sig, ret);
717  * @flags:  flags to change filter behavior    << 
718  * @filter: struct sock_fprog containing filte << 
719  *                                             << 
720  * This function may be called repeatedly to i << 
721  * Every filter successfully installed will be << 
722  * for each system call the task makes.        << 
723  *                                             << 
724  * Once current->seccomp.mode is non-zero, it  << 
725  *                                             << 
726  * Returns 0 on success or -EINVAL on failure. << 
727  */                                            << 
728 static long seccomp_set_mode_filter(unsigned i << 
729                                     const char << 
730 {                                              << 
731         const unsigned long seccomp_mode = SEC << 
732         struct seccomp_filter *prepared = NULL << 
733         long ret = -EINVAL;                    << 
734                                                << 
735         /* Validate flags. */                  << 
736         if (flags & ~SECCOMP_FILTER_FLAG_MASK) << 
737                 return -EINVAL;                << 
738                                                << 
739         /* Prepare the new filter before holdi << 
740         prepared = seccomp_prepare_user_filter << 
741         if (IS_ERR(prepared))                  << 
742                 return PTR_ERR(prepared);      << 
743                                                << 
744         /*                                     << 
745          * Make sure we cannot change seccomp  << 
746          * while another thread is in the midd << 
747          */                                    << 
748         if (flags & SECCOMP_FILTER_FLAG_TSYNC  << 
749             mutex_lock_killable(&current->sign << 
750                 goto out_free;                 << 
751                                                << 
752         spin_lock_irq(&current->sighand->siglo << 
753                                                << 
754         if (!seccomp_may_assign_mode(seccomp_m << 
755                 goto out;                      << 
756                                                << 
757         ret = seccomp_attach_filter(flags, pre << 
758         if (ret)                               << 
759                 goto out;                      << 
760         /* Do not free the successfully attach << 
761         prepared = NULL;                       << 
762                                                << 
763         seccomp_assign_mode(current, seccomp_m << 
764 out:                                           << 
765         spin_unlock_irq(&current->sighand->sig << 
766         if (flags & SECCOMP_FILTER_FLAG_TSYNC) << 
767                 mutex_unlock(&current->signal- << 
768 out_free:                                      << 
769         seccomp_filter_free(prepared);         << 
770         return ret;                            << 
771 }                                              << 
772 #else                                          << 
773 static inline long seccomp_set_mode_filter(uns << 
774                                            con << 
775 {                                              << 
776         return -EINVAL;                        << 
777 }                                              << 
778 #endif                                            451 #endif
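A userspace sketch (not part of this file) of preparing and attaching one filter, assuming an x86-64 target (for __NR_chmod) and kernel headers new enough (3.17+) to provide SYS_seccomp and SECCOMP_SET_MODE_FILTER; a production filter would also validate seccomp_data.arch before trusting the syscall number:

#include <stddef.h>
#include <errno.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter(void)
{
	struct sock_filter insns[] = {
		/* A = seccomp_data.nr */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* if (A == __NR_chmod) return ERRNO(EPERM) */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chmod, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		/* otherwise allow the call */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required unless the caller has CAP_SYS_ADMIN in its user namespace. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
}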
779                                                !! 452         return -1;
780 /* Common entry point for both prctl and sysca << 
781 static long do_seccomp(unsigned int op, unsign << 
782                        const char __user *uarg << 
783 {                                              << 
784         switch (op) {                          << 
785         case SECCOMP_SET_MODE_STRICT:          << 
786                 if (flags != 0 || uargs != NUL << 
787                         return -EINVAL;        << 
788                 return seccomp_set_mode_strict << 
789         case SECCOMP_SET_MODE_FILTER:          << 
790                 return seccomp_set_mode_filter << 
791         default:                               << 
792                 return -EINVAL;                << 
793         }                                      << 
794 }                                                 453 }
795                                                   454 
796 SYSCALL_DEFINE3(seccomp, unsigned int, op, uns !! 455 long prctl_get_seccomp(void)
797                          const char __user *,  << 
798 {                                                 456 {
799         return do_seccomp(op, flags, uargs);   !! 457         return current->seccomp.mode;
800 }                                                 458 }
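The raw syscall exists chiefly so that flags can be carried. A sketch (not part of this file) of the SECCOMP_FILTER_FLAG_TSYNC case, where "prog" is a struct sock_fprog prepared as in the earlier sketch:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/*
 * Apply "prog" to every thread in the calling process atomically.
 * If synchronization fails, the return value is the TID of a thread
 * that could not be switched to the new filter.
 */
static long install_for_all_threads(const struct sock_fprog *prog)
{
	return syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_TSYNC, prog);
}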
801                                                   459 
802 /**                                               460 /**
803  * prctl_set_seccomp: configures current->secc    461  * prctl_set_seccomp: configures current->seccomp.mode
804  * @seccomp_mode: requested mode to use           462  * @seccomp_mode: requested mode to use
805  * @filter: optional struct sock_fprog for use    463  * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
806  *                                                464  *
                                                   >> 465  * This function may be called repeatedly with a @seccomp_mode of
                                                   >> 466  * SECCOMP_MODE_FILTER to install additional filters.  Every filter
                                                   >> 467  * successfully installed will be evaluated (in reverse order) for each system
                                                   >> 468  * call the task makes.
                                                   >> 469  *
                                                   >> 470  * Once current->seccomp.mode is non-zero, it may not be changed.
                                                   >> 471  *
807  * Returns 0 on success or -EINVAL on failure.    472  * Returns 0 on success or -EINVAL on failure.
808  */                                               473  */
809 long prctl_set_seccomp(unsigned long seccomp_m    474 long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
810 {                                                 475 {
811         unsigned int op;                       !! 476         long ret = -EINVAL;
812         char __user *uargs;                    !! 477 
                                                   >> 478         if (current->seccomp.mode &&
                                                   >> 479             current->seccomp.mode != seccomp_mode)
                                                   >> 480                 goto out;
813                                                   481 
814         switch (seccomp_mode) {                   482         switch (seccomp_mode) {
815         case SECCOMP_MODE_STRICT:                 483         case SECCOMP_MODE_STRICT:
816                 op = SECCOMP_SET_MODE_STRICT;  !! 484                 ret = 0;
817                 /*                             !! 485 #ifdef TIF_NOTSC
818                  * Setting strict mode through !! 486                 disable_TSC();
819                  * so make sure it is always N !! 487 #endif
820                  * check in do_seccomp().      << 
821                  */                            << 
822                 uargs = NULL;                  << 
823                 break;                            488                 break;
                                                   >> 489 #ifdef CONFIG_SECCOMP_FILTER
824         case SECCOMP_MODE_FILTER:                 490         case SECCOMP_MODE_FILTER:
825                 op = SECCOMP_SET_MODE_FILTER;  !! 491                 ret = seccomp_attach_user_filter(filter);
826                 uargs = filter;                !! 492                 if (ret)
                                                   >> 493                         goto out;
827                 break;                            494                 break;
                                                   >> 495 #endif
828         default:                                  496         default:
829                 return -EINVAL;                << 
830         }                                      << 
831                                                << 
832         /* prctl interface doesn't have flags, << 
833         return do_seccomp(op, 0, uargs);       << 
834 }                                              << 
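The legacy prctl(2) route maps onto the same do_seccomp() helper with flags forced to zero, so features such as TSYNC are only reachable through seccomp(2). A sketch (not part of this file), again assuming no_new_privs has already been set:

#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* Equivalent to seccomp(SECCOMP_SET_MODE_FILTER, 0, prog). */
static int install_via_prctl(struct sock_fprog *prog)
{
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog);
}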
835                                                << 
836 #if defined(CONFIG_SECCOMP_FILTER) && defined( << 
837 long seccomp_get_filter(struct task_struct *ta << 
838                         void __user *data)     << 
839 {                                              << 
840         struct seccomp_filter *filter;         << 
841         struct sock_fprog_kern *fprog;         << 
842         long ret;                              << 
843         unsigned long count = 0;               << 
844                                                << 
845         if (!capable(CAP_SYS_ADMIN) ||         << 
846             current->seccomp.mode != SECCOMP_M << 
847                 return -EACCES;                << 
848         }                                      << 
849                                                << 
850         spin_lock_irq(&task->sighand->siglock) << 
851         if (task->seccomp.mode != SECCOMP_MODE << 
852                 ret = -EINVAL;                 << 
853                 goto out;                         497                 goto out;
854         }                                         498         }
855                                                   499 
856         filter = task->seccomp.filter;         !! 500         current->seccomp.mode = seccomp_mode;
857         while (filter) {                       !! 501         set_thread_flag(TIF_SECCOMP);
858                 filter = filter->prev;         << 
859                 count++;                       << 
860         }                                      << 
861                                                << 
862         if (filter_off >= count) {             << 
863                 ret = -ENOENT;                 << 
864                 goto out;                      << 
865         }                                      << 
866         count -= filter_off;                   << 
867                                                << 
868         filter = task->seccomp.filter;         << 
869         while (filter && count > 1) {          << 
870                 filter = filter->prev;         << 
871                 count--;                       << 
872         }                                      << 
873                                                << 
874         if (WARN_ON(count != 1 || !filter)) {  << 
875                 /* The filter tree shouldn't s << 
876                 ret = -ENOENT;                 << 
877                 goto out;                      << 
878         }                                      << 
879                                                << 
880         fprog = filter->prog->orig_prog;       << 
881         if (!fprog) {                          << 
882                 /* This must be a new non-cBPF << 
883                  * every cBPF filter's orig_pr << 
884                  * CONFIG_CHECKPOINT_RESTORE i << 
885                  */                            << 
886                 ret = -EMEDIUMTYPE;            << 
887                 goto out;                      << 
888         }                                      << 
889                                                << 
890         ret = fprog->len;                      << 
891         if (!data)                             << 
892                 goto out;                      << 
893                                                << 
894         get_seccomp_filter(task);              << 
895         spin_unlock_irq(&task->sighand->sigloc << 
896                                                << 
897         if (copy_to_user(data, fprog->filter,  << 
898                 ret = -EFAULT;                 << 
899                                                << 
900         put_seccomp_filter(task);              << 
901         return ret;                            << 
902                                                << 
903 out:                                              502 out:
904         spin_unlock_irq(&task->sighand->sigloc << 
905         return ret;                               503         return ret;
906 }                                                 504 }
907 #endif                                         << 
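A sketch (not part of this file) of how a checkpoint/restore tool might use the corresponding PTRACE_SECCOMP_GET_FILTER request. Per the checks above, the caller needs CAP_SYS_ADMIN and must not be under seccomp itself, and the target must already be ptrace-attached and stopped; the fallback constant is only for headers older than 4.4:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/filter.h>

#ifndef PTRACE_SECCOMP_GET_FILTER
#define PTRACE_SECCOMP_GET_FILTER 0x420c
#endif

static long dump_filter(pid_t pid, unsigned long index)
{
	long len;
	struct sock_filter *insns;

	/* With a NULL buffer the call only reports the instruction count. */
	len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, (void *)index, NULL);
	if (len < 0)
		return len;

	insns = calloc(len, sizeof(*insns));
	if (!insns)
		return -1;
	if (ptrace(PTRACE_SECCOMP_GET_FILTER, pid, (void *)index, insns) < 0) {
		free(insns);
		return -1;
	}
	printf("filter %lu: %ld BPF instructions\n", index, len);
	free(insns);
	return len;
}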
908                                                   505 
