Version:  2.0.40 2.2.26 2.4.37 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7

Linux/drivers/net/tun.c

  1 /*
  2  *  TUN - Universal TUN/TAP device driver.
  3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
  4  *
  5  *  This program is free software; you can redistribute it and/or modify
  6  *  it under the terms of the GNU General Public License as published by
  7  *  the Free Software Foundation; either version 2 of the License, or
  8  *  (at your option) any later version.
  9  *
 10  *  This program is distributed in the hope that it will be useful,
 11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 13  *  GNU General Public License for more details.
 14  *
 15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 16  */
 17 
 18 /*
 19  *  Changes:
 20  *
 21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 22  *    Add TUNSETLINK ioctl to set the link encapsulation
 23  *
 24  *  Mark Smith <markzzzsmith@yahoo.com.au>
 25  *    Use eth_random_addr() for tap MAC address.
 26  *
 27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 28  *    Fixes in packet dropping, queue length setting and queue wakeup.
 29  *    Increased default tx queue length.
 30  *    Added ethtool API.
 31  *    Minor cleanups
 32  *
 33  *  Daniel Podlejski <underley@underley.eu.org>
 34  *    Modifications for 2.3.99-pre5 kernel.
 35  */
 36 
 37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 38 
 39 #define DRV_NAME        "tun"
 40 #define DRV_VERSION     "1.6"
 41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
 42 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
 43 
 44 #include <linux/module.h>
 45 #include <linux/errno.h>
 46 #include <linux/kernel.h>
 47 #include <linux/major.h>
 48 #include <linux/slab.h>
 49 #include <linux/poll.h>
 50 #include <linux/fcntl.h>
 51 #include <linux/init.h>
 52 #include <linux/skbuff.h>
 53 #include <linux/netdevice.h>
 54 #include <linux/etherdevice.h>
 55 #include <linux/miscdevice.h>
 56 #include <linux/ethtool.h>
 57 #include <linux/rtnetlink.h>
 58 #include <linux/compat.h>
 59 #include <linux/if.h>
 60 #include <linux/if_arp.h>
 61 #include <linux/if_ether.h>
 62 #include <linux/if_tun.h>
 63 #include <linux/if_vlan.h>
 64 #include <linux/crc32.h>
 65 #include <linux/nsproxy.h>
 66 #include <linux/virtio_net.h>
 67 #include <linux/rcupdate.h>
 68 #include <net/net_namespace.h>
 69 #include <net/netns/generic.h>
 70 #include <net/rtnetlink.h>
 71 #include <net/sock.h>
 72 #include <linux/seq_file.h>
 73 #include <linux/uio.h>
 74 
 75 #include <asm/uaccess.h>
 76 
 77 /* Uncomment to enable debugging */
 78 /* #define TUN_DEBUG 1 */
 79 
 80 #ifdef TUN_DEBUG
 81 static int debug;
 82 
 83 #define tun_debug(level, tun, fmt, args...)                     \
 84 do {                                                            \
 85         if (tun->debug)                                         \
 86                 netdev_printk(level, tun->dev, fmt, ##args);    \
 87 } while (0)
 88 #define DBG1(level, fmt, args...)                               \
 89 do {                                                            \
 90         if (debug == 2)                                         \
 91                 printk(level fmt, ##args);                      \
 92 } while (0)
 93 #else
 94 #define tun_debug(level, tun, fmt, args...)                     \
 95 do {                                                            \
 96         if (0)                                                  \
 97                 netdev_printk(level, tun->dev, fmt, ##args);    \
 98 } while (0)
 99 #define DBG1(level, fmt, args...)                               \
100 do {                                                            \
101         if (0)                                                  \
102                 printk(level fmt, ##args);                      \
103 } while (0)
104 #endif
105 
106 /* TUN device flags */
107 
108 /* IFF_ATTACH_QUEUE is never stored in device flags,
109  * overload it to mean fasync when stored there.
110  */
111 #define TUN_FASYNC      IFF_ATTACH_QUEUE
112 /* High bits in flags field are unused. */
113 #define TUN_VNET_LE     0x80000000
114 #define TUN_VNET_BE     0x40000000
115 
116 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
117                       IFF_MULTI_QUEUE)
118 #define GOODCOPY_LEN 128
119 
120 #define FLT_EXACT_COUNT 8
121 struct tap_filter {
122         unsigned int    count;    /* Number of addrs. Zero means disabled */
123         u32             mask[2];  /* Mask of the hashed addrs */
124         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
125 };
126 
127 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
128  * to max number of VCPUs in guest. */
129 #define MAX_TAP_QUEUES 256
130 #define MAX_TAP_FLOWS  4096
131 
132 #define TUN_FLOW_EXPIRE (3 * HZ)
133 
134 struct tun_pcpu_stats {
135         u64 rx_packets;
136         u64 rx_bytes;
137         u64 tx_packets;
138         u64 tx_bytes;
139         struct u64_stats_sync syncp;
140         u32 rx_dropped;
141         u32 tx_dropped;
142         u32 rx_frame_errors;
143 };
144 
145 /* A tun_file connects an open character device to a tuntap netdevice. It
146  * also contains all socket related structures (except sock_fprog and tap_filter)
147  * to serve as one transmit queue for tuntap device. The sock_fprog and
148  * tap_filter were kept in tun_struct since they were used for filtering for the
149  * netdevice not for a specific queue (at least I didn't see the requirement for
150  * this).
151  *
152  * RCU usage:
153  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
154  * other can only be read while rcu_read_lock or rtnl_lock is held.
155  */
156 struct tun_file {
157         struct sock sk;
158         struct socket socket;
159         struct socket_wq wq;
160         struct tun_struct __rcu *tun;
161         struct fasync_struct *fasync;
162         /* only used for fasnyc */
163         unsigned int flags;
164         union {
165                 u16 queue_index;
166                 unsigned int ifindex;
167         };
168         struct list_head next;
169         struct tun_struct *detached;
170 };
171 
172 struct tun_flow_entry {
173         struct hlist_node hash_link;
174         struct rcu_head rcu;
175         struct tun_struct *tun;
176 
177         u32 rxhash;
178         u32 rps_rxhash;
179         int queue_index;
180         unsigned long updated;
181 };
182 
183 #define TUN_NUM_FLOW_ENTRIES 1024
184 
185 /* Since the socket were moved to tun_file, to preserve the behavior of persist
186  * device, socket filter, sndbuf and vnet header size were restore when the
187  * file were attached to a persist device.
188  */
189 struct tun_struct {
190         struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
191         unsigned int            numqueues;
192         unsigned int            flags;
193         kuid_t                  owner;
194         kgid_t                  group;
195 
196         struct net_device       *dev;
197         netdev_features_t       set_features;
198 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
199                           NETIF_F_TSO6|NETIF_F_UFO)
200 
201         int                     align;
202         int                     vnet_hdr_sz;
203         int                     sndbuf;
204         struct tap_filter       txflt;
205         struct sock_fprog       fprog;
206         /* protected by rtnl lock */
207         bool                    filter_attached;
208 #ifdef TUN_DEBUG
209         int debug;
210 #endif
211         spinlock_t lock;
212         struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
213         struct timer_list flow_gc_timer;
214         unsigned long ageing_time;
215         unsigned int numdisabled;
216         struct list_head disabled;
217         void *security;
218         u32 flow_count;
219         struct tun_pcpu_stats __percpu *pcpu_stats;
220 };
221 
222 #ifdef CONFIG_TUN_VNET_CROSS_LE
223 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
224 {
225         return tun->flags & TUN_VNET_BE ? false :
226                 virtio_legacy_is_little_endian();
227 }
228 
229 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
230 {
231         int be = !!(tun->flags & TUN_VNET_BE);
232 
233         if (put_user(be, argp))
234                 return -EFAULT;
235 
236         return 0;
237 }
238 
239 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
240 {
241         int be;
242 
243         if (get_user(be, argp))
244                 return -EFAULT;
245 
246         if (be)
247                 tun->flags |= TUN_VNET_BE;
248         else
249                 tun->flags &= ~TUN_VNET_BE;
250 
251         return 0;
252 }
253 #else
254 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
255 {
256         return virtio_legacy_is_little_endian();
257 }
258 
259 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
260 {
261         return -EINVAL;
262 }
263 
264 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
265 {
266         return -EINVAL;
267 }
268 #endif /* CONFIG_TUN_VNET_CROSS_LE */
269 
270 static inline bool tun_is_little_endian(struct tun_struct *tun)
271 {
272         return tun->flags & TUN_VNET_LE ||
273                 tun_legacy_is_little_endian(tun);
274 }
275 
276 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
277 {
278         return __virtio16_to_cpu(tun_is_little_endian(tun), val);
279 }
280 
281 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
282 {
283         return __cpu_to_virtio16(tun_is_little_endian(tun), val);
284 }
285 
286 static inline u32 tun_hashfn(u32 rxhash)
287 {
288         return rxhash & 0x3ff;
289 }
290 
291 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
292 {
293         struct tun_flow_entry *e;
294 
295         hlist_for_each_entry_rcu(e, head, hash_link) {
296                 if (e->rxhash == rxhash)
297                         return e;
298         }
299         return NULL;
300 }
301 
302 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
303                                               struct hlist_head *head,
304                                               u32 rxhash, u16 queue_index)
305 {
306         struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
307 
308         if (e) {
309                 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
310                           rxhash, queue_index);
311                 e->updated = jiffies;
312                 e->rxhash = rxhash;
313                 e->rps_rxhash = 0;
314                 e->queue_index = queue_index;
315                 e->tun = tun;
316                 hlist_add_head_rcu(&e->hash_link, head);
317                 ++tun->flow_count;
318         }
319         return e;
320 }
321 
322 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
323 {
324         tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
325                   e->rxhash, e->queue_index);
326         hlist_del_rcu(&e->hash_link);
327         kfree_rcu(e, rcu);
328         --tun->flow_count;
329 }
330 
331 static void tun_flow_flush(struct tun_struct *tun)
332 {
333         int i;
334 
335         spin_lock_bh(&tun->lock);
336         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
337                 struct tun_flow_entry *e;
338                 struct hlist_node *n;
339 
340                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
341                         tun_flow_delete(tun, e);
342         }
343         spin_unlock_bh(&tun->lock);
344 }
345 
346 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
347 {
348         int i;
349 
350         spin_lock_bh(&tun->lock);
351         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
352                 struct tun_flow_entry *e;
353                 struct hlist_node *n;
354 
355                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
356                         if (e->queue_index == queue_index)
357                                 tun_flow_delete(tun, e);
358                 }
359         }
360         spin_unlock_bh(&tun->lock);
361 }
362 
363 static void tun_flow_cleanup(unsigned long data)
364 {
365         struct tun_struct *tun = (struct tun_struct *)data;
366         unsigned long delay = tun->ageing_time;
367         unsigned long next_timer = jiffies + delay;
368         unsigned long count = 0;
369         int i;
370 
371         tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
372 
373         spin_lock_bh(&tun->lock);
374         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
375                 struct tun_flow_entry *e;
376                 struct hlist_node *n;
377 
378                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
379                         unsigned long this_timer;
380                         count++;
381                         this_timer = e->updated + delay;
382                         if (time_before_eq(this_timer, jiffies))
383                                 tun_flow_delete(tun, e);
384                         else if (time_before(this_timer, next_timer))
385                                 next_timer = this_timer;
386                 }
387         }
388 
389         if (count)
390                 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
391         spin_unlock_bh(&tun->lock);
392 }
393 
394 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
395                             struct tun_file *tfile)
396 {
397         struct hlist_head *head;
398         struct tun_flow_entry *e;
399         unsigned long delay = tun->ageing_time;
400         u16 queue_index = tfile->queue_index;
401 
402         if (!rxhash)
403                 return;
404         else
405                 head = &tun->flows[tun_hashfn(rxhash)];
406 
407         rcu_read_lock();
408 
409         /* We may get a very small possibility of OOO during switching, not
410          * worth to optimize.*/
411         if (tun->numqueues == 1 || tfile->detached)
412                 goto unlock;
413 
414         e = tun_flow_find(head, rxhash);
415         if (likely(e)) {
416                 /* TODO: keep queueing to old queue until it's empty? */
417                 e->queue_index = queue_index;
418                 e->updated = jiffies;
419                 sock_rps_record_flow_hash(e->rps_rxhash);
420         } else {
421                 spin_lock_bh(&tun->lock);
422                 if (!tun_flow_find(head, rxhash) &&
423                     tun->flow_count < MAX_TAP_FLOWS)
424                         tun_flow_create(tun, head, rxhash, queue_index);
425 
426                 if (!timer_pending(&tun->flow_gc_timer))
427                         mod_timer(&tun->flow_gc_timer,
428                                   round_jiffies_up(jiffies + delay));
429                 spin_unlock_bh(&tun->lock);
430         }
431 
432 unlock:
433         rcu_read_unlock();
434 }
435 
436 /**
437  * Save the hash received in the stack receive path and update the
438  * flow_hash table accordingly.
439  */
440 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
441 {
442         if (unlikely(e->rps_rxhash != hash))
443                 e->rps_rxhash = hash;
444 }
445 
446 /* We try to identify a flow through its rxhash first. The reason that
447  * we do not check rxq no. is because some cards(e.g 82599), chooses
448  * the rxq based on the txq where the last packet of the flow comes. As
449  * the userspace application move between processors, we may get a
450  * different rxq no. here. If we could not get rxhash, then we would
451  * hope the rxq no. may help here.
452  */
453 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
454                             void *accel_priv, select_queue_fallback_t fallback)
455 {
456         struct tun_struct *tun = netdev_priv(dev);
457         struct tun_flow_entry *e;
458         u32 txq = 0;
459         u32 numqueues = 0;
460 
461         rcu_read_lock();
462         numqueues = ACCESS_ONCE(tun->numqueues);
463 
464         txq = skb_get_hash(skb);
465         if (txq) {
466                 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
467                 if (e) {
468                         tun_flow_save_rps_rxhash(e, txq);
469                         txq = e->queue_index;
470                 } else
471                         /* use multiply and shift instead of expensive divide */
472                         txq = ((u64)txq * numqueues) >> 32;
473         } else if (likely(skb_rx_queue_recorded(skb))) {
474                 txq = skb_get_rx_queue(skb);
475                 while (unlikely(txq >= numqueues))
476                         txq -= numqueues;
477         }
478 
479         rcu_read_unlock();
480         return txq;
481 }
482 
483 static inline bool tun_not_capable(struct tun_struct *tun)
484 {
485         const struct cred *cred = current_cred();
486         struct net *net = dev_net(tun->dev);
487 
488         return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
489                   (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
490                 !ns_capable(net->user_ns, CAP_NET_ADMIN);
491 }
492 
493 static void tun_set_real_num_queues(struct tun_struct *tun)
494 {
495         netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
496         netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
497 }
498 
499 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
500 {
501         tfile->detached = tun;
502         list_add_tail(&tfile->next, &tun->disabled);
503         ++tun->numdisabled;
504 }
505 
506 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
507 {
508         struct tun_struct *tun = tfile->detached;
509 
510         tfile->detached = NULL;
511         list_del_init(&tfile->next);
512         --tun->numdisabled;
513         return tun;
514 }
515 
516 static void tun_queue_purge(struct tun_file *tfile)
517 {
518         skb_queue_purge(&tfile->sk.sk_receive_queue);
519         skb_queue_purge(&tfile->sk.sk_error_queue);
520 }
521 
522 static void __tun_detach(struct tun_file *tfile, bool clean)
523 {
524         struct tun_file *ntfile;
525         struct tun_struct *tun;
526 
527         tun = rtnl_dereference(tfile->tun);
528 
529         if (tun && !tfile->detached) {
530                 u16 index = tfile->queue_index;
531                 BUG_ON(index >= tun->numqueues);
532 
533                 rcu_assign_pointer(tun->tfiles[index],
534                                    tun->tfiles[tun->numqueues - 1]);
535                 ntfile = rtnl_dereference(tun->tfiles[index]);
536                 ntfile->queue_index = index;
537 
538                 --tun->numqueues;
539                 if (clean) {
540                         RCU_INIT_POINTER(tfile->tun, NULL);
541                         sock_put(&tfile->sk);
542                 } else
543                         tun_disable_queue(tun, tfile);
544 
545                 synchronize_net();
546                 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
547                 /* Drop read queue */
548                 tun_queue_purge(tfile);
549                 tun_set_real_num_queues(tun);
550         } else if (tfile->detached && clean) {
551                 tun = tun_enable_queue(tfile);
552                 sock_put(&tfile->sk);
553         }
554 
555         if (clean) {
556                 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
557                         netif_carrier_off(tun->dev);
558 
559                         if (!(tun->flags & IFF_PERSIST) &&
560                             tun->dev->reg_state == NETREG_REGISTERED)
561                                 unregister_netdevice(tun->dev);
562                 }
563                 sock_put(&tfile->sk);
564         }
565 }
566 
567 static void tun_detach(struct tun_file *tfile, bool clean)
568 {
569         rtnl_lock();
570         __tun_detach(tfile, clean);
571         rtnl_unlock();
572 }
573 
574 static void tun_detach_all(struct net_device *dev)
575 {
576         struct tun_struct *tun = netdev_priv(dev);
577         struct tun_file *tfile, *tmp;
578         int i, n = tun->numqueues;
579 
580         for (i = 0; i < n; i++) {
581                 tfile = rtnl_dereference(tun->tfiles[i]);
582                 BUG_ON(!tfile);
583                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
584                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
585                 RCU_INIT_POINTER(tfile->tun, NULL);
586                 --tun->numqueues;
587         }
588         list_for_each_entry(tfile, &tun->disabled, next) {
589                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
590                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
591                 RCU_INIT_POINTER(tfile->tun, NULL);
592         }
593         BUG_ON(tun->numqueues != 0);
594 
595         synchronize_net();
596         for (i = 0; i < n; i++) {
597                 tfile = rtnl_dereference(tun->tfiles[i]);
598                 /* Drop read queue */
599                 tun_queue_purge(tfile);
600                 sock_put(&tfile->sk);
601         }
602         list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
603                 tun_enable_queue(tfile);
604                 tun_queue_purge(tfile);
605                 sock_put(&tfile->sk);
606         }
607         BUG_ON(tun->numdisabled != 0);
608 
609         if (tun->flags & IFF_PERSIST)
610                 module_put(THIS_MODULE);
611 }
612 
613 static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
614 {
615         struct tun_file *tfile = file->private_data;
616         int err;
617 
618         err = security_tun_dev_attach(tfile->socket.sk, tun->security);
619         if (err < 0)
620                 goto out;
621 
622         err = -EINVAL;
623         if (rtnl_dereference(tfile->tun) && !tfile->detached)
624                 goto out;
625 
626         err = -EBUSY;
627         if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
628                 goto out;
629 
630         err = -E2BIG;
631         if (!tfile->detached &&
632             tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
633                 goto out;
634 
635         err = 0;
636 
637         /* Re-attach the filter to persist device */
638         if (!skip_filter && (tun->filter_attached == true)) {
639                 lock_sock(tfile->socket.sk);
640                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
641                 release_sock(tfile->socket.sk);
642                 if (!err)
643                         goto out;
644         }
645         tfile->queue_index = tun->numqueues;
646         tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
647         rcu_assign_pointer(tfile->tun, tun);
648         rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
649         tun->numqueues++;
650 
651         if (tfile->detached)
652                 tun_enable_queue(tfile);
653         else
654                 sock_hold(&tfile->sk);
655 
656         tun_set_real_num_queues(tun);
657 
658         /* device is allowed to go away first, so no need to hold extra
659          * refcnt.
660          */
661 
662 out:
663         return err;
664 }
665 
666 static struct tun_struct *__tun_get(struct tun_file *tfile)
667 {
668         struct tun_struct *tun;
669 
670         rcu_read_lock();
671         tun = rcu_dereference(tfile->tun);
672         if (tun)
673                 dev_hold(tun->dev);
674         rcu_read_unlock();
675 
676         return tun;
677 }
678 
679 static struct tun_struct *tun_get(struct file *file)
680 {
681         return __tun_get(file->private_data);
682 }
683 
684 static void tun_put(struct tun_struct *tun)
685 {
686         dev_put(tun->dev);
687 }
688 
689 /* TAP filtering */
690 static void addr_hash_set(u32 *mask, const u8 *addr)
691 {
692         int n = ether_crc(ETH_ALEN, addr) >> 26;
693         mask[n >> 5] |= (1 << (n & 31));
694 }
695 
696 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
697 {
698         int n = ether_crc(ETH_ALEN, addr) >> 26;
699         return mask[n >> 5] & (1 << (n & 31));
700 }
701 
702 static int update_filter(struct tap_filter *filter, void __user *arg)
703 {
704         struct { u8 u[ETH_ALEN]; } *addr;
705         struct tun_filter uf;
706         int err, alen, n, nexact;
707 
708         if (copy_from_user(&uf, arg, sizeof(uf)))
709                 return -EFAULT;
710 
711         if (!uf.count) {
712                 /* Disabled */
713                 filter->count = 0;
714                 return 0;
715         }
716 
717         alen = ETH_ALEN * uf.count;
718         addr = kmalloc(alen, GFP_KERNEL);
719         if (!addr)
720                 return -ENOMEM;
721 
722         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
723                 err = -EFAULT;
724                 goto done;
725         }
726 
727         /* The filter is updated without holding any locks. Which is
728          * perfectly safe. We disable it first and in the worst
729          * case we'll accept a few undesired packets. */
730         filter->count = 0;
731         wmb();
732 
733         /* Use first set of addresses as an exact filter */
734         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
735                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
736 
737         nexact = n;
738 
739         /* Remaining multicast addresses are hashed,
740          * unicast will leave the filter disabled. */
741         memset(filter->mask, 0, sizeof(filter->mask));
742         for (; n < uf.count; n++) {
743                 if (!is_multicast_ether_addr(addr[n].u)) {
744                         err = 0; /* no filter */
745                         goto done;
746                 }
747                 addr_hash_set(filter->mask, addr[n].u);
748         }
749 
750         /* For ALLMULTI just set the mask to all ones.
751          * This overrides the mask populated above. */
752         if ((uf.flags & TUN_FLT_ALLMULTI))
753                 memset(filter->mask, ~0, sizeof(filter->mask));
754 
755         /* Now enable the filter */
756         wmb();
757         filter->count = nexact;
758 
759         /* Return the number of exact filters */
760         err = nexact;
761 
762 done:
763         kfree(addr);
764         return err;
765 }
766 
767 /* Returns: 0 - drop, !=0 - accept */
768 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
769 {
770         /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
771          * at this point. */
772         struct ethhdr *eh = (struct ethhdr *) skb->data;
773         int i;
774 
775         /* Exact match */
776         for (i = 0; i < filter->count; i++)
777                 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
778                         return 1;
779 
780         /* Inexact match (multicast only) */
781         if (is_multicast_ether_addr(eh->h_dest))
782                 return addr_hash_test(filter->mask, eh->h_dest);
783 
784         return 0;
785 }
786 
787 /*
788  * Checks whether the packet is accepted or not.
789  * Returns: 0 - drop, !=0 - accept
790  */
791 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
792 {
793         if (!filter->count)
794                 return 1;
795 
796         return run_filter(filter, skb);
797 }
798 
799 /* Network device part of the driver */
800 
801 static const struct ethtool_ops tun_ethtool_ops;
802 
803 /* Net device detach from fd. */
804 static void tun_net_uninit(struct net_device *dev)
805 {
806         tun_detach_all(dev);
807 }
808 
809 /* Net device open. */
810 static int tun_net_open(struct net_device *dev)
811 {
812         netif_tx_start_all_queues(dev);
813         return 0;
814 }
815 
816 /* Net device close. */
817 static int tun_net_close(struct net_device *dev)
818 {
819         netif_tx_stop_all_queues(dev);
820         return 0;
821 }
822 
823 /* Net device start xmit */
824 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
825 {
826         struct tun_struct *tun = netdev_priv(dev);
827         int txq = skb->queue_mapping;
828         struct tun_file *tfile;
829         u32 numqueues = 0;
830 
831         rcu_read_lock();
832         tfile = rcu_dereference(tun->tfiles[txq]);
833         numqueues = ACCESS_ONCE(tun->numqueues);
834 
835         /* Drop packet if interface is not attached */
836         if (txq >= numqueues)
837                 goto drop;
838 
839 #ifdef CONFIG_RPS
840         if (numqueues == 1 && static_key_false(&rps_needed)) {
841                 /* Select queue was not called for the skbuff, so we extract the
842                  * RPS hash and save it into the flow_table here.
843                  */
844                 __u32 rxhash;
845 
846                 rxhash = skb_get_hash(skb);
847                 if (rxhash) {
848                         struct tun_flow_entry *e;
849                         e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
850                                         rxhash);
851                         if (e)
852                                 tun_flow_save_rps_rxhash(e, rxhash);
853                 }
854         }
855 #endif
856 
857         tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
858 
859         BUG_ON(!tfile);
860 
861         /* Drop if the filter does not like it.
862          * This is a noop if the filter is disabled.
863          * Filter can be enabled only for the TAP devices. */
864         if (!check_filter(&tun->txflt, skb))
865                 goto drop;
866 
867         if (tfile->socket.sk->sk_filter &&
868             sk_filter(tfile->socket.sk, skb))
869                 goto drop;
870 
871         /* Limit the number of packets queued by dividing txq length with the
872          * number of queues.
873          */
874         if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
875                           >= dev->tx_queue_len)
876                 goto drop;
877 
878         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
879                 goto drop;
880 
881         if (skb->sk && sk_fullsock(skb->sk)) {
882                 sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
883                                   &skb_shinfo(skb)->tx_flags);
884                 sw_tx_timestamp(skb);
885         }
886 
887         /* Orphan the skb - required as we might hang on to it
888          * for indefinite time.
889          */
890         skb_orphan(skb);
891 
892         nf_reset(skb);
893 
894         /* Enqueue packet */
895         skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
896 
897         /* Notify and wake up reader process */
898         if (tfile->flags & TUN_FASYNC)
899                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
900         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
901 
902         rcu_read_unlock();
903         return NETDEV_TX_OK;
904 
905 drop:
906         this_cpu_inc(tun->pcpu_stats->tx_dropped);
907         skb_tx_error(skb);
908         kfree_skb(skb);
909         rcu_read_unlock();
910         return NET_XMIT_DROP;
911 }
912 
913 static void tun_net_mclist(struct net_device *dev)
914 {
915         /*
916          * This callback is supposed to deal with mc filter in
917          * _rx_ path and has nothing to do with the _tx_ path.
918          * In rx path we always accept everything userspace gives us.
919          */
920 }
921 
922 #define MIN_MTU 68
923 #define MAX_MTU 65535
924 
925 static int
926 tun_net_change_mtu(struct net_device *dev, int new_mtu)
927 {
928         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
929                 return -EINVAL;
930         dev->mtu = new_mtu;
931         return 0;
932 }
933 
934 static netdev_features_t tun_net_fix_features(struct net_device *dev,
935         netdev_features_t features)
936 {
937         struct tun_struct *tun = netdev_priv(dev);
938 
939         return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
940 }
941 #ifdef CONFIG_NET_POLL_CONTROLLER
942 static void tun_poll_controller(struct net_device *dev)
943 {
944         /*
945          * Tun only receives frames when:
946          * 1) the char device endpoint gets data from user space
947          * 2) the tun socket gets a sendmsg call from user space
948          * Since both of those are synchronous operations, we are guaranteed
949          * never to have pending data when we poll for it
950          * so there is nothing to do here but return.
951          * We need this though so netpoll recognizes us as an interface that
952          * supports polling, which enables bridge devices in virt setups to
953          * still use netconsole
954          */
955         return;
956 }
957 #endif
958 
959 static void tun_set_headroom(struct net_device *dev, int new_hr)
960 {
961         struct tun_struct *tun = netdev_priv(dev);
962 
963         if (new_hr < NET_SKB_PAD)
964                 new_hr = NET_SKB_PAD;
965 
966         tun->align = new_hr;
967 }
968 
969 static struct rtnl_link_stats64 *
970 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
971 {
972         u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
973         struct tun_struct *tun = netdev_priv(dev);
974         struct tun_pcpu_stats *p;
975         int i;
976 
977         for_each_possible_cpu(i) {
978                 u64 rxpackets, rxbytes, txpackets, txbytes;
979                 unsigned int start;
980 
981                 p = per_cpu_ptr(tun->pcpu_stats, i);
982                 do {
983                         start = u64_stats_fetch_begin(&p->syncp);
984                         rxpackets       = p->rx_packets;
985                         rxbytes         = p->rx_bytes;
986                         txpackets       = p->tx_packets;
987                         txbytes         = p->tx_bytes;
988                 } while (u64_stats_fetch_retry(&p->syncp, start));
989 
990                 stats->rx_packets       += rxpackets;
991                 stats->rx_bytes         += rxbytes;
992                 stats->tx_packets       += txpackets;
993                 stats->tx_bytes         += txbytes;
994 
995                 /* u32 counters */
996                 rx_dropped      += p->rx_dropped;
997                 rx_frame_errors += p->rx_frame_errors;
998                 tx_dropped      += p->tx_dropped;
999         }
1000         stats->rx_dropped  = rx_dropped;
1001         stats->rx_frame_errors = rx_frame_errors;
1002         stats->tx_dropped = tx_dropped;
1003         return stats;
1004 }
1005 
1006 static const struct net_device_ops tun_netdev_ops = {
1007         .ndo_uninit             = tun_net_uninit,
1008         .ndo_open               = tun_net_open,
1009         .ndo_stop               = tun_net_close,
1010         .ndo_start_xmit         = tun_net_xmit,
1011         .ndo_change_mtu         = tun_net_change_mtu,
1012         .ndo_fix_features       = tun_net_fix_features,
1013         .ndo_select_queue       = tun_select_queue,
1014 #ifdef CONFIG_NET_POLL_CONTROLLER
1015         .ndo_poll_controller    = tun_poll_controller,
1016 #endif
1017         .ndo_set_rx_headroom    = tun_set_headroom,
1018         .ndo_get_stats64        = tun_net_get_stats64,
1019 };
1020 
1021 static const struct net_device_ops tap_netdev_ops = {
1022         .ndo_uninit             = tun_net_uninit,
1023         .ndo_open               = tun_net_open,
1024         .ndo_stop               = tun_net_close,
1025         .ndo_start_xmit         = tun_net_xmit,
1026         .ndo_change_mtu         = tun_net_change_mtu,
1027         .ndo_fix_features       = tun_net_fix_features,
1028         .ndo_set_rx_mode        = tun_net_mclist,
1029         .ndo_set_mac_address    = eth_mac_addr,
1030         .ndo_validate_addr      = eth_validate_addr,
1031         .ndo_select_queue       = tun_select_queue,
1032 #ifdef CONFIG_NET_POLL_CONTROLLER
1033         .ndo_poll_controller    = tun_poll_controller,
1034 #endif
1035         .ndo_features_check     = passthru_features_check,
1036         .ndo_set_rx_headroom    = tun_set_headroom,
1037         .ndo_get_stats64        = tun_net_get_stats64,
1038 };
1039 
1040 static void tun_flow_init(struct tun_struct *tun)
1041 {
1042         int i;
1043 
1044         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1045                 INIT_HLIST_HEAD(&tun->flows[i]);
1046 
1047         tun->ageing_time = TUN_FLOW_EXPIRE;
1048         setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
1049         mod_timer(&tun->flow_gc_timer,
1050                   round_jiffies_up(jiffies + tun->ageing_time));
1051 }
1052 
1053 static void tun_flow_uninit(struct tun_struct *tun)
1054 {
1055         del_timer_sync(&tun->flow_gc_timer);
1056         tun_flow_flush(tun);
1057 }
1058 
1059 /* Initialize net device. */
1060 static void tun_net_init(struct net_device *dev)
1061 {
1062         struct tun_struct *tun = netdev_priv(dev);
1063 
1064         switch (tun->flags & TUN_TYPE_MASK) {
1065         case IFF_TUN:
1066                 dev->netdev_ops = &tun_netdev_ops;
1067 
1068                 /* Point-to-Point TUN Device */
1069                 dev->hard_header_len = 0;
1070                 dev->addr_len = 0;
1071                 dev->mtu = 1500;
1072 
1073                 /* Zero header length */
1074                 dev->type = ARPHRD_NONE;
1075                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1076                 break;
1077 
1078         case IFF_TAP:
1079                 dev->netdev_ops = &tap_netdev_ops;
1080                 /* Ethernet TAP Device */
1081                 ether_setup(dev);
1082                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1083                 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1084 
1085                 eth_hw_addr_random(dev);
1086 
1087                 break;
1088         }
1089 }
1090 
1091 /* Character device part */
1092 
1093 /* Poll */
1094 static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1095 {
1096         struct tun_file *tfile = file->private_data;
1097         struct tun_struct *tun = __tun_get(tfile);
1098         struct sock *sk;
1099         unsigned int mask = 0;
1100 
1101         if (!tun)
1102                 return POLLERR;
1103 
1104         sk = tfile->socket.sk;
1105 
1106         tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1107 
1108         poll_wait(file, sk_sleep(sk), wait);
1109 
1110         if (!skb_queue_empty(&sk->sk_receive_queue))
1111                 mask |= POLLIN | POLLRDNORM;
1112 
1113         if (sock_writeable(sk) ||
1114             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1115              sock_writeable(sk)))
1116                 mask |= POLLOUT | POLLWRNORM;
1117 
1118         if (tun->dev->reg_state != NETREG_REGISTERED)
1119                 mask = POLLERR;
1120 
1121         tun_put(tun);
1122         return mask;
1123 }
1124 
1125 /* prepad is the amount to reserve at front.  len is length after that.
1126  * linear is a hint as to how much to copy (usually headers). */
1127 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1128                                      size_t prepad, size_t len,
1129                                      size_t linear, int noblock)
1130 {
1131         struct sock *sk = tfile->socket.sk;
1132         struct sk_buff *skb;
1133         int err;
1134 
1135         /* Under a page?  Don't bother with paged skb. */
1136         if (prepad + len < PAGE_SIZE || !linear)
1137                 linear = len;
1138 
1139         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1140                                    &err, 0);
1141         if (!skb)
1142                 return ERR_PTR(err);
1143 
1144         skb_reserve(skb, prepad);
1145         skb_put(skb, linear);
1146         skb->data_len = len - linear;
1147         skb->len += len - linear;
1148 
1149         return skb;
1150 }
1151 
1152 /* Get packet from user space buffer */
1153 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1154                             void *msg_control, struct iov_iter *from,
1155                             int noblock)
1156 {
1157         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1158         struct sk_buff *skb;
1159         size_t total_len = iov_iter_count(from);
1160         size_t len = total_len, align = tun->align, linear;
1161         struct virtio_net_hdr gso = { 0 };
1162         struct tun_pcpu_stats *stats;
1163         int good_linear;
1164         int copylen;
1165         bool zerocopy = false;
1166         int err;
1167         u32 rxhash;
1168         ssize_t n;
1169 
1170         if (!(tun->dev->flags & IFF_UP))
1171                 return -EIO;
1172 
1173         if (!(tun->flags & IFF_NO_PI)) {
1174                 if (len < sizeof(pi))
1175                         return -EINVAL;
1176                 len -= sizeof(pi);
1177 
1178                 n = copy_from_iter(&pi, sizeof(pi), from);
1179                 if (n != sizeof(pi))
1180                         return -EFAULT;
1181         }
1182 
1183         if (tun->flags & IFF_VNET_HDR) {
1184                 if (len < tun->vnet_hdr_sz)
1185                         return -EINVAL;
1186                 len -= tun->vnet_hdr_sz;
1187 
1188                 n = copy_from_iter(&gso, sizeof(gso), from);
1189                 if (n != sizeof(gso))
1190                         return -EFAULT;
1191 
1192                 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1193                     tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1194                         gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1195 
1196                 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1197                         return -EINVAL;
1198                 iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
1199         }
1200 
1201         if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1202                 align += NET_IP_ALIGN;
1203                 if (unlikely(len < ETH_HLEN ||
1204                              (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1205                         return -EINVAL;
1206         }
1207 
1208         good_linear = SKB_MAX_HEAD(align);
1209 
1210         if (msg_control) {
1211                 struct iov_iter i = *from;
1212 
1213                 /* There are 256 bytes to be copied in skb, so there is
1214                  * enough room for skb expand head in case it is used.
1215                  * The rest of the buffer is mapped from userspace.
1216                  */
1217                 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1218                 if (copylen > good_linear)
1219                         copylen = good_linear;
1220                 linear = copylen;
1221                 iov_iter_advance(&i, copylen);
1222                 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1223                         zerocopy = true;
1224         }
1225 
1226         if (!zerocopy) {
1227                 copylen = len;
1228                 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1229                         linear = good_linear;
1230                 else
1231                         linear = tun16_to_cpu(tun, gso.hdr_len);
1232         }
1233 
1234         skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1235         if (IS_ERR(skb)) {
1236                 if (PTR_ERR(skb) != -EAGAIN)
1237                         this_cpu_inc(tun->pcpu_stats->rx_dropped);
1238                 return PTR_ERR(skb);
1239         }
1240 
1241         if (zerocopy)
1242                 err = zerocopy_sg_from_iter(skb, from);
1243         else {
1244                 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1245                 if (!err && msg_control) {
1246                         struct ubuf_info *uarg = msg_control;
1247                         uarg->callback(uarg, false);
1248                 }
1249         }
1250 
1251         if (err) {
1252                 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1253                 kfree_skb(skb);
1254                 return -EFAULT;
1255         }
1256 
1257         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1258                 if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
1259                                           tun16_to_cpu(tun, gso.csum_offset))) {
1260                         this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1261                         kfree_skb(skb);
1262                         return -EINVAL;
1263                 }
1264         }
1265 
1266         switch (tun->flags & TUN_TYPE_MASK) {
1267         case IFF_TUN:
1268                 if (tun->flags & IFF_NO_PI) {
1269                         switch (skb->data[0] & 0xf0) {
1270                         case 0x40:
1271                                 pi.proto = htons(ETH_P_IP);
1272                                 break;
1273                         case 0x60:
1274                                 pi.proto = htons(ETH_P_IPV6);
1275                                 break;
1276                         default:
1277                                 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1278                                 kfree_skb(skb);
1279                                 return -EINVAL;
1280                         }
1281                 }
1282 
1283                 skb_reset_mac_header(skb);
1284                 skb->protocol = pi.proto;
1285                 skb->dev = tun->dev;
1286                 break;
1287         case IFF_TAP:
1288                 skb->protocol = eth_type_trans(skb, tun->dev);
1289                 break;
1290         }
1291 
1292         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1293                 pr_debug("GSO!\n");
1294                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1295                 case VIRTIO_NET_HDR_GSO_TCPV4:
1296                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1297                         break;
1298                 case VIRTIO_NET_HDR_GSO_TCPV6:
1299                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1300                         break;
1301                 case VIRTIO_NET_HDR_GSO_UDP:
1302                         skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1303                         break;
1304                 default:
1305                         this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1306                         kfree_skb(skb);
1307                         return -EINVAL;
1308                 }
1309 
1310                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1311                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1312 
1313                 skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
1314                 if (skb_shinfo(skb)->gso_size == 0) {
1315                         this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1316                         kfree_skb(skb);
1317                         return -EINVAL;
1318                 }
1319 
1320                 /* Header must be checked, and gso_segs computed. */
1321                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1322                 skb_shinfo(skb)->gso_segs = 0;
1323         }
1324 
1325         /* copy skb_ubuf_info for callback when skb has no error */
1326         if (zerocopy) {
1327                 skb_shinfo(skb)->destructor_arg = msg_control;
1328                 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1329                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1330         }
1331 
1332         skb_reset_network_header(skb);
1333         skb_probe_transport_header(skb, 0);
1334 
1335         rxhash = skb_get_hash(skb);
1336         netif_rx_ni(skb);
1337 
1338         stats = get_cpu_ptr(tun->pcpu_stats);
1339         u64_stats_update_begin(&stats->syncp);
1340         stats->rx_packets++;
1341         stats->rx_bytes += len;
1342         u64_stats_update_end(&stats->syncp);
1343         put_cpu_ptr(stats);
1344 
1345         tun_flow_update(tun, rxhash, tfile);
1346         return total_len;
1347 }
1348 
1349 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
1350 {
1351         struct file *file = iocb->ki_filp;
1352         struct tun_struct *tun = tun_get(file);
1353         struct tun_file *tfile = file->private_data;
1354         ssize_t result;
1355 
1356         if (!tun)
1357                 return -EBADFD;
1358 
1359         result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
1360 
1361         tun_put(tun);
1362         return result;
1363 }
1364 
1365 /* Put packet to the user space buffer */
1366 static ssize_t tun_put_user(struct tun_struct *tun,
1367                             struct tun_file *tfile,
1368                             struct sk_buff *skb,
1369                             struct iov_iter *iter)
1370 {
1371         struct tun_pi pi = { 0, skb->protocol };
1372         struct tun_pcpu_stats *stats;
1373         ssize_t total;
1374         int vlan_offset = 0;
1375         int vlan_hlen = 0;
1376         int vnet_hdr_sz = 0;
1377 
1378         if (skb_vlan_tag_present(skb))
1379                 vlan_hlen = VLAN_HLEN;
1380 
1381         if (tun->flags & IFF_VNET_HDR)
1382                 vnet_hdr_sz = tun->vnet_hdr_sz;
1383 
1384         total = skb->len + vlan_hlen + vnet_hdr_sz;
1385 
1386         if (!(tun->flags & IFF_NO_PI)) {
1387                 if (iov_iter_count(iter) < sizeof(pi))
1388                         return -EINVAL;
1389 
1390                 total += sizeof(pi);
1391                 if (iov_iter_count(iter) < total) {
1392                         /* Packet will be striped */
1393                         pi.flags |= TUN_PKT_STRIP;
1394                 }
1395 
1396                 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
1397                         return -EFAULT;
1398         }
1399 
1400         if (vnet_hdr_sz) {
1401                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1402                 if (iov_iter_count(iter) < vnet_hdr_sz)
1403                         return -EINVAL;
1404 
1405                 if (skb_is_gso(skb)) {
1406                         struct skb_shared_info *sinfo = skb_shinfo(skb);
1407 
1408                         /* This is a hint as to how much should be linear. */
1409                         gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
1410                         gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
1411                         if (sinfo->gso_type & SKB_GSO_TCPV4)
1412                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1413                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
1414                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1415                         else if (sinfo->gso_type & SKB_GSO_UDP)
1416                                 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1417                         else {
1418                                 pr_err("unexpected GSO type: "
1419                                        "0x%x, gso_size %d, hdr_len %d\n",
1420                                        sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1421                                        tun16_to_cpu(tun, gso.hdr_len));
1422                                 print_hex_dump(KERN_ERR, "tun: ",
1423                                                DUMP_PREFIX_NONE,
1424                                                16, 1, skb->head,
1425                                                min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1426                                 WARN_ON_ONCE(1);
1427                                 return -EINVAL;
1428                         }
1429                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1430                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1431                 } else
1432                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1433 
1434                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1435                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1436                         gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
1437                                                       vlan_hlen);
1438                         gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
1439                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1440                         gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1441                 } /* else everything is zero */
1442 
1443                 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
1444                         return -EFAULT;
1445 
1446                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1447         }
1448 
1449         if (vlan_hlen) {
1450                 int ret;
1451                 struct {
1452                         __be16 h_vlan_proto;
1453                         __be16 h_vlan_TCI;
1454                 } veth;
1455 
1456                 veth.h_vlan_proto = skb->vlan_proto;
1457                 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
1458 
1459                 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1460 
1461                 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
1462                 if (ret || !iov_iter_count(iter))
1463                         goto done;
1464 
1465                 ret = copy_to_iter(&veth, sizeof(veth), iter);
1466                 if (ret != sizeof(veth) || !iov_iter_count(iter))
1467                         goto done;
1468         }
1469 
1470         skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
1471 
1472 done:
1473         /* caller is in process context, */
1474         stats = get_cpu_ptr(tun->pcpu_stats);
1475         u64_stats_update_begin(&stats->syncp);
1476         stats->tx_packets++;
1477         stats->tx_bytes += skb->len + vlan_hlen;
1478         u64_stats_update_end(&stats->syncp);
1479         put_cpu_ptr(tun->pcpu_stats);
1480 
1481         return total;
1482 }
1483 
1484 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1485                            struct iov_iter *to,
1486                            int noblock)
1487 {
1488         struct sk_buff *skb;
1489         ssize_t ret;
1490         int peeked, err, off = 0;
1491 
1492         tun_debug(KERN_INFO, tun, "tun_do_read\n");
1493 
1494         if (!iov_iter_count(to))
1495                 return 0;
1496 
1497         /* Read frames from queue */
1498         skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1499                                   &peeked, &off, &err);
1500         if (!skb)
1501                 return err;
1502 
1503         ret = tun_put_user(tun, tfile, skb, to);
1504         if (unlikely(ret < 0))
1505                 kfree_skb(skb);
1506         else
1507                 consume_skb(skb);
1508 
1509         return ret;
1510 }
1511 
1512 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1513 {
1514         struct file *file = iocb->ki_filp;
1515         struct tun_file *tfile = file->private_data;
1516         struct tun_struct *tun = __tun_get(tfile);
1517         ssize_t len = iov_iter_count(to), ret;
1518 
1519         if (!tun)
1520                 return -EBADFD;
1521         ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
1522         ret = min_t(ssize_t, ret, len);
1523         if (ret > 0)
1524                 iocb->ki_pos = ret;
1525         tun_put(tun);
1526         return ret;
1527 }
1528 
1529 static void tun_free_netdev(struct net_device *dev)
1530 {
1531         struct tun_struct *tun = netdev_priv(dev);
1532 
1533         BUG_ON(!(list_empty(&tun->disabled)));
1534         free_percpu(tun->pcpu_stats);
1535         tun_flow_uninit(tun);
1536         security_tun_dev_free_security(tun->security);
1537         free_netdev(dev);
1538 }
1539 
1540 static void tun_setup(struct net_device *dev)
1541 {
1542         struct tun_struct *tun = netdev_priv(dev);
1543 
1544         tun->owner = INVALID_UID;
1545         tun->group = INVALID_GID;
1546 
1547         dev->ethtool_ops = &tun_ethtool_ops;
1548         dev->destructor = tun_free_netdev;
1549         /* We prefer our own queue length */
1550         dev->tx_queue_len = TUN_READQ_SIZE;
1551 }
1552 
1553 /* Trivial set of netlink ops to allow deleting tun or tap
1554  * device with netlink.
1555  */
1556 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1557 {
1558         return -EINVAL;
1559 }
1560 
1561 static struct rtnl_link_ops tun_link_ops __read_mostly = {
1562         .kind           = DRV_NAME,
1563         .priv_size      = sizeof(struct tun_struct),
1564         .setup          = tun_setup,
1565         .validate       = tun_validate,
1566 };
1567 
1568 static void tun_sock_write_space(struct sock *sk)
1569 {
1570         struct tun_file *tfile;
1571         wait_queue_head_t *wqueue;
1572 
1573         if (!sock_writeable(sk))
1574                 return;
1575 
1576         if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1577                 return;
1578 
1579         wqueue = sk_sleep(sk);
1580         if (wqueue && waitqueue_active(wqueue))
1581                 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1582                                                 POLLWRNORM | POLLWRBAND);
1583 
1584         tfile = container_of(sk, struct tun_file, sk);
1585         kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1586 }
1587 
1588 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1589 {
1590         int ret;
1591         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1592         struct tun_struct *tun = __tun_get(tfile);
1593 
1594         if (!tun)
1595                 return -EBADFD;
1596 
1597         ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
1598                            m->msg_flags & MSG_DONTWAIT);
1599         tun_put(tun);
1600         return ret;
1601 }
1602 
1603 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
1604                        int flags)
1605 {
1606         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1607         struct tun_struct *tun = __tun_get(tfile);
1608         int ret;
1609 
1610         if (!tun)
1611                 return -EBADFD;
1612 
1613         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1614                 ret = -EINVAL;
1615                 goto out;
1616         }
1617         if (flags & MSG_ERRQUEUE) {
1618                 ret = sock_recv_errqueue(sock->sk, m, total_len,
1619                                          SOL_PACKET, TUN_TX_TIMESTAMP);
1620                 goto out;
1621         }
1622         ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
1623         if (ret > (ssize_t)total_len) {
1624                 m->msg_flags |= MSG_TRUNC;
1625                 ret = flags & MSG_TRUNC ? ret : total_len;
1626         }
1627 out:
1628         tun_put(tun);
1629         return ret;
1630 }
1631 
1632 /* Ops structure to mimic raw sockets with tun */
1633 static const struct proto_ops tun_socket_ops = {
1634         .sendmsg = tun_sendmsg,
1635         .recvmsg = tun_recvmsg,
1636 };
1637 
1638 static struct proto tun_proto = {
1639         .name           = "tun",
1640         .owner          = THIS_MODULE,
1641         .obj_size       = sizeof(struct tun_file),
1642 };
1643 
1644 static int tun_flags(struct tun_struct *tun)
1645 {
1646         return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
1647 }
1648 
1649 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1650                               char *buf)
1651 {
1652         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1653         return sprintf(buf, "0x%x\n", tun_flags(tun));
1654 }
1655 
1656 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1657                               char *buf)
1658 {
1659         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1660         return uid_valid(tun->owner)?
1661                 sprintf(buf, "%u\n",
1662                         from_kuid_munged(current_user_ns(), tun->owner)):
1663                 sprintf(buf, "-1\n");
1664 }
1665 
1666 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1667                               char *buf)
1668 {
1669         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1670         return gid_valid(tun->group) ?
1671                 sprintf(buf, "%u\n",
1672                         from_kgid_munged(current_user_ns(), tun->group)):
1673                 sprintf(buf, "-1\n");
1674 }
1675 
1676 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1677 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1678 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1679 
1680 static struct attribute *tun_dev_attrs[] = {
1681         &dev_attr_tun_flags.attr,
1682         &dev_attr_owner.attr,
1683         &dev_attr_group.attr,
1684         NULL
1685 };
1686 
1687 static const struct attribute_group tun_attr_group = {
1688         .attrs = tun_dev_attrs
1689 };
1690 
1691 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1692 {
1693         struct tun_struct *tun;
1694         struct tun_file *tfile = file->private_data;
1695         struct net_device *dev;
1696         int err;
1697 
1698         if (tfile->detached)
1699                 return -EINVAL;
1700 
1701         dev = __dev_get_by_name(net, ifr->ifr_name);
1702         if (dev) {
1703                 if (ifr->ifr_flags & IFF_TUN_EXCL)
1704                         return -EBUSY;
1705                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1706                         tun = netdev_priv(dev);
1707                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1708                         tun = netdev_priv(dev);
1709                 else
1710                         return -EINVAL;
1711 
1712                 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1713                     !!(tun->flags & IFF_MULTI_QUEUE))
1714                         return -EINVAL;
1715 
1716                 if (tun_not_capable(tun))
1717                         return -EPERM;
1718                 err = security_tun_dev_open(tun->security);
1719                 if (err < 0)
1720                         return err;
1721 
1722                 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1723                 if (err < 0)
1724                         return err;
1725 
1726                 if (tun->flags & IFF_MULTI_QUEUE &&
1727                     (tun->numqueues + tun->numdisabled > 1)) {
1728                         /* One or more queue has already been attached, no need
1729                          * to initialize the device again.
1730                          */
1731                         return 0;
1732                 }
1733         }
1734         else {
1735                 char *name;
1736                 unsigned long flags = 0;
1737                 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1738                              MAX_TAP_QUEUES : 1;
1739 
1740                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1741                         return -EPERM;
1742                 err = security_tun_dev_create();
1743                 if (err < 0)
1744                         return err;
1745 
1746                 /* Set dev type */
1747                 if (ifr->ifr_flags & IFF_TUN) {
1748                         /* TUN device */
1749                         flags |= IFF_TUN;
1750                         name = "tun%d";
1751                 } else if (ifr->ifr_flags & IFF_TAP) {
1752                         /* TAP device */
1753                         flags |= IFF_TAP;
1754                         name = "tap%d";
1755                 } else
1756                         return -EINVAL;
1757 
1758                 if (*ifr->ifr_name)
1759                         name = ifr->ifr_name;
1760 
1761                 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1762                                        NET_NAME_UNKNOWN, tun_setup, queues,
1763                                        queues);
1764 
1765                 if (!dev)
1766                         return -ENOMEM;
1767 
1768                 dev_net_set(dev, net);
1769                 dev->rtnl_link_ops = &tun_link_ops;
1770                 dev->ifindex = tfile->ifindex;
1771                 dev->sysfs_groups[0] = &tun_attr_group;
1772 
1773                 tun = netdev_priv(dev);
1774                 tun->dev = dev;
1775                 tun->flags = flags;
1776                 tun->txflt.count = 0;
1777                 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1778 
1779                 tun->align = NET_SKB_PAD;
1780                 tun->filter_attached = false;
1781                 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1782 
1783                 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
1784                 if (!tun->pcpu_stats) {
1785                         err = -ENOMEM;
1786                         goto err_free_dev;
1787                 }
1788 
1789                 spin_lock_init(&tun->lock);
1790 
1791                 err = security_tun_dev_alloc_security(&tun->security);
1792                 if (err < 0)
1793                         goto err_free_stat;
1794 
1795                 tun_net_init(dev);
1796                 tun_flow_init(tun);
1797 
1798                 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1799                                    TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1800                                    NETIF_F_HW_VLAN_STAG_TX;
1801                 dev->features = dev->hw_features | NETIF_F_LLTX;
1802                 dev->vlan_features = dev->features &
1803                                      ~(NETIF_F_HW_VLAN_CTAG_TX |
1804                                        NETIF_F_HW_VLAN_STAG_TX);
1805 
1806                 INIT_LIST_HEAD(&tun->disabled);
1807                 err = tun_attach(tun, file, false);
1808                 if (err < 0)
1809                         goto err_free_flow;
1810 
1811                 err = register_netdevice(tun->dev);
1812                 if (err < 0)
1813                         goto err_detach;
1814         }
1815 
1816         netif_carrier_on(tun->dev);
1817 
1818         tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1819 
1820         tun->flags = (tun->flags & ~TUN_FEATURES) |
1821                 (ifr->ifr_flags & TUN_FEATURES);
1822 
1823         /* Make sure persistent devices do not get stuck in
1824          * xoff state.
1825          */
1826         if (netif_running(tun->dev))
1827                 netif_tx_wake_all_queues(tun->dev);
1828 
1829         strcpy(ifr->ifr_name, tun->dev->name);
1830         return 0;
1831 
1832 err_detach:
1833         tun_detach_all(dev);
1834 err_free_flow:
1835         tun_flow_uninit(tun);
1836         security_tun_dev_free_security(tun->security);
1837 err_free_stat:
1838         free_percpu(tun->pcpu_stats);
1839 err_free_dev:
1840         free_netdev(dev);
1841         return err;
1842 }
1843 
1844 static void tun_get_iff(struct net *net, struct tun_struct *tun,
1845                        struct ifreq *ifr)
1846 {
1847         tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1848 
1849         strcpy(ifr->ifr_name, tun->dev->name);
1850 
1851         ifr->ifr_flags = tun_flags(tun);
1852 
1853 }
1854 
1855 /* This is like a cut-down ethtool ops, except done via tun fd so no
1856  * privs required. */
1857 static int set_offload(struct tun_struct *tun, unsigned long arg)
1858 {
1859         netdev_features_t features = 0;
1860 
1861         if (arg & TUN_F_CSUM) {
1862                 features |= NETIF_F_HW_CSUM;
1863                 arg &= ~TUN_F_CSUM;
1864 
1865                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1866                         if (arg & TUN_F_TSO_ECN) {
1867                                 features |= NETIF_F_TSO_ECN;
1868                                 arg &= ~TUN_F_TSO_ECN;
1869                         }
1870                         if (arg & TUN_F_TSO4)
1871                                 features |= NETIF_F_TSO;
1872                         if (arg & TUN_F_TSO6)
1873                                 features |= NETIF_F_TSO6;
1874                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1875                 }
1876 
1877                 if (arg & TUN_F_UFO) {
1878                         features |= NETIF_F_UFO;
1879                         arg &= ~TUN_F_UFO;
1880                 }
1881         }
1882 
1883         /* This gives the user a way to test for new features in future by
1884          * trying to set them. */
1885         if (arg)
1886                 return -EINVAL;
1887 
1888         tun->set_features = features;
1889         netdev_update_features(tun->dev);
1890 
1891         return 0;
1892 }
1893 
1894 static void tun_detach_filter(struct tun_struct *tun, int n)
1895 {
1896         int i;
1897         struct tun_file *tfile;
1898 
1899         for (i = 0; i < n; i++) {
1900                 tfile = rtnl_dereference(tun->tfiles[i]);
1901                 lock_sock(tfile->socket.sk);
1902                 sk_detach_filter(tfile->socket.sk);
1903                 release_sock(tfile->socket.sk);
1904         }
1905 
1906         tun->filter_attached = false;
1907 }
1908 
1909 static int tun_attach_filter(struct tun_struct *tun)
1910 {
1911         int i, ret = 0;
1912         struct tun_file *tfile;
1913 
1914         for (i = 0; i < tun->numqueues; i++) {
1915                 tfile = rtnl_dereference(tun->tfiles[i]);
1916                 lock_sock(tfile->socket.sk);
1917                 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1918                 release_sock(tfile->socket.sk);
1919                 if (ret) {
1920                         tun_detach_filter(tun, i);
1921                         return ret;
1922                 }
1923         }
1924 
1925         tun->filter_attached = true;
1926         return ret;
1927 }
1928 
1929 static void tun_set_sndbuf(struct tun_struct *tun)
1930 {
1931         struct tun_file *tfile;
1932         int i;
1933 
1934         for (i = 0; i < tun->numqueues; i++) {
1935                 tfile = rtnl_dereference(tun->tfiles[i]);
1936                 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1937         }
1938 }
1939 
1940 static int tun_set_queue(struct file *file, struct ifreq *ifr)
1941 {
1942         struct tun_file *tfile = file->private_data;
1943         struct tun_struct *tun;
1944         int ret = 0;
1945 
1946         rtnl_lock();
1947 
1948         if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1949                 tun = tfile->detached;
1950                 if (!tun) {
1951                         ret = -EINVAL;
1952                         goto unlock;
1953                 }
1954                 ret = security_tun_dev_attach_queue(tun->security);
1955                 if (ret < 0)
1956                         goto unlock;
1957                 ret = tun_attach(tun, file, false);
1958         } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1959                 tun = rtnl_dereference(tfile->tun);
1960                 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1961                         ret = -EINVAL;
1962                 else
1963                         __tun_detach(tfile, false);
1964         } else
1965                 ret = -EINVAL;
1966 
1967 unlock:
1968         rtnl_unlock();
1969         return ret;
1970 }
1971 
1972 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1973                             unsigned long arg, int ifreq_len)
1974 {
1975         struct tun_file *tfile = file->private_data;
1976         struct tun_struct *tun;
1977         void __user* argp = (void __user*)arg;
1978         struct ifreq ifr;
1979         kuid_t owner;
1980         kgid_t group;
1981         int sndbuf;
1982         int vnet_hdr_sz;
1983         unsigned int ifindex;
1984         int le;
1985         int ret;
1986 
1987         if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1988                 if (copy_from_user(&ifr, argp, ifreq_len))
1989                         return -EFAULT;
1990         } else {
1991                 memset(&ifr, 0, sizeof(ifr));
1992         }
1993         if (cmd == TUNGETFEATURES) {
1994                 /* Currently this just means: "what IFF flags are valid?".
1995                  * This is needed because we never checked for invalid flags on
1996                  * TUNSETIFF.
1997                  */
1998                 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
1999                                 (unsigned int __user*)argp);
2000         } else if (cmd == TUNSETQUEUE)
2001                 return tun_set_queue(file, &ifr);
2002 
2003         ret = 0;
2004         rtnl_lock();
2005 
2006         tun = __tun_get(tfile);
2007         if (cmd == TUNSETIFF && !tun) {
2008                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
2009 
2010                 ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
2011 
2012                 if (ret)
2013                         goto unlock;
2014 
2015                 if (copy_to_user(argp, &ifr, ifreq_len))
2016                         ret = -EFAULT;
2017                 goto unlock;
2018         }
2019         if (cmd == TUNSETIFINDEX) {
2020                 ret = -EPERM;
2021                 if (tun)
2022                         goto unlock;
2023 
2024                 ret = -EFAULT;
2025                 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2026                         goto unlock;
2027 
2028                 ret = 0;
2029                 tfile->ifindex = ifindex;
2030                 goto unlock;
2031         }
2032 
2033         ret = -EBADFD;
2034         if (!tun)
2035                 goto unlock;
2036 
2037         tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
2038 
2039         ret = 0;
2040         switch (cmd) {
2041         case TUNGETIFF:
2042                 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2043 
2044                 if (tfile->detached)
2045                         ifr.ifr_flags |= IFF_DETACH_QUEUE;
2046                 if (!tfile->socket.sk->sk_filter)
2047                         ifr.ifr_flags |= IFF_NOFILTER;
2048 
2049                 if (copy_to_user(argp, &ifr, ifreq_len))
2050                         ret = -EFAULT;
2051                 break;
2052 
2053         case TUNSETNOCSUM:
2054                 /* Disable/Enable checksum */
2055 
2056                 /* [unimplemented] */
2057                 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
2058                           arg ? "disabled" : "enabled");
2059                 break;
2060 
2061         case TUNSETPERSIST:
2062                 /* Disable/Enable persist mode. Keep an extra reference to the
2063                  * module to prevent the module being unprobed.
2064                  */
2065                 if (arg && !(tun->flags & IFF_PERSIST)) {
2066                         tun->flags |= IFF_PERSIST;
2067                         __module_get(THIS_MODULE);
2068                 }
2069                 if (!arg && (tun->flags & IFF_PERSIST)) {
2070                         tun->flags &= ~IFF_PERSIST;
2071                         module_put(THIS_MODULE);
2072                 }
2073 
2074                 tun_debug(KERN_INFO, tun, "persist %s\n",
2075                           arg ? "enabled" : "disabled");
2076                 break;
2077 
2078         case TUNSETOWNER:
2079                 /* Set owner of the device */
2080                 owner = make_kuid(current_user_ns(), arg);
2081                 if (!uid_valid(owner)) {
2082                         ret = -EINVAL;
2083                         break;
2084                 }
2085                 tun->owner = owner;
2086                 tun_debug(KERN_INFO, tun, "owner set to %u\n",
2087                           from_kuid(&init_user_ns, tun->owner));
2088                 break;
2089 
2090         case TUNSETGROUP:
2091                 /* Set group of the device */
2092                 group = make_kgid(current_user_ns(), arg);
2093                 if (!gid_valid(group)) {
2094                         ret = -EINVAL;
2095                         break;
2096                 }
2097                 tun->group = group;
2098                 tun_debug(KERN_INFO, tun, "group set to %u\n",
2099                           from_kgid(&init_user_ns, tun->group));
2100                 break;
2101 
2102         case TUNSETLINK:
2103                 /* Only allow setting the type when the interface is down */
2104                 if (tun->dev->flags & IFF_UP) {
2105                         tun_debug(KERN_INFO, tun,
2106                                   "Linktype set failed because interface is up\n");
2107                         ret = -EBUSY;
2108                 } else {
2109                         tun->dev->type = (int) arg;
2110                         tun_debug(KERN_INFO, tun, "linktype set to %d\n",
2111                                   tun->dev->type);
2112                         ret = 0;
2113                 }
2114                 break;
2115 
2116 #ifdef TUN_DEBUG
2117         case TUNSETDEBUG:
2118                 tun->debug = arg;
2119                 break;
2120 #endif
2121         case TUNSETOFFLOAD:
2122                 ret = set_offload(tun, arg);
2123                 break;
2124 
2125         case TUNSETTXFILTER:
2126                 /* Can be set only for TAPs */
2127                 ret = -EINVAL;
2128                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2129                         break;
2130                 ret = update_filter(&tun->txflt, (void __user *)arg);
2131                 break;
2132 
2133         case SIOCGIFHWADDR:
2134                 /* Get hw address */
2135                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2136                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
2137                 if (copy_to_user(argp, &ifr, ifreq_len))
2138                         ret = -EFAULT;
2139                 break;
2140 
2141         case SIOCSIFHWADDR:
2142                 /* Set hw address */
2143                 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
2144                           ifr.ifr_hwaddr.sa_data);
2145 
2146                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2147                 break;
2148 
2149         case TUNGETSNDBUF:
2150                 sndbuf = tfile->socket.sk->sk_sndbuf;
2151                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2152                         ret = -EFAULT;
2153                 break;
2154 
2155         case TUNSETSNDBUF:
2156                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2157                         ret = -EFAULT;
2158                         break;
2159                 }
2160 
2161                 tun->sndbuf = sndbuf;
2162                 tun_set_sndbuf(tun);
2163                 break;
2164 
2165         case TUNGETVNETHDRSZ:
2166                 vnet_hdr_sz = tun->vnet_hdr_sz;
2167                 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2168                         ret = -EFAULT;
2169                 break;
2170 
2171         case TUNSETVNETHDRSZ:
2172                 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2173                         ret = -EFAULT;
2174                         break;
2175                 }
2176                 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2177                         ret = -EINVAL;
2178                         break;
2179                 }
2180 
2181                 tun->vnet_hdr_sz = vnet_hdr_sz;
2182                 break;
2183 
2184         case TUNGETVNETLE:
2185                 le = !!(tun->flags & TUN_VNET_LE);
2186                 if (put_user(le, (int __user *)argp))
2187                         ret = -EFAULT;
2188                 break;
2189 
2190         case TUNSETVNETLE:
2191                 if (get_user(le, (int __user *)argp)) {
2192                         ret = -EFAULT;
2193                         break;
2194                 }
2195                 if (le)
2196                         tun->flags |= TUN_VNET_LE;
2197                 else
2198                         tun->flags &= ~TUN_VNET_LE;
2199                 break;
2200 
2201         case TUNGETVNETBE:
2202                 ret = tun_get_vnet_be(tun, argp);
2203                 break;
2204 
2205         case TUNSETVNETBE:
2206                 ret = tun_set_vnet_be(tun, argp);
2207                 break;
2208 
2209         case TUNATTACHFILTER:
2210                 /* Can be set only for TAPs */
2211                 ret = -EINVAL;
2212                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2213                         break;
2214                 ret = -EFAULT;
2215                 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2216                         break;
2217 
2218                 ret = tun_attach_filter(tun);
2219                 break;
2220 
2221         case TUNDETACHFILTER:
2222                 /* Can be set only for TAPs */
2223                 ret = -EINVAL;
2224                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2225                         break;
2226                 ret = 0;
2227                 tun_detach_filter(tun, tun->numqueues);
2228                 break;
2229 
2230         case TUNGETFILTER:
2231                 ret = -EINVAL;
2232                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2233                         break;
2234                 ret = -EFAULT;
2235                 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2236                         break;
2237                 ret = 0;
2238                 break;
2239 
2240         default:
2241                 ret = -EINVAL;
2242                 break;
2243         }
2244 
2245 unlock:
2246         rtnl_unlock();
2247         if (tun)
2248                 tun_put(tun);
2249         return ret;
2250 }
2251 
2252 static long tun_chr_ioctl(struct file *file,
2253                           unsigned int cmd, unsigned long arg)
2254 {
2255         return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2256 }
2257 
2258 #ifdef CONFIG_COMPAT
2259 static long tun_chr_compat_ioctl(struct file *file,
2260                          unsigned int cmd, unsigned long arg)
2261 {
2262         switch (cmd) {
2263         case TUNSETIFF:
2264         case TUNGETIFF:
2265         case TUNSETTXFILTER:
2266         case TUNGETSNDBUF:
2267         case TUNSETSNDBUF:
2268         case SIOCGIFHWADDR:
2269         case SIOCSIFHWADDR:
2270                 arg = (unsigned long)compat_ptr(arg);
2271                 break;
2272         default:
2273                 arg = (compat_ulong_t)arg;
2274                 break;
2275         }
2276 
2277         /*
2278          * compat_ifreq is shorter than ifreq, so we must not access beyond
2279          * the end of that structure. All fields that are used in this
2280          * driver are compatible though, we don't need to convert the
2281          * contents.
2282          */
2283         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2284 }
2285 #endif /* CONFIG_COMPAT */
2286 
2287 static int tun_chr_fasync(int fd, struct file *file, int on)
2288 {
2289         struct tun_file *tfile = file->private_data;
2290         int ret;
2291 
2292         if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2293                 goto out;
2294 
2295         if (on) {
2296                 __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2297                 tfile->flags |= TUN_FASYNC;
2298         } else
2299                 tfile->flags &= ~TUN_FASYNC;
2300         ret = 0;
2301 out:
2302         return ret;
2303 }
2304 
2305 static int tun_chr_open(struct inode *inode, struct file * file)
2306 {
2307         struct net *net = current->nsproxy->net_ns;
2308         struct tun_file *tfile;
2309 
2310         DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2311 
2312         tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
2313                                             &tun_proto, 0);
2314         if (!tfile)
2315                 return -ENOMEM;
2316         RCU_INIT_POINTER(tfile->tun, NULL);
2317         tfile->flags = 0;
2318         tfile->ifindex = 0;
2319 
2320         init_waitqueue_head(&tfile->wq.wait);
2321         RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
2322 
2323         tfile->socket.file = file;
2324         tfile->socket.ops = &tun_socket_ops;
2325 
2326         sock_init_data(&tfile->socket, &tfile->sk);
2327 
2328         tfile->sk.sk_write_space = tun_sock_write_space;
2329         tfile->sk.sk_sndbuf = INT_MAX;
2330 
2331         file->private_data = tfile;
2332         INIT_LIST_HEAD(&tfile->next);
2333 
2334         sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2335 
2336         return 0;
2337 }
2338 
2339 static int tun_chr_close(struct inode *inode, struct file *file)
2340 {
2341         struct tun_file *tfile = file->private_data;
2342 
2343         tun_detach(tfile, true);
2344 
2345         return 0;
2346 }
2347 
2348 #ifdef CONFIG_PROC_FS
2349 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2350 {
2351         struct tun_struct *tun;
2352         struct ifreq ifr;
2353 
2354         memset(&ifr, 0, sizeof(ifr));
2355 
2356         rtnl_lock();
2357         tun = tun_get(f);
2358         if (tun)
2359                 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2360         rtnl_unlock();
2361 
2362         if (tun)
2363                 tun_put(tun);
2364 
2365         seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2366 }
2367 #endif
2368 
2369 static const struct file_operations tun_fops = {
2370         .owner  = THIS_MODULE,
2371         .llseek = no_llseek,
2372         .read_iter  = tun_chr_read_iter,
2373         .write_iter = tun_chr_write_iter,
2374         .poll   = tun_chr_poll,
2375         .unlocked_ioctl = tun_chr_ioctl,
2376 #ifdef CONFIG_COMPAT
2377         .compat_ioctl = tun_chr_compat_ioctl,
2378 #endif
2379         .open   = tun_chr_open,
2380         .release = tun_chr_close,
2381         .fasync = tun_chr_fasync,
2382 #ifdef CONFIG_PROC_FS
2383         .show_fdinfo = tun_chr_show_fdinfo,
2384 #endif
2385 };
2386 
2387 static struct miscdevice tun_miscdev = {
2388         .minor = TUN_MINOR,
2389         .name = "tun",
2390         .nodename = "net/tun",
2391         .fops = &tun_fops,
2392 };
2393 
2394 /* ethtool interface */
2395 
2396 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2397 {
2398         cmd->supported          = 0;
2399         cmd->advertising        = 0;
2400         ethtool_cmd_speed_set(cmd, SPEED_10);
2401         cmd->duplex             = DUPLEX_FULL;
2402         cmd->port               = PORT_TP;
2403         cmd->phy_address        = 0;
2404         cmd->transceiver        = XCVR_INTERNAL;
2405         cmd->autoneg            = AUTONEG_DISABLE;
2406         cmd->maxtxpkt           = 0;
2407         cmd->maxrxpkt           = 0;
2408         return 0;
2409 }
2410 
2411 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2412 {
2413         struct tun_struct *tun = netdev_priv(dev);
2414 
2415         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2416         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2417 
2418         switch (tun->flags & TUN_TYPE_MASK) {
2419         case IFF_TUN:
2420                 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2421                 break;
2422         case IFF_TAP:
2423                 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2424                 break;
2425         }
2426 }
2427 
2428 static u32 tun_get_msglevel(struct net_device *dev)
2429 {
2430 #ifdef TUN_DEBUG
2431         struct tun_struct *tun = netdev_priv(dev);
2432         return tun->debug;
2433 #else
2434         return -EOPNOTSUPP;
2435 #endif
2436 }
2437 
2438 static void tun_set_msglevel(struct net_device *dev, u32 value)
2439 {
2440 #ifdef TUN_DEBUG
2441         struct tun_struct *tun = netdev_priv(dev);
2442         tun->debug = value;
2443 #endif
2444 }
2445 
2446 static const struct ethtool_ops tun_ethtool_ops = {
2447         .get_settings   = tun_get_settings,
2448         .get_drvinfo    = tun_get_drvinfo,
2449         .get_msglevel   = tun_get_msglevel,
2450         .set_msglevel   = tun_set_msglevel,
2451         .get_link       = ethtool_op_get_link,
2452         .get_ts_info    = ethtool_op_get_ts_info,
2453 };
2454 
2455 
2456 static int __init tun_init(void)
2457 {
2458         int ret = 0;
2459 
2460         pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2461         pr_info("%s\n", DRV_COPYRIGHT);
2462 
2463         ret = rtnl_link_register(&tun_link_ops);
2464         if (ret) {
2465                 pr_err("Can't register link_ops\n");
2466                 goto err_linkops;
2467         }
2468 
2469         ret = misc_register(&tun_miscdev);
2470         if (ret) {
2471                 pr_err("Can't register misc device %d\n", TUN_MINOR);
2472                 goto err_misc;
2473         }
2474         return  0;
2475 err_misc:
2476         rtnl_link_unregister(&tun_link_ops);
2477 err_linkops:
2478         return ret;
2479 }
2480 
2481 static void tun_cleanup(void)
2482 {
2483         misc_deregister(&tun_miscdev);
2484         rtnl_link_unregister(&tun_link_ops);
2485 }
2486 
2487 /* Get an underlying socket object from tun file.  Returns error unless file is
2488  * attached to a device.  The returned object works like a packet socket, it
2489  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
2490  * holding a reference to the file for as long as the socket is in use. */
2491 struct socket *tun_get_socket(struct file *file)
2492 {
2493         struct tun_file *tfile;
2494         if (file->f_op != &tun_fops)
2495                 return ERR_PTR(-EINVAL);
2496         tfile = file->private_data;
2497         if (!tfile)
2498                 return ERR_PTR(-EBADFD);
2499         return &tfile->socket;
2500 }
2501 EXPORT_SYMBOL_GPL(tun_get_socket);
2502 
2503 module_init(tun_init);
2504 module_exit(tun_cleanup);
2505 MODULE_DESCRIPTION(DRV_DESCRIPTION);
2506 MODULE_AUTHOR(DRV_COPYRIGHT);
2507 MODULE_LICENSE("GPL");
2508 MODULE_ALIAS_MISCDEV(TUN_MINOR);
2509 MODULE_ALIAS("devname:net/tun");
2510 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us