Version:  2.0.40 2.2.26 2.4.37 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5

Linux/drivers/net/tun.c

  1 /*
  2  *  TUN - Universal TUN/TAP device driver.
  3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
  4  *
  5  *  This program is free software; you can redistribute it and/or modify
  6  *  it under the terms of the GNU General Public License as published by
  7  *  the Free Software Foundation; either version 2 of the License, or
  8  *  (at your option) any later version.
  9  *
 10  *  This program is distributed in the hope that it will be useful,
 11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 13  *  GNU General Public License for more details.
 14  *
 15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 16  */
 17 
 18 /*
 19  *  Changes:
 20  *
 21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 22  *    Add TUNSETLINK ioctl to set the link encapsulation
 23  *
 24  *  Mark Smith <markzzzsmith@yahoo.com.au>
 25  *    Use eth_random_addr() for tap MAC address.
 26  *
 27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 28  *    Fixes in packet dropping, queue length setting and queue wakeup.
 29  *    Increased default tx queue length.
 30  *    Added ethtool API.
 31  *    Minor cleanups
 32  *
 33  *  Daniel Podlejski <underley@underley.eu.org>
 34  *    Modifications for 2.3.99-pre5 kernel.
 35  */
 36 
 37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 38 
 39 #define DRV_NAME        "tun"
 40 #define DRV_VERSION     "1.6"
 41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
 42 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
 43 
 44 #include <linux/module.h>
 45 #include <linux/errno.h>
 46 #include <linux/kernel.h>
 47 #include <linux/major.h>
 48 #include <linux/slab.h>
 49 #include <linux/poll.h>
 50 #include <linux/fcntl.h>
 51 #include <linux/init.h>
 52 #include <linux/skbuff.h>
 53 #include <linux/netdevice.h>
 54 #include <linux/etherdevice.h>
 55 #include <linux/miscdevice.h>
 56 #include <linux/ethtool.h>
 57 #include <linux/rtnetlink.h>
 58 #include <linux/compat.h>
 59 #include <linux/if.h>
 60 #include <linux/if_arp.h>
 61 #include <linux/if_ether.h>
 62 #include <linux/if_tun.h>
 63 #include <linux/if_vlan.h>
 64 #include <linux/crc32.h>
 65 #include <linux/nsproxy.h>
 66 #include <linux/virtio_net.h>
 67 #include <linux/rcupdate.h>
 68 #include <net/net_namespace.h>
 69 #include <net/netns/generic.h>
 70 #include <net/rtnetlink.h>
 71 #include <net/sock.h>
 72 #include <linux/seq_file.h>
 73 #include <linux/uio.h>
 74 
 75 #include <asm/uaccess.h>
 76 
 77 /* Uncomment to enable debugging */
 78 /* #define TUN_DEBUG 1 */
 79 
 80 #ifdef TUN_DEBUG
 81 static int debug;
 82 
 83 #define tun_debug(level, tun, fmt, args...)                     \
 84 do {                                                            \
 85         if (tun->debug)                                         \
 86                 netdev_printk(level, tun->dev, fmt, ##args);    \
 87 } while (0)
 88 #define DBG1(level, fmt, args...)                               \
 89 do {                                                            \
 90         if (debug == 2)                                         \
 91                 printk(level fmt, ##args);                      \
 92 } while (0)
 93 #else
 94 #define tun_debug(level, tun, fmt, args...)                     \
 95 do {                                                            \
 96         if (0)                                                  \
 97                 netdev_printk(level, tun->dev, fmt, ##args);    \
 98 } while (0)
 99 #define DBG1(level, fmt, args...)                               \
100 do {                                                            \
101         if (0)                                                  \
102                 printk(level fmt, ##args);                      \
103 } while (0)
104 #endif
105 
106 /* TUN device flags */
107 
108 /* IFF_ATTACH_QUEUE is never stored in device flags,
109  * overload it to mean fasync when stored there.
110  */
111 #define TUN_FASYNC      IFF_ATTACH_QUEUE
112 /* High bits in flags field are unused. */
113 #define TUN_VNET_LE     0x80000000
114 #define TUN_VNET_BE     0x40000000
115 
116 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
117                       IFF_MULTI_QUEUE)
118 #define GOODCOPY_LEN 128
119 
120 #define FLT_EXACT_COUNT 8
121 struct tap_filter {
122         unsigned int    count;    /* Number of addrs. Zero means disabled */
123         u32             mask[2];  /* Mask of the hashed addrs */
124         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
125 };
126 
127 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
128  * to max number of VCPUs in guest. */
129 #define MAX_TAP_QUEUES 256
130 #define MAX_TAP_FLOWS  4096
131 
132 #define TUN_FLOW_EXPIRE (3 * HZ)
133 
134 /* A tun_file connects an open character device to a tuntap netdevice. It
135  * also contains all socket related structures (except sock_fprog and tap_filter)
136  * to serve as one transmit queue for tuntap device. The sock_fprog and
137  * tap_filter were kept in tun_struct since they were used for filtering for the
138  * netdevice not for a specific queue (at least I didn't see the requirement for
139  * this).
140  *
141  * RCU usage:
142  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
143  * other can only be read while rcu_read_lock or rtnl_lock is held.
144  */
145 struct tun_file {
146         struct sock sk;
147         struct socket socket;
148         struct socket_wq wq;
149         struct tun_struct __rcu *tun;
150         struct fasync_struct *fasync;
151         /* only used for fasnyc */
152         unsigned int flags;
153         union {
154                 u16 queue_index;
155                 unsigned int ifindex;
156         };
157         struct list_head next;
158         struct tun_struct *detached;
159 };
160 
161 struct tun_flow_entry {
162         struct hlist_node hash_link;
163         struct rcu_head rcu;
164         struct tun_struct *tun;
165 
166         u32 rxhash;
167         u32 rps_rxhash;
168         int queue_index;
169         unsigned long updated;
170 };
171 
172 #define TUN_NUM_FLOW_ENTRIES 1024
173 
174 /* Since the socket were moved to tun_file, to preserve the behavior of persist
175  * device, socket filter, sndbuf and vnet header size were restore when the
176  * file were attached to a persist device.
177  */
178 struct tun_struct {
179         struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
180         unsigned int            numqueues;
181         unsigned int            flags;
182         kuid_t                  owner;
183         kgid_t                  group;
184 
185         struct net_device       *dev;
186         netdev_features_t       set_features;
187 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
188                           NETIF_F_TSO6|NETIF_F_UFO)
189 
190         int                     vnet_hdr_sz;
191         int                     sndbuf;
192         struct tap_filter       txflt;
193         struct sock_fprog       fprog;
194         /* protected by rtnl lock */
195         bool                    filter_attached;
196 #ifdef TUN_DEBUG
197         int debug;
198 #endif
199         spinlock_t lock;
200         struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
201         struct timer_list flow_gc_timer;
202         unsigned long ageing_time;
203         unsigned int numdisabled;
204         struct list_head disabled;
205         void *security;
206         u32 flow_count;
207 };
208 
209 #ifdef CONFIG_TUN_VNET_CROSS_LE
210 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
211 {
212         return tun->flags & TUN_VNET_BE ? false :
213                 virtio_legacy_is_little_endian();
214 }
215 
216 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
217 {
218         int be = !!(tun->flags & TUN_VNET_BE);
219 
220         if (put_user(be, argp))
221                 return -EFAULT;
222 
223         return 0;
224 }
225 
226 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
227 {
228         int be;
229 
230         if (get_user(be, argp))
231                 return -EFAULT;
232 
233         if (be)
234                 tun->flags |= TUN_VNET_BE;
235         else
236                 tun->flags &= ~TUN_VNET_BE;
237 
238         return 0;
239 }
240 #else
241 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
242 {
243         return virtio_legacy_is_little_endian();
244 }
245 
246 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
247 {
248         return -EINVAL;
249 }
250 
251 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
252 {
253         return -EINVAL;
254 }
255 #endif /* CONFIG_TUN_VNET_CROSS_LE */
256 
257 static inline bool tun_is_little_endian(struct tun_struct *tun)
258 {
259         return tun->flags & TUN_VNET_LE ||
260                 tun_legacy_is_little_endian(tun);
261 }
262 
263 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
264 {
265         return __virtio16_to_cpu(tun_is_little_endian(tun), val);
266 }
267 
268 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
269 {
270         return __cpu_to_virtio16(tun_is_little_endian(tun), val);
271 }
272 
273 static inline u32 tun_hashfn(u32 rxhash)
274 {
275         return rxhash & 0x3ff;
276 }
277 
278 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
279 {
280         struct tun_flow_entry *e;
281 
282         hlist_for_each_entry_rcu(e, head, hash_link) {
283                 if (e->rxhash == rxhash)
284                         return e;
285         }
286         return NULL;
287 }
288 
289 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
290                                               struct hlist_head *head,
291                                               u32 rxhash, u16 queue_index)
292 {
293         struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
294 
295         if (e) {
296                 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
297                           rxhash, queue_index);
298                 e->updated = jiffies;
299                 e->rxhash = rxhash;
300                 e->rps_rxhash = 0;
301                 e->queue_index = queue_index;
302                 e->tun = tun;
303                 hlist_add_head_rcu(&e->hash_link, head);
304                 ++tun->flow_count;
305         }
306         return e;
307 }
308 
309 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
310 {
311         tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
312                   e->rxhash, e->queue_index);
313         hlist_del_rcu(&e->hash_link);
314         kfree_rcu(e, rcu);
315         --tun->flow_count;
316 }
317 
318 static void tun_flow_flush(struct tun_struct *tun)
319 {
320         int i;
321 
322         spin_lock_bh(&tun->lock);
323         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
324                 struct tun_flow_entry *e;
325                 struct hlist_node *n;
326 
327                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
328                         tun_flow_delete(tun, e);
329         }
330         spin_unlock_bh(&tun->lock);
331 }
332 
333 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
334 {
335         int i;
336 
337         spin_lock_bh(&tun->lock);
338         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
339                 struct tun_flow_entry *e;
340                 struct hlist_node *n;
341 
342                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
343                         if (e->queue_index == queue_index)
344                                 tun_flow_delete(tun, e);
345                 }
346         }
347         spin_unlock_bh(&tun->lock);
348 }
349 
350 static void tun_flow_cleanup(unsigned long data)
351 {
352         struct tun_struct *tun = (struct tun_struct *)data;
353         unsigned long delay = tun->ageing_time;
354         unsigned long next_timer = jiffies + delay;
355         unsigned long count = 0;
356         int i;
357 
358         tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
359 
360         spin_lock_bh(&tun->lock);
361         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
362                 struct tun_flow_entry *e;
363                 struct hlist_node *n;
364 
365                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
366                         unsigned long this_timer;
367                         count++;
368                         this_timer = e->updated + delay;
369                         if (time_before_eq(this_timer, jiffies))
370                                 tun_flow_delete(tun, e);
371                         else if (time_before(this_timer, next_timer))
372                                 next_timer = this_timer;
373                 }
374         }
375 
376         if (count)
377                 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
378         spin_unlock_bh(&tun->lock);
379 }
380 
381 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
382                             struct tun_file *tfile)
383 {
384         struct hlist_head *head;
385         struct tun_flow_entry *e;
386         unsigned long delay = tun->ageing_time;
387         u16 queue_index = tfile->queue_index;
388 
389         if (!rxhash)
390                 return;
391         else
392                 head = &tun->flows[tun_hashfn(rxhash)];
393 
394         rcu_read_lock();
395 
396         /* We may get a very small possibility of OOO during switching, not
397          * worth to optimize.*/
398         if (tun->numqueues == 1 || tfile->detached)
399                 goto unlock;
400 
401         e = tun_flow_find(head, rxhash);
402         if (likely(e)) {
403                 /* TODO: keep queueing to old queue until it's empty? */
404                 e->queue_index = queue_index;
405                 e->updated = jiffies;
406                 sock_rps_record_flow_hash(e->rps_rxhash);
407         } else {
408                 spin_lock_bh(&tun->lock);
409                 if (!tun_flow_find(head, rxhash) &&
410                     tun->flow_count < MAX_TAP_FLOWS)
411                         tun_flow_create(tun, head, rxhash, queue_index);
412 
413                 if (!timer_pending(&tun->flow_gc_timer))
414                         mod_timer(&tun->flow_gc_timer,
415                                   round_jiffies_up(jiffies + delay));
416                 spin_unlock_bh(&tun->lock);
417         }
418 
419 unlock:
420         rcu_read_unlock();
421 }
422 
423 /**
424  * Save the hash received in the stack receive path and update the
425  * flow_hash table accordingly.
426  */
427 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
428 {
429         if (unlikely(e->rps_rxhash != hash))
430                 e->rps_rxhash = hash;
431 }
432 
433 /* We try to identify a flow through its rxhash first. The reason that
434  * we do not check rxq no. is because some cards(e.g 82599), chooses
435  * the rxq based on the txq where the last packet of the flow comes. As
436  * the userspace application move between processors, we may get a
437  * different rxq no. here. If we could not get rxhash, then we would
438  * hope the rxq no. may help here.
439  */
440 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
441                             void *accel_priv, select_queue_fallback_t fallback)
442 {
443         struct tun_struct *tun = netdev_priv(dev);
444         struct tun_flow_entry *e;
445         u32 txq = 0;
446         u32 numqueues = 0;
447 
448         rcu_read_lock();
449         numqueues = ACCESS_ONCE(tun->numqueues);
450 
451         txq = skb_get_hash(skb);
452         if (txq) {
453                 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
454                 if (e) {
455                         tun_flow_save_rps_rxhash(e, txq);
456                         txq = e->queue_index;
457                 } else
458                         /* use multiply and shift instead of expensive divide */
459                         txq = ((u64)txq * numqueues) >> 32;
460         } else if (likely(skb_rx_queue_recorded(skb))) {
461                 txq = skb_get_rx_queue(skb);
462                 while (unlikely(txq >= numqueues))
463                         txq -= numqueues;
464         }
465 
466         rcu_read_unlock();
467         return txq;
468 }
469 
470 static inline bool tun_not_capable(struct tun_struct *tun)
471 {
472         const struct cred *cred = current_cred();
473         struct net *net = dev_net(tun->dev);
474 
475         return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
476                   (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
477                 !ns_capable(net->user_ns, CAP_NET_ADMIN);
478 }
479 
480 static void tun_set_real_num_queues(struct tun_struct *tun)
481 {
482         netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
483         netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
484 }
485 
486 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
487 {
488         tfile->detached = tun;
489         list_add_tail(&tfile->next, &tun->disabled);
490         ++tun->numdisabled;
491 }
492 
493 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
494 {
495         struct tun_struct *tun = tfile->detached;
496 
497         tfile->detached = NULL;
498         list_del_init(&tfile->next);
499         --tun->numdisabled;
500         return tun;
501 }
502 
503 static void tun_queue_purge(struct tun_file *tfile)
504 {
505         skb_queue_purge(&tfile->sk.sk_receive_queue);
506         skb_queue_purge(&tfile->sk.sk_error_queue);
507 }
508 
509 static void __tun_detach(struct tun_file *tfile, bool clean)
510 {
511         struct tun_file *ntfile;
512         struct tun_struct *tun;
513 
514         tun = rtnl_dereference(tfile->tun);
515 
516         if (tun && !tfile->detached) {
517                 u16 index = tfile->queue_index;
518                 BUG_ON(index >= tun->numqueues);
519 
520                 rcu_assign_pointer(tun->tfiles[index],
521                                    tun->tfiles[tun->numqueues - 1]);
522                 ntfile = rtnl_dereference(tun->tfiles[index]);
523                 ntfile->queue_index = index;
524 
525                 --tun->numqueues;
526                 if (clean) {
527                         RCU_INIT_POINTER(tfile->tun, NULL);
528                         sock_put(&tfile->sk);
529                 } else
530                         tun_disable_queue(tun, tfile);
531 
532                 synchronize_net();
533                 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
534                 /* Drop read queue */
535                 tun_queue_purge(tfile);
536                 tun_set_real_num_queues(tun);
537         } else if (tfile->detached && clean) {
538                 tun = tun_enable_queue(tfile);
539                 sock_put(&tfile->sk);
540         }
541 
542         if (clean) {
543                 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
544                         netif_carrier_off(tun->dev);
545 
546                         if (!(tun->flags & IFF_PERSIST) &&
547                             tun->dev->reg_state == NETREG_REGISTERED)
548                                 unregister_netdevice(tun->dev);
549                 }
550                 sock_put(&tfile->sk);
551         }
552 }
553 
554 static void tun_detach(struct tun_file *tfile, bool clean)
555 {
556         rtnl_lock();
557         __tun_detach(tfile, clean);
558         rtnl_unlock();
559 }
560 
561 static void tun_detach_all(struct net_device *dev)
562 {
563         struct tun_struct *tun = netdev_priv(dev);
564         struct tun_file *tfile, *tmp;
565         int i, n = tun->numqueues;
566 
567         for (i = 0; i < n; i++) {
568                 tfile = rtnl_dereference(tun->tfiles[i]);
569                 BUG_ON(!tfile);
570                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
571                 RCU_INIT_POINTER(tfile->tun, NULL);
572                 --tun->numqueues;
573         }
574         list_for_each_entry(tfile, &tun->disabled, next) {
575                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
576                 RCU_INIT_POINTER(tfile->tun, NULL);
577         }
578         BUG_ON(tun->numqueues != 0);
579 
580         synchronize_net();
581         for (i = 0; i < n; i++) {
582                 tfile = rtnl_dereference(tun->tfiles[i]);
583                 /* Drop read queue */
584                 tun_queue_purge(tfile);
585                 sock_put(&tfile->sk);
586         }
587         list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
588                 tun_enable_queue(tfile);
589                 tun_queue_purge(tfile);
590                 sock_put(&tfile->sk);
591         }
592         BUG_ON(tun->numdisabled != 0);
593 
594         if (tun->flags & IFF_PERSIST)
595                 module_put(THIS_MODULE);
596 }
597 
598 static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
599 {
600         struct tun_file *tfile = file->private_data;
601         int err;
602 
603         err = security_tun_dev_attach(tfile->socket.sk, tun->security);
604         if (err < 0)
605                 goto out;
606 
607         err = -EINVAL;
608         if (rtnl_dereference(tfile->tun) && !tfile->detached)
609                 goto out;
610 
611         err = -EBUSY;
612         if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
613                 goto out;
614 
615         err = -E2BIG;
616         if (!tfile->detached &&
617             tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
618                 goto out;
619 
620         err = 0;
621 
622         /* Re-attach the filter to persist device */
623         if (!skip_filter && (tun->filter_attached == true)) {
624                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
625                 if (!err)
626                         goto out;
627         }
628         tfile->queue_index = tun->numqueues;
629         rcu_assign_pointer(tfile->tun, tun);
630         rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
631         tun->numqueues++;
632 
633         if (tfile->detached)
634                 tun_enable_queue(tfile);
635         else
636                 sock_hold(&tfile->sk);
637 
638         tun_set_real_num_queues(tun);
639 
640         /* device is allowed to go away first, so no need to hold extra
641          * refcnt.
642          */
643 
644 out:
645         return err;
646 }
647 
648 static struct tun_struct *__tun_get(struct tun_file *tfile)
649 {
650         struct tun_struct *tun;
651 
652         rcu_read_lock();
653         tun = rcu_dereference(tfile->tun);
654         if (tun)
655                 dev_hold(tun->dev);
656         rcu_read_unlock();
657 
658         return tun;
659 }
660 
661 static struct tun_struct *tun_get(struct file *file)
662 {
663         return __tun_get(file->private_data);
664 }
665 
666 static void tun_put(struct tun_struct *tun)
667 {
668         dev_put(tun->dev);
669 }
670 
671 /* TAP filtering */
672 static void addr_hash_set(u32 *mask, const u8 *addr)
673 {
674         int n = ether_crc(ETH_ALEN, addr) >> 26;
675         mask[n >> 5] |= (1 << (n & 31));
676 }
677 
678 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
679 {
680         int n = ether_crc(ETH_ALEN, addr) >> 26;
681         return mask[n >> 5] & (1 << (n & 31));
682 }
683 
684 static int update_filter(struct tap_filter *filter, void __user *arg)
685 {
686         struct { u8 u[ETH_ALEN]; } *addr;
687         struct tun_filter uf;
688         int err, alen, n, nexact;
689 
690         if (copy_from_user(&uf, arg, sizeof(uf)))
691                 return -EFAULT;
692 
693         if (!uf.count) {
694                 /* Disabled */
695                 filter->count = 0;
696                 return 0;
697         }
698 
699         alen = ETH_ALEN * uf.count;
700         addr = kmalloc(alen, GFP_KERNEL);
701         if (!addr)
702                 return -ENOMEM;
703 
704         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
705                 err = -EFAULT;
706                 goto done;
707         }
708 
709         /* The filter is updated without holding any locks. Which is
710          * perfectly safe. We disable it first and in the worst
711          * case we'll accept a few undesired packets. */
712         filter->count = 0;
713         wmb();
714 
715         /* Use first set of addresses as an exact filter */
716         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
717                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
718 
719         nexact = n;
720 
721         /* Remaining multicast addresses are hashed,
722          * unicast will leave the filter disabled. */
723         memset(filter->mask, 0, sizeof(filter->mask));
724         for (; n < uf.count; n++) {
725                 if (!is_multicast_ether_addr(addr[n].u)) {
726                         err = 0; /* no filter */
727                         goto done;
728                 }
729                 addr_hash_set(filter->mask, addr[n].u);
730         }
731 
732         /* For ALLMULTI just set the mask to all ones.
733          * This overrides the mask populated above. */
734         if ((uf.flags & TUN_FLT_ALLMULTI))
735                 memset(filter->mask, ~0, sizeof(filter->mask));
736 
737         /* Now enable the filter */
738         wmb();
739         filter->count = nexact;
740 
741         /* Return the number of exact filters */
742         err = nexact;
743 
744 done:
745         kfree(addr);
746         return err;
747 }
748 
749 /* Returns: 0 - drop, !=0 - accept */
750 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
751 {
752         /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
753          * at this point. */
754         struct ethhdr *eh = (struct ethhdr *) skb->data;
755         int i;
756 
757         /* Exact match */
758         for (i = 0; i < filter->count; i++)
759                 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
760                         return 1;
761 
762         /* Inexact match (multicast only) */
763         if (is_multicast_ether_addr(eh->h_dest))
764                 return addr_hash_test(filter->mask, eh->h_dest);
765 
766         return 0;
767 }
768 
769 /*
770  * Checks whether the packet is accepted or not.
771  * Returns: 0 - drop, !=0 - accept
772  */
773 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
774 {
775         if (!filter->count)
776                 return 1;
777 
778         return run_filter(filter, skb);
779 }
780 
781 /* Network device part of the driver */
782 
783 static const struct ethtool_ops tun_ethtool_ops;
784 
785 /* Net device detach from fd. */
786 static void tun_net_uninit(struct net_device *dev)
787 {
788         tun_detach_all(dev);
789 }
790 
791 /* Net device open. */
792 static int tun_net_open(struct net_device *dev)
793 {
794         netif_tx_start_all_queues(dev);
795         return 0;
796 }
797 
798 /* Net device close. */
799 static int tun_net_close(struct net_device *dev)
800 {
801         netif_tx_stop_all_queues(dev);
802         return 0;
803 }
804 
805 /* Net device start xmit */
806 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
807 {
808         struct tun_struct *tun = netdev_priv(dev);
809         int txq = skb->queue_mapping;
810         struct tun_file *tfile;
811         u32 numqueues = 0;
812 
813         rcu_read_lock();
814         tfile = rcu_dereference(tun->tfiles[txq]);
815         numqueues = ACCESS_ONCE(tun->numqueues);
816 
817         /* Drop packet if interface is not attached */
818         if (txq >= numqueues)
819                 goto drop;
820 
821         if (numqueues == 1) {
822                 /* Select queue was not called for the skbuff, so we extract the
823                  * RPS hash and save it into the flow_table here.
824                  */
825                 __u32 rxhash;
826 
827                 rxhash = skb_get_hash(skb);
828                 if (rxhash) {
829                         struct tun_flow_entry *e;
830                         e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
831                                         rxhash);
832                         if (e)
833                                 tun_flow_save_rps_rxhash(e, rxhash);
834                 }
835         }
836 
837         tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
838 
839         BUG_ON(!tfile);
840 
841         /* Drop if the filter does not like it.
842          * This is a noop if the filter is disabled.
843          * Filter can be enabled only for the TAP devices. */
844         if (!check_filter(&tun->txflt, skb))
845                 goto drop;
846 
847         if (tfile->socket.sk->sk_filter &&
848             sk_filter(tfile->socket.sk, skb))
849                 goto drop;
850 
851         /* Limit the number of packets queued by dividing txq length with the
852          * number of queues.
853          */
854         if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
855                           >= dev->tx_queue_len)
856                 goto drop;
857 
858         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
859                 goto drop;
860 
861         if (skb->sk && sk_fullsock(skb->sk)) {
862                 sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
863                 sw_tx_timestamp(skb);
864         }
865 
866         /* Orphan the skb - required as we might hang on to it
867          * for indefinite time.
868          */
869         skb_orphan(skb);
870 
871         nf_reset(skb);
872 
873         /* Enqueue packet */
874         skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
875 
876         /* Notify and wake up reader process */
877         if (tfile->flags & TUN_FASYNC)
878                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
879         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
880 
881         rcu_read_unlock();
882         return NETDEV_TX_OK;
883 
884 drop:
885         dev->stats.tx_dropped++;
886         skb_tx_error(skb);
887         kfree_skb(skb);
888         rcu_read_unlock();
889         return NET_XMIT_DROP;
890 }
891 
892 static void tun_net_mclist(struct net_device *dev)
893 {
894         /*
895          * This callback is supposed to deal with mc filter in
896          * _rx_ path and has nothing to do with the _tx_ path.
897          * In rx path we always accept everything userspace gives us.
898          */
899 }
900 
901 #define MIN_MTU 68
902 #define MAX_MTU 65535
903 
904 static int
905 tun_net_change_mtu(struct net_device *dev, int new_mtu)
906 {
907         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
908                 return -EINVAL;
909         dev->mtu = new_mtu;
910         return 0;
911 }
912 
913 static netdev_features_t tun_net_fix_features(struct net_device *dev,
914         netdev_features_t features)
915 {
916         struct tun_struct *tun = netdev_priv(dev);
917 
918         return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
919 }
920 #ifdef CONFIG_NET_POLL_CONTROLLER
921 static void tun_poll_controller(struct net_device *dev)
922 {
923         /*
924          * Tun only receives frames when:
925          * 1) the char device endpoint gets data from user space
926          * 2) the tun socket gets a sendmsg call from user space
927          * Since both of those are synchronous operations, we are guaranteed
928          * never to have pending data when we poll for it
929          * so there is nothing to do here but return.
930          * We need this though so netpoll recognizes us as an interface that
931          * supports polling, which enables bridge devices in virt setups to
932          * still use netconsole
933          */
934         return;
935 }
936 #endif
937 static const struct net_device_ops tun_netdev_ops = {
938         .ndo_uninit             = tun_net_uninit,
939         .ndo_open               = tun_net_open,
940         .ndo_stop               = tun_net_close,
941         .ndo_start_xmit         = tun_net_xmit,
942         .ndo_change_mtu         = tun_net_change_mtu,
943         .ndo_fix_features       = tun_net_fix_features,
944         .ndo_select_queue       = tun_select_queue,
945 #ifdef CONFIG_NET_POLL_CONTROLLER
946         .ndo_poll_controller    = tun_poll_controller,
947 #endif
948 };
949 
950 static const struct net_device_ops tap_netdev_ops = {
951         .ndo_uninit             = tun_net_uninit,
952         .ndo_open               = tun_net_open,
953         .ndo_stop               = tun_net_close,
954         .ndo_start_xmit         = tun_net_xmit,
955         .ndo_change_mtu         = tun_net_change_mtu,
956         .ndo_fix_features       = tun_net_fix_features,
957         .ndo_set_rx_mode        = tun_net_mclist,
958         .ndo_set_mac_address    = eth_mac_addr,
959         .ndo_validate_addr      = eth_validate_addr,
960         .ndo_select_queue       = tun_select_queue,
961 #ifdef CONFIG_NET_POLL_CONTROLLER
962         .ndo_poll_controller    = tun_poll_controller,
963 #endif
964         .ndo_features_check     = passthru_features_check,
965 };
966 
967 static void tun_flow_init(struct tun_struct *tun)
968 {
969         int i;
970 
971         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
972                 INIT_HLIST_HEAD(&tun->flows[i]);
973 
974         tun->ageing_time = TUN_FLOW_EXPIRE;
975         setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
976         mod_timer(&tun->flow_gc_timer,
977                   round_jiffies_up(jiffies + tun->ageing_time));
978 }
979 
980 static void tun_flow_uninit(struct tun_struct *tun)
981 {
982         del_timer_sync(&tun->flow_gc_timer);
983         tun_flow_flush(tun);
984 }
985 
986 /* Initialize net device. */
987 static void tun_net_init(struct net_device *dev)
988 {
989         struct tun_struct *tun = netdev_priv(dev);
990 
991         switch (tun->flags & TUN_TYPE_MASK) {
992         case IFF_TUN:
993                 dev->netdev_ops = &tun_netdev_ops;
994 
995                 /* Point-to-Point TUN Device */
996                 dev->hard_header_len = 0;
997                 dev->addr_len = 0;
998                 dev->mtu = 1500;
999 
1000                 /* Zero header length */
1001                 dev->type = ARPHRD_NONE;
1002                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1003                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
1004                 break;
1005 
1006         case IFF_TAP:
1007                 dev->netdev_ops = &tap_netdev_ops;
1008                 /* Ethernet TAP Device */
1009                 ether_setup(dev);
1010                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1011                 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1012 
1013                 eth_hw_addr_random(dev);
1014 
1015                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
1016                 break;
1017         }
1018 }
1019 
1020 /* Character device part */
1021 
1022 /* Poll */
1023 static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1024 {
1025         struct tun_file *tfile = file->private_data;
1026         struct tun_struct *tun = __tun_get(tfile);
1027         struct sock *sk;
1028         unsigned int mask = 0;
1029 
1030         if (!tun)
1031                 return POLLERR;
1032 
1033         sk = tfile->socket.sk;
1034 
1035         tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1036 
1037         poll_wait(file, sk_sleep(sk), wait);
1038 
1039         if (!skb_queue_empty(&sk->sk_receive_queue))
1040                 mask |= POLLIN | POLLRDNORM;
1041 
1042         if (sock_writeable(sk) ||
1043             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1044              sock_writeable(sk)))
1045                 mask |= POLLOUT | POLLWRNORM;
1046 
1047         if (tun->dev->reg_state != NETREG_REGISTERED)
1048                 mask = POLLERR;
1049 
1050         tun_put(tun);
1051         return mask;
1052 }
1053 
1054 /* prepad is the amount to reserve at front.  len is length after that.
1055  * linear is a hint as to how much to copy (usually headers). */
1056 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1057                                      size_t prepad, size_t len,
1058                                      size_t linear, int noblock)
1059 {
1060         struct sock *sk = tfile->socket.sk;
1061         struct sk_buff *skb;
1062         int err;
1063 
1064         /* Under a page?  Don't bother with paged skb. */
1065         if (prepad + len < PAGE_SIZE || !linear)
1066                 linear = len;
1067 
1068         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1069                                    &err, 0);
1070         if (!skb)
1071                 return ERR_PTR(err);
1072 
1073         skb_reserve(skb, prepad);
1074         skb_put(skb, linear);
1075         skb->data_len = len - linear;
1076         skb->len += len - linear;
1077 
1078         return skb;
1079 }
1080 
1081 /* Get packet from user space buffer */
1082 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1083                             void *msg_control, struct iov_iter *from,
1084                             int noblock)
1085 {
1086         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1087         struct sk_buff *skb;
1088         size_t total_len = iov_iter_count(from);
1089         size_t len = total_len, align = NET_SKB_PAD, linear;
1090         struct virtio_net_hdr gso = { 0 };
1091         int good_linear;
1092         int copylen;
1093         bool zerocopy = false;
1094         int err;
1095         u32 rxhash;
1096         ssize_t n;
1097 
1098         if (!(tun->dev->flags & IFF_UP))
1099                 return -EIO;
1100 
1101         if (!(tun->flags & IFF_NO_PI)) {
1102                 if (len < sizeof(pi))
1103                         return -EINVAL;
1104                 len -= sizeof(pi);
1105 
1106                 n = copy_from_iter(&pi, sizeof(pi), from);
1107                 if (n != sizeof(pi))
1108                         return -EFAULT;
1109         }
1110 
1111         if (tun->flags & IFF_VNET_HDR) {
1112                 if (len < tun->vnet_hdr_sz)
1113                         return -EINVAL;
1114                 len -= tun->vnet_hdr_sz;
1115 
1116                 n = copy_from_iter(&gso, sizeof(gso), from);
1117                 if (n != sizeof(gso))
1118                         return -EFAULT;
1119 
1120                 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1121                     tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1122                         gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1123 
1124                 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1125                         return -EINVAL;
1126                 iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
1127         }
1128 
1129         if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1130                 align += NET_IP_ALIGN;
1131                 if (unlikely(len < ETH_HLEN ||
1132                              (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1133                         return -EINVAL;
1134         }
1135 
1136         good_linear = SKB_MAX_HEAD(align);
1137 
1138         if (msg_control) {
1139                 struct iov_iter i = *from;
1140 
1141                 /* There are 256 bytes to be copied in skb, so there is
1142                  * enough room for skb expand head in case it is used.
1143                  * The rest of the buffer is mapped from userspace.
1144                  */
1145                 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1146                 if (copylen > good_linear)
1147                         copylen = good_linear;
1148                 linear = copylen;
1149                 iov_iter_advance(&i, copylen);
1150                 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1151                         zerocopy = true;
1152         }
1153 
1154         if (!zerocopy) {
1155                 copylen = len;
1156                 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1157                         linear = good_linear;
1158                 else
1159                         linear = tun16_to_cpu(tun, gso.hdr_len);
1160         }
1161 
1162         skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1163         if (IS_ERR(skb)) {
1164                 if (PTR_ERR(skb) != -EAGAIN)
1165                         tun->dev->stats.rx_dropped++;
1166                 return PTR_ERR(skb);
1167         }
1168 
1169         if (zerocopy)
1170                 err = zerocopy_sg_from_iter(skb, from);
1171         else {
1172                 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1173                 if (!err && msg_control) {
1174                         struct ubuf_info *uarg = msg_control;
1175                         uarg->callback(uarg, false);
1176                 }
1177         }
1178 
1179         if (err) {
1180                 tun->dev->stats.rx_dropped++;
1181                 kfree_skb(skb);
1182                 return -EFAULT;
1183         }
1184 
1185         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1186                 if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
1187                                           tun16_to_cpu(tun, gso.csum_offset))) {
1188                         tun->dev->stats.rx_frame_errors++;
1189                         kfree_skb(skb);
1190                         return -EINVAL;
1191                 }
1192         }
1193 
1194         switch (tun->flags & TUN_TYPE_MASK) {
1195         case IFF_TUN:
1196                 if (tun->flags & IFF_NO_PI) {
1197                         switch (skb->data[0] & 0xf0) {
1198                         case 0x40:
1199                                 pi.proto = htons(ETH_P_IP);
1200                                 break;
1201                         case 0x60:
1202                                 pi.proto = htons(ETH_P_IPV6);
1203                                 break;
1204                         default:
1205                                 tun->dev->stats.rx_dropped++;
1206                                 kfree_skb(skb);
1207                                 return -EINVAL;
1208                         }
1209                 }
1210 
1211                 skb_reset_mac_header(skb);
1212                 skb->protocol = pi.proto;
1213                 skb->dev = tun->dev;
1214                 break;
1215         case IFF_TAP:
1216                 skb->protocol = eth_type_trans(skb, tun->dev);
1217                 break;
1218         }
1219 
1220         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1221                 pr_debug("GSO!\n");
1222                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1223                 case VIRTIO_NET_HDR_GSO_TCPV4:
1224                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1225                         break;
1226                 case VIRTIO_NET_HDR_GSO_TCPV6:
1227                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1228                         break;
1229                 case VIRTIO_NET_HDR_GSO_UDP:
1230                         skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1231                         break;
1232                 default:
1233                         tun->dev->stats.rx_frame_errors++;
1234                         kfree_skb(skb);
1235                         return -EINVAL;
1236                 }
1237 
1238                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1239                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1240 
1241                 skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
1242                 if (skb_shinfo(skb)->gso_size == 0) {
1243                         tun->dev->stats.rx_frame_errors++;
1244                         kfree_skb(skb);
1245                         return -EINVAL;
1246                 }
1247 
1248                 /* Header must be checked, and gso_segs computed. */
1249                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1250                 skb_shinfo(skb)->gso_segs = 0;
1251         }
1252 
1253         /* copy skb_ubuf_info for callback when skb has no error */
1254         if (zerocopy) {
1255                 skb_shinfo(skb)->destructor_arg = msg_control;
1256                 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1257                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1258         }
1259 
1260         skb_reset_network_header(skb);
1261         skb_probe_transport_header(skb, 0);
1262 
1263         rxhash = skb_get_hash(skb);
1264         netif_rx_ni(skb);
1265 
1266         tun->dev->stats.rx_packets++;
1267         tun->dev->stats.rx_bytes += len;
1268 
1269         tun_flow_update(tun, rxhash, tfile);
1270         return total_len;
1271 }
1272 
1273 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
1274 {
1275         struct file *file = iocb->ki_filp;
1276         struct tun_struct *tun = tun_get(file);
1277         struct tun_file *tfile = file->private_data;
1278         ssize_t result;
1279 
1280         if (!tun)
1281                 return -EBADFD;
1282 
1283         result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
1284 
1285         tun_put(tun);
1286         return result;
1287 }
1288 
1289 /* Put packet to the user space buffer */
1290 static ssize_t tun_put_user(struct tun_struct *tun,
1291                             struct tun_file *tfile,
1292                             struct sk_buff *skb,
1293                             struct iov_iter *iter)
1294 {
1295         struct tun_pi pi = { 0, skb->protocol };
1296         ssize_t total;
1297         int vlan_offset = 0;
1298         int vlan_hlen = 0;
1299         int vnet_hdr_sz = 0;
1300 
1301         if (skb_vlan_tag_present(skb))
1302                 vlan_hlen = VLAN_HLEN;
1303 
1304         if (tun->flags & IFF_VNET_HDR)
1305                 vnet_hdr_sz = tun->vnet_hdr_sz;
1306 
1307         total = skb->len + vlan_hlen + vnet_hdr_sz;
1308 
1309         if (!(tun->flags & IFF_NO_PI)) {
1310                 if (iov_iter_count(iter) < sizeof(pi))
1311                         return -EINVAL;
1312 
1313                 total += sizeof(pi);
1314                 if (iov_iter_count(iter) < total) {
1315                         /* Packet will be striped */
1316                         pi.flags |= TUN_PKT_STRIP;
1317                 }
1318 
1319                 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
1320                         return -EFAULT;
1321         }
1322 
1323         if (vnet_hdr_sz) {
1324                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1325                 if (iov_iter_count(iter) < vnet_hdr_sz)
1326                         return -EINVAL;
1327 
1328                 if (skb_is_gso(skb)) {
1329                         struct skb_shared_info *sinfo = skb_shinfo(skb);
1330 
1331                         /* This is a hint as to how much should be linear. */
1332                         gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
1333                         gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
1334                         if (sinfo->gso_type & SKB_GSO_TCPV4)
1335                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1336                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
1337                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1338                         else if (sinfo->gso_type & SKB_GSO_UDP)
1339                                 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1340                         else {
1341                                 pr_err("unexpected GSO type: "
1342                                        "0x%x, gso_size %d, hdr_len %d\n",
1343                                        sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1344                                        tun16_to_cpu(tun, gso.hdr_len));
1345                                 print_hex_dump(KERN_ERR, "tun: ",
1346                                                DUMP_PREFIX_NONE,
1347                                                16, 1, skb->head,
1348                                                min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1349                                 WARN_ON_ONCE(1);
1350                                 return -EINVAL;
1351                         }
1352                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1353                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1354                 } else
1355                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1356 
1357                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1358                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1359                         gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
1360                                                       vlan_hlen);
1361                         gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
1362                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1363                         gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1364                 } /* else everything is zero */
1365 
1366                 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
1367                         return -EFAULT;
1368 
1369                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1370         }
1371 
1372         if (vlan_hlen) {
1373                 int ret;
1374                 struct {
1375                         __be16 h_vlan_proto;
1376                         __be16 h_vlan_TCI;
1377                 } veth;
1378 
1379                 veth.h_vlan_proto = skb->vlan_proto;
1380                 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
1381 
1382                 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1383 
1384                 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
1385                 if (ret || !iov_iter_count(iter))
1386                         goto done;
1387 
1388                 ret = copy_to_iter(&veth, sizeof(veth), iter);
1389                 if (ret != sizeof(veth) || !iov_iter_count(iter))
1390                         goto done;
1391         }
1392 
1393         skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
1394 
1395 done:
1396         tun->dev->stats.tx_packets++;
1397         tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
1398 
1399         return total;
1400 }
1401 
1402 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1403                            struct iov_iter *to,
1404                            int noblock)
1405 {
1406         struct sk_buff *skb;
1407         ssize_t ret;
1408         int peeked, err, off = 0;
1409 
1410         tun_debug(KERN_INFO, tun, "tun_do_read\n");
1411 
1412         if (!iov_iter_count(to))
1413                 return 0;
1414 
1415         if (tun->dev->reg_state != NETREG_REGISTERED)
1416                 return -EIO;
1417 
1418         /* Read frames from queue */
1419         skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1420                                   &peeked, &off, &err);
1421         if (!skb)
1422                 return err;
1423 
1424         ret = tun_put_user(tun, tfile, skb, to);
1425         if (unlikely(ret < 0))
1426                 kfree_skb(skb);
1427         else
1428                 consume_skb(skb);
1429 
1430         return ret;
1431 }
1432 
1433 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1434 {
1435         struct file *file = iocb->ki_filp;
1436         struct tun_file *tfile = file->private_data;
1437         struct tun_struct *tun = __tun_get(tfile);
1438         ssize_t len = iov_iter_count(to), ret;
1439 
1440         if (!tun)
1441                 return -EBADFD;
1442         ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
1443         ret = min_t(ssize_t, ret, len);
1444         if (ret > 0)
1445                 iocb->ki_pos = ret;
1446         tun_put(tun);
1447         return ret;
1448 }
1449 
1450 static void tun_free_netdev(struct net_device *dev)
1451 {
1452         struct tun_struct *tun = netdev_priv(dev);
1453 
1454         BUG_ON(!(list_empty(&tun->disabled)));
1455         tun_flow_uninit(tun);
1456         security_tun_dev_free_security(tun->security);
1457         free_netdev(dev);
1458 }
1459 
1460 static void tun_setup(struct net_device *dev)
1461 {
1462         struct tun_struct *tun = netdev_priv(dev);
1463 
1464         tun->owner = INVALID_UID;
1465         tun->group = INVALID_GID;
1466 
1467         dev->ethtool_ops = &tun_ethtool_ops;
1468         dev->destructor = tun_free_netdev;
1469 }
1470 
1471 /* Trivial set of netlink ops to allow deleting tun or tap
1472  * device with netlink.
1473  */
1474 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1475 {
1476         return -EINVAL;
1477 }
1478 
1479 static struct rtnl_link_ops tun_link_ops __read_mostly = {
1480         .kind           = DRV_NAME,
1481         .priv_size      = sizeof(struct tun_struct),
1482         .setup          = tun_setup,
1483         .validate       = tun_validate,
1484 };
1485 
1486 static void tun_sock_write_space(struct sock *sk)
1487 {
1488         struct tun_file *tfile;
1489         wait_queue_head_t *wqueue;
1490 
1491         if (!sock_writeable(sk))
1492                 return;
1493 
1494         if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1495                 return;
1496 
1497         wqueue = sk_sleep(sk);
1498         if (wqueue && waitqueue_active(wqueue))
1499                 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1500                                                 POLLWRNORM | POLLWRBAND);
1501 
1502         tfile = container_of(sk, struct tun_file, sk);
1503         kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1504 }
1505 
1506 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1507 {
1508         int ret;
1509         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1510         struct tun_struct *tun = __tun_get(tfile);
1511 
1512         if (!tun)
1513                 return -EBADFD;
1514 
1515         ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
1516                            m->msg_flags & MSG_DONTWAIT);
1517         tun_put(tun);
1518         return ret;
1519 }
1520 
1521 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
1522                        int flags)
1523 {
1524         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1525         struct tun_struct *tun = __tun_get(tfile);
1526         int ret;
1527 
1528         if (!tun)
1529                 return -EBADFD;
1530 
1531         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1532                 ret = -EINVAL;
1533                 goto out;
1534         }
1535         if (flags & MSG_ERRQUEUE) {
1536                 ret = sock_recv_errqueue(sock->sk, m, total_len,
1537                                          SOL_PACKET, TUN_TX_TIMESTAMP);
1538                 goto out;
1539         }
1540         ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
1541         if (ret > (ssize_t)total_len) {
1542                 m->msg_flags |= MSG_TRUNC;
1543                 ret = flags & MSG_TRUNC ? ret : total_len;
1544         }
1545 out:
1546         tun_put(tun);
1547         return ret;
1548 }
1549 
1550 /* Ops structure to mimic raw sockets with tun */
1551 static const struct proto_ops tun_socket_ops = {
1552         .sendmsg = tun_sendmsg,
1553         .recvmsg = tun_recvmsg,
1554 };
1555 
1556 static struct proto tun_proto = {
1557         .name           = "tun",
1558         .owner          = THIS_MODULE,
1559         .obj_size       = sizeof(struct tun_file),
1560 };
1561 
1562 static int tun_flags(struct tun_struct *tun)
1563 {
1564         return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
1565 }
1566 
1567 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1568                               char *buf)
1569 {
1570         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1571         return sprintf(buf, "0x%x\n", tun_flags(tun));
1572 }
1573 
1574 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1575                               char *buf)
1576 {
1577         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1578         return uid_valid(tun->owner)?
1579                 sprintf(buf, "%u\n",
1580                         from_kuid_munged(current_user_ns(), tun->owner)):
1581                 sprintf(buf, "-1\n");
1582 }
1583 
1584 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1585                               char *buf)
1586 {
1587         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1588         return gid_valid(tun->group) ?
1589                 sprintf(buf, "%u\n",
1590                         from_kgid_munged(current_user_ns(), tun->group)):
1591                 sprintf(buf, "-1\n");
1592 }
1593 
1594 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1595 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1596 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1597 
1598 static struct attribute *tun_dev_attrs[] = {
1599         &dev_attr_tun_flags.attr,
1600         &dev_attr_owner.attr,
1601         &dev_attr_group.attr,
1602         NULL
1603 };
1604 
1605 static const struct attribute_group tun_attr_group = {
1606         .attrs = tun_dev_attrs
1607 };
1608 
1609 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1610 {
1611         struct tun_struct *tun;
1612         struct tun_file *tfile = file->private_data;
1613         struct net_device *dev;
1614         int err;
1615 
1616         if (tfile->detached)
1617                 return -EINVAL;
1618 
1619         dev = __dev_get_by_name(net, ifr->ifr_name);
1620         if (dev) {
1621                 if (ifr->ifr_flags & IFF_TUN_EXCL)
1622                         return -EBUSY;
1623                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1624                         tun = netdev_priv(dev);
1625                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1626                         tun = netdev_priv(dev);
1627                 else
1628                         return -EINVAL;
1629 
1630                 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1631                     !!(tun->flags & IFF_MULTI_QUEUE))
1632                         return -EINVAL;
1633 
1634                 if (tun_not_capable(tun))
1635                         return -EPERM;
1636                 err = security_tun_dev_open(tun->security);
1637                 if (err < 0)
1638                         return err;
1639 
1640                 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1641                 if (err < 0)
1642                         return err;
1643 
1644                 if (tun->flags & IFF_MULTI_QUEUE &&
1645                     (tun->numqueues + tun->numdisabled > 1)) {
1646                         /* One or more queue has already been attached, no need
1647                          * to initialize the device again.
1648                          */
1649                         return 0;
1650                 }
1651         }
1652         else {
1653                 char *name;
1654                 unsigned long flags = 0;
1655                 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1656                              MAX_TAP_QUEUES : 1;
1657 
1658                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1659                         return -EPERM;
1660                 err = security_tun_dev_create();
1661                 if (err < 0)
1662                         return err;
1663 
1664                 /* Set dev type */
1665                 if (ifr->ifr_flags & IFF_TUN) {
1666                         /* TUN device */
1667                         flags |= IFF_TUN;
1668                         name = "tun%d";
1669                 } else if (ifr->ifr_flags & IFF_TAP) {
1670                         /* TAP device */
1671                         flags |= IFF_TAP;
1672                         name = "tap%d";
1673                 } else
1674                         return -EINVAL;
1675 
1676                 if (*ifr->ifr_name)
1677                         name = ifr->ifr_name;
1678 
1679                 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1680                                        NET_NAME_UNKNOWN, tun_setup, queues,
1681                                        queues);
1682 
1683                 if (!dev)
1684                         return -ENOMEM;
1685 
1686                 dev_net_set(dev, net);
1687                 dev->rtnl_link_ops = &tun_link_ops;
1688                 dev->ifindex = tfile->ifindex;
1689                 dev->sysfs_groups[0] = &tun_attr_group;
1690 
1691                 tun = netdev_priv(dev);
1692                 tun->dev = dev;
1693                 tun->flags = flags;
1694                 tun->txflt.count = 0;
1695                 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1696 
1697                 tun->filter_attached = false;
1698                 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1699 
1700                 spin_lock_init(&tun->lock);
1701 
1702                 err = security_tun_dev_alloc_security(&tun->security);
1703                 if (err < 0)
1704                         goto err_free_dev;
1705 
1706                 tun_net_init(dev);
1707                 tun_flow_init(tun);
1708 
1709                 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1710                                    TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1711                                    NETIF_F_HW_VLAN_STAG_TX;
1712                 dev->features = dev->hw_features;
1713                 dev->vlan_features = dev->features &
1714                                      ~(NETIF_F_HW_VLAN_CTAG_TX |
1715                                        NETIF_F_HW_VLAN_STAG_TX);
1716 
1717                 INIT_LIST_HEAD(&tun->disabled);
1718                 err = tun_attach(tun, file, false);
1719                 if (err < 0)
1720                         goto err_free_flow;
1721 
1722                 err = register_netdevice(tun->dev);
1723                 if (err < 0)
1724                         goto err_detach;
1725         }
1726 
1727         netif_carrier_on(tun->dev);
1728 
1729         tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1730 
1731         tun->flags = (tun->flags & ~TUN_FEATURES) |
1732                 (ifr->ifr_flags & TUN_FEATURES);
1733 
1734         /* Make sure persistent devices do not get stuck in
1735          * xoff state.
1736          */
1737         if (netif_running(tun->dev))
1738                 netif_tx_wake_all_queues(tun->dev);
1739 
1740         strcpy(ifr->ifr_name, tun->dev->name);
1741         return 0;
1742 
1743 err_detach:
1744         tun_detach_all(dev);
1745 err_free_flow:
1746         tun_flow_uninit(tun);
1747         security_tun_dev_free_security(tun->security);
1748 err_free_dev:
1749         free_netdev(dev);
1750         return err;
1751 }
1752 
1753 static void tun_get_iff(struct net *net, struct tun_struct *tun,
1754                        struct ifreq *ifr)
1755 {
1756         tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1757 
1758         strcpy(ifr->ifr_name, tun->dev->name);
1759 
1760         ifr->ifr_flags = tun_flags(tun);
1761 
1762 }
1763 
1764 /* This is like a cut-down ethtool ops, except done via tun fd so no
1765  * privs required. */
1766 static int set_offload(struct tun_struct *tun, unsigned long arg)
1767 {
1768         netdev_features_t features = 0;
1769 
1770         if (arg & TUN_F_CSUM) {
1771                 features |= NETIF_F_HW_CSUM;
1772                 arg &= ~TUN_F_CSUM;
1773 
1774                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1775                         if (arg & TUN_F_TSO_ECN) {
1776                                 features |= NETIF_F_TSO_ECN;
1777                                 arg &= ~TUN_F_TSO_ECN;
1778                         }
1779                         if (arg & TUN_F_TSO4)
1780                                 features |= NETIF_F_TSO;
1781                         if (arg & TUN_F_TSO6)
1782                                 features |= NETIF_F_TSO6;
1783                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1784                 }
1785 
1786                 if (arg & TUN_F_UFO) {
1787                         features |= NETIF_F_UFO;
1788                         arg &= ~TUN_F_UFO;
1789                 }
1790         }
1791 
1792         /* This gives the user a way to test for new features in future by
1793          * trying to set them. */
1794         if (arg)
1795                 return -EINVAL;
1796 
1797         tun->set_features = features;
1798         netdev_update_features(tun->dev);
1799 
1800         return 0;
1801 }
1802 
1803 static void tun_detach_filter(struct tun_struct *tun, int n)
1804 {
1805         int i;
1806         struct tun_file *tfile;
1807 
1808         for (i = 0; i < n; i++) {
1809                 tfile = rtnl_dereference(tun->tfiles[i]);
1810                 sk_detach_filter(tfile->socket.sk);
1811         }
1812 
1813         tun->filter_attached = false;
1814 }
1815 
1816 static int tun_attach_filter(struct tun_struct *tun)
1817 {
1818         int i, ret = 0;
1819         struct tun_file *tfile;
1820 
1821         for (i = 0; i < tun->numqueues; i++) {
1822                 tfile = rtnl_dereference(tun->tfiles[i]);
1823                 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1824                 if (ret) {
1825                         tun_detach_filter(tun, i);
1826                         return ret;
1827                 }
1828         }
1829 
1830         tun->filter_attached = true;
1831         return ret;
1832 }
1833 
1834 static void tun_set_sndbuf(struct tun_struct *tun)
1835 {
1836         struct tun_file *tfile;
1837         int i;
1838 
1839         for (i = 0; i < tun->numqueues; i++) {
1840                 tfile = rtnl_dereference(tun->tfiles[i]);
1841                 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1842         }
1843 }
1844 
1845 static int tun_set_queue(struct file *file, struct ifreq *ifr)
1846 {
1847         struct tun_file *tfile = file->private_data;
1848         struct tun_struct *tun;
1849         int ret = 0;
1850 
1851         rtnl_lock();
1852 
1853         if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1854                 tun = tfile->detached;
1855                 if (!tun) {
1856                         ret = -EINVAL;
1857                         goto unlock;
1858                 }
1859                 ret = security_tun_dev_attach_queue(tun->security);
1860                 if (ret < 0)
1861                         goto unlock;
1862                 ret = tun_attach(tun, file, false);
1863         } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1864                 tun = rtnl_dereference(tfile->tun);
1865                 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1866                         ret = -EINVAL;
1867                 else
1868                         __tun_detach(tfile, false);
1869         } else
1870                 ret = -EINVAL;
1871 
1872 unlock:
1873         rtnl_unlock();
1874         return ret;
1875 }
1876 
1877 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1878                             unsigned long arg, int ifreq_len)
1879 {
1880         struct tun_file *tfile = file->private_data;
1881         struct tun_struct *tun;
1882         void __user* argp = (void __user*)arg;
1883         struct ifreq ifr;
1884         kuid_t owner;
1885         kgid_t group;
1886         int sndbuf;
1887         int vnet_hdr_sz;
1888         unsigned int ifindex;
1889         int le;
1890         int ret;
1891 
1892         if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1893                 if (copy_from_user(&ifr, argp, ifreq_len))
1894                         return -EFAULT;
1895         } else {
1896                 memset(&ifr, 0, sizeof(ifr));
1897         }
1898         if (cmd == TUNGETFEATURES) {
1899                 /* Currently this just means: "what IFF flags are valid?".
1900                  * This is needed because we never checked for invalid flags on
1901                  * TUNSETIFF.
1902                  */
1903                 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
1904                                 (unsigned int __user*)argp);
1905         } else if (cmd == TUNSETQUEUE)
1906                 return tun_set_queue(file, &ifr);
1907 
1908         ret = 0;
1909         rtnl_lock();
1910 
1911         tun = __tun_get(tfile);
1912         if (cmd == TUNSETIFF && !tun) {
1913                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1914 
1915                 ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
1916 
1917                 if (ret)
1918                         goto unlock;
1919 
1920                 if (copy_to_user(argp, &ifr, ifreq_len))
1921                         ret = -EFAULT;
1922                 goto unlock;
1923         }
1924         if (cmd == TUNSETIFINDEX) {
1925                 ret = -EPERM;
1926                 if (tun)
1927                         goto unlock;
1928 
1929                 ret = -EFAULT;
1930                 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
1931                         goto unlock;
1932 
1933                 ret = 0;
1934                 tfile->ifindex = ifindex;
1935                 goto unlock;
1936         }
1937 
1938         ret = -EBADFD;
1939         if (!tun)
1940                 goto unlock;
1941 
1942         tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1943 
1944         ret = 0;
1945         switch (cmd) {
1946         case TUNGETIFF:
1947                 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1948 
1949                 if (tfile->detached)
1950                         ifr.ifr_flags |= IFF_DETACH_QUEUE;
1951                 if (!tfile->socket.sk->sk_filter)
1952                         ifr.ifr_flags |= IFF_NOFILTER;
1953 
1954                 if (copy_to_user(argp, &ifr, ifreq_len))
1955                         ret = -EFAULT;
1956                 break;
1957 
1958         case TUNSETNOCSUM:
1959                 /* Disable/Enable checksum */
1960 
1961                 /* [unimplemented] */
1962                 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1963                           arg ? "disabled" : "enabled");
1964                 break;
1965 
1966         case TUNSETPERSIST:
1967                 /* Disable/Enable persist mode. Keep an extra reference to the
1968                  * module to prevent the module being unprobed.
1969                  */
1970                 if (arg && !(tun->flags & IFF_PERSIST)) {
1971                         tun->flags |= IFF_PERSIST;
1972                         __module_get(THIS_MODULE);
1973                 }
1974                 if (!arg && (tun->flags & IFF_PERSIST)) {
1975                         tun->flags &= ~IFF_PERSIST;
1976                         module_put(THIS_MODULE);
1977                 }
1978 
1979                 tun_debug(KERN_INFO, tun, "persist %s\n",
1980                           arg ? "enabled" : "disabled");
1981                 break;
1982 
1983         case TUNSETOWNER:
1984                 /* Set owner of the device */
1985                 owner = make_kuid(current_user_ns(), arg);
1986                 if (!uid_valid(owner)) {
1987                         ret = -EINVAL;
1988                         break;
1989                 }
1990                 tun->owner = owner;
1991                 tun_debug(KERN_INFO, tun, "owner set to %u\n",
1992                           from_kuid(&init_user_ns, tun->owner));
1993                 break;
1994 
1995         case TUNSETGROUP:
1996                 /* Set group of the device */
1997                 group = make_kgid(current_user_ns(), arg);
1998                 if (!gid_valid(group)) {
1999                         ret = -EINVAL;
2000                         break;
2001                 }
2002                 tun->group = group;
2003                 tun_debug(KERN_INFO, tun, "group set to %u\n",
2004                           from_kgid(&init_user_ns, tun->group));
2005                 break;
2006 
2007         case TUNSETLINK:
2008                 /* Only allow setting the type when the interface is down */
2009                 if (tun->dev->flags & IFF_UP) {
2010                         tun_debug(KERN_INFO, tun,
2011                                   "Linktype set failed because interface is up\n");
2012                         ret = -EBUSY;
2013                 } else {
2014                         tun->dev->type = (int) arg;
2015                         tun_debug(KERN_INFO, tun, "linktype set to %d\n",
2016                                   tun->dev->type);
2017                         ret = 0;
2018                 }
2019                 break;
2020 
2021 #ifdef TUN_DEBUG
2022         case TUNSETDEBUG:
2023                 tun->debug = arg;
2024                 break;
2025 #endif
2026         case TUNSETOFFLOAD:
2027                 ret = set_offload(tun, arg);
2028                 break;
2029 
2030         case TUNSETTXFILTER:
2031                 /* Can be set only for TAPs */
2032                 ret = -EINVAL;
2033                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2034                         break;
2035                 ret = update_filter(&tun->txflt, (void __user *)arg);
2036                 break;
2037 
2038         case SIOCGIFHWADDR:
2039                 /* Get hw address */
2040                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2041                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
2042                 if (copy_to_user(argp, &ifr, ifreq_len))
2043                         ret = -EFAULT;
2044                 break;
2045 
2046         case SIOCSIFHWADDR:
2047                 /* Set hw address */
2048                 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
2049                           ifr.ifr_hwaddr.sa_data);
2050 
2051                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2052                 break;
2053 
2054         case TUNGETSNDBUF:
2055                 sndbuf = tfile->socket.sk->sk_sndbuf;
2056                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2057                         ret = -EFAULT;
2058                 break;
2059 
2060         case TUNSETSNDBUF:
2061                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2062                         ret = -EFAULT;
2063                         break;
2064                 }
2065 
2066                 tun->sndbuf = sndbuf;
2067                 tun_set_sndbuf(tun);
2068                 break;
2069 
2070         case TUNGETVNETHDRSZ:
2071                 vnet_hdr_sz = tun->vnet_hdr_sz;
2072                 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2073                         ret = -EFAULT;
2074                 break;
2075 
2076         case TUNSETVNETHDRSZ:
2077                 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2078                         ret = -EFAULT;
2079                         break;
2080                 }
2081                 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2082                         ret = -EINVAL;
2083                         break;
2084                 }
2085 
2086                 tun->vnet_hdr_sz = vnet_hdr_sz;
2087                 break;
2088 
2089         case TUNGETVNETLE:
2090                 le = !!(tun->flags & TUN_VNET_LE);
2091                 if (put_user(le, (int __user *)argp))
2092                         ret = -EFAULT;
2093                 break;
2094 
2095         case TUNSETVNETLE:
2096                 if (get_user(le, (int __user *)argp)) {
2097                         ret = -EFAULT;
2098                         break;
2099                 }
2100                 if (le)
2101                         tun->flags |= TUN_VNET_LE;
2102                 else
2103                         tun->flags &= ~TUN_VNET_LE;
2104                 break;
2105 
2106         case TUNGETVNETBE:
2107                 ret = tun_get_vnet_be(tun, argp);
2108                 break;
2109 
2110         case TUNSETVNETBE:
2111                 ret = tun_set_vnet_be(tun, argp);
2112                 break;
2113 
2114         case TUNATTACHFILTER:
2115                 /* Can be set only for TAPs */
2116                 ret = -EINVAL;
2117                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2118                         break;
2119                 ret = -EFAULT;
2120                 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2121                         break;
2122 
2123                 ret = tun_attach_filter(tun);
2124                 break;
2125 
2126         case TUNDETACHFILTER:
2127                 /* Can be set only for TAPs */
2128                 ret = -EINVAL;
2129                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2130                         break;
2131                 ret = 0;
2132                 tun_detach_filter(tun, tun->numqueues);
2133                 break;
2134 
2135         case TUNGETFILTER:
2136                 ret = -EINVAL;
2137                 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2138                         break;
2139                 ret = -EFAULT;
2140                 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2141                         break;
2142                 ret = 0;
2143                 break;
2144 
2145         default:
2146                 ret = -EINVAL;
2147                 break;
2148         }
2149 
2150 unlock:
2151         rtnl_unlock();
2152         if (tun)
2153                 tun_put(tun);
2154         return ret;
2155 }
2156 
2157 static long tun_chr_ioctl(struct file *file,
2158                           unsigned int cmd, unsigned long arg)
2159 {
2160         return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2161 }
2162 
2163 #ifdef CONFIG_COMPAT
2164 static long tun_chr_compat_ioctl(struct file *file,
2165                          unsigned int cmd, unsigned long arg)
2166 {
2167         switch (cmd) {
2168         case TUNSETIFF:
2169         case TUNGETIFF:
2170         case TUNSETTXFILTER:
2171         case TUNGETSNDBUF:
2172         case TUNSETSNDBUF:
2173         case SIOCGIFHWADDR:
2174         case SIOCSIFHWADDR:
2175                 arg = (unsigned long)compat_ptr(arg);
2176                 break;
2177         default:
2178                 arg = (compat_ulong_t)arg;
2179                 break;
2180         }
2181 
2182         /*
2183          * compat_ifreq is shorter than ifreq, so we must not access beyond
2184          * the end of that structure. All fields that are used in this
2185          * driver are compatible though, we don't need to convert the
2186          * contents.
2187          */
2188         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2189 }
2190 #endif /* CONFIG_COMPAT */
2191 
2192 static int tun_chr_fasync(int fd, struct file *file, int on)
2193 {
2194         struct tun_file *tfile = file->private_data;
2195         int ret;
2196 
2197         if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2198                 goto out;
2199 
2200         if (on) {
2201                 __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2202                 tfile->flags |= TUN_FASYNC;
2203         } else
2204                 tfile->flags &= ~TUN_FASYNC;
2205         ret = 0;
2206 out:
2207         return ret;
2208 }
2209 
2210 static int tun_chr_open(struct inode *inode, struct file * file)
2211 {
2212         struct net *net = current->nsproxy->net_ns;
2213         struct tun_file *tfile;
2214 
2215         DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2216 
2217         tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
2218                                             &tun_proto, 0);
2219         if (!tfile)
2220                 return -ENOMEM;
2221         RCU_INIT_POINTER(tfile->tun, NULL);
2222         tfile->flags = 0;
2223         tfile->ifindex = 0;
2224 
2225         init_waitqueue_head(&tfile->wq.wait);
2226         RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
2227 
2228         tfile->socket.file = file;
2229         tfile->socket.ops = &tun_socket_ops;
2230 
2231         sock_init_data(&tfile->socket, &tfile->sk);
2232 
2233         tfile->sk.sk_write_space = tun_sock_write_space;
2234         tfile->sk.sk_sndbuf = INT_MAX;
2235 
2236         file->private_data = tfile;
2237         INIT_LIST_HEAD(&tfile->next);
2238 
2239         sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2240 
2241         return 0;
2242 }
2243 
2244 static int tun_chr_close(struct inode *inode, struct file *file)
2245 {
2246         struct tun_file *tfile = file->private_data;
2247 
2248         tun_detach(tfile, true);
2249 
2250         return 0;
2251 }
2252 
2253 #ifdef CONFIG_PROC_FS
2254 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2255 {
2256         struct tun_struct *tun;
2257         struct ifreq ifr;
2258 
2259         memset(&ifr, 0, sizeof(ifr));
2260 
2261         rtnl_lock();
2262         tun = tun_get(f);
2263         if (tun)
2264                 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2265         rtnl_unlock();
2266 
2267         if (tun)
2268                 tun_put(tun);
2269 
2270         seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2271 }
2272 #endif
2273 
2274 static const struct file_operations tun_fops = {
2275         .owner  = THIS_MODULE,
2276         .llseek = no_llseek,
2277         .read_iter  = tun_chr_read_iter,
2278         .write_iter = tun_chr_write_iter,
2279         .poll   = tun_chr_poll,
2280         .unlocked_ioctl = tun_chr_ioctl,
2281 #ifdef CONFIG_COMPAT
2282         .compat_ioctl = tun_chr_compat_ioctl,
2283 #endif
2284         .open   = tun_chr_open,
2285         .release = tun_chr_close,
2286         .fasync = tun_chr_fasync,
2287 #ifdef CONFIG_PROC_FS
2288         .show_fdinfo = tun_chr_show_fdinfo,
2289 #endif
2290 };
2291 
2292 static struct miscdevice tun_miscdev = {
2293         .minor = TUN_MINOR,
2294         .name = "tun",
2295         .nodename = "net/tun",
2296         .fops = &tun_fops,
2297 };
2298 
2299 /* ethtool interface */
2300 
2301 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2302 {
2303         cmd->supported          = 0;
2304         cmd->advertising        = 0;
2305         ethtool_cmd_speed_set(cmd, SPEED_10);
2306         cmd->duplex             = DUPLEX_FULL;
2307         cmd->port               = PORT_TP;
2308         cmd->phy_address        = 0;
2309         cmd->transceiver        = XCVR_INTERNAL;
2310         cmd->autoneg            = AUTONEG_DISABLE;
2311         cmd->maxtxpkt           = 0;
2312         cmd->maxrxpkt           = 0;
2313         return 0;
2314 }
2315 
2316 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2317 {
2318         struct tun_struct *tun = netdev_priv(dev);
2319 
2320         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2321         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2322 
2323         switch (tun->flags & TUN_TYPE_MASK) {
2324         case IFF_TUN:
2325                 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2326                 break;
2327         case IFF_TAP:
2328                 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2329                 break;
2330         }
2331 }
2332 
2333 static u32 tun_get_msglevel(struct net_device *dev)
2334 {
2335 #ifdef TUN_DEBUG
2336         struct tun_struct *tun = netdev_priv(dev);
2337         return tun->debug;
2338 #else
2339         return -EOPNOTSUPP;
2340 #endif
2341 }
2342 
2343 static void tun_set_msglevel(struct net_device *dev, u32 value)
2344 {
2345 #ifdef TUN_DEBUG
2346         struct tun_struct *tun = netdev_priv(dev);
2347         tun->debug = value;
2348 #endif
2349 }
2350 
2351 static const struct ethtool_ops tun_ethtool_ops = {
2352         .get_settings   = tun_get_settings,
2353         .get_drvinfo    = tun_get_drvinfo,
2354         .get_msglevel   = tun_get_msglevel,
2355         .set_msglevel   = tun_set_msglevel,
2356         .get_link       = ethtool_op_get_link,
2357         .get_ts_info    = ethtool_op_get_ts_info,
2358 };
2359 
2360 
2361 static int __init tun_init(void)
2362 {
2363         int ret = 0;
2364 
2365         pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2366         pr_info("%s\n", DRV_COPYRIGHT);
2367 
2368         ret = rtnl_link_register(&tun_link_ops);
2369         if (ret) {
2370                 pr_err("Can't register link_ops\n");
2371                 goto err_linkops;
2372         }
2373 
2374         ret = misc_register(&tun_miscdev);
2375         if (ret) {
2376                 pr_err("Can't register misc device %d\n", TUN_MINOR);
2377                 goto err_misc;
2378         }
2379         return  0;
2380 err_misc:
2381         rtnl_link_unregister(&tun_link_ops);
2382 err_linkops:
2383         return ret;
2384 }
2385 
2386 static void tun_cleanup(void)
2387 {
2388         misc_deregister(&tun_miscdev);
2389         rtnl_link_unregister(&tun_link_ops);
2390 }
2391 
2392 /* Get an underlying socket object from tun file.  Returns error unless file is
2393  * attached to a device.  The returned object works like a packet socket, it
2394  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
2395  * holding a reference to the file for as long as the socket is in use. */
2396 struct socket *tun_get_socket(struct file *file)
2397 {
2398         struct tun_file *tfile;
2399         if (file->f_op != &tun_fops)
2400                 return ERR_PTR(-EINVAL);
2401         tfile = file->private_data;
2402         if (!tfile)
2403                 return ERR_PTR(-EBADFD);
2404         return &tfile->socket;
2405 }
2406 EXPORT_SYMBOL_GPL(tun_get_socket);
2407 
2408 module_init(tun_init);
2409 module_exit(tun_cleanup);
2410 MODULE_DESCRIPTION(DRV_DESCRIPTION);
2411 MODULE_AUTHOR(DRV_COPYRIGHT);
2412 MODULE_LICENSE("GPL");
2413 MODULE_ALIAS_MISCDEV(TUN_MINOR);
2414 MODULE_ALIAS("devname:net/tun");
2415 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us