
Linux/drivers/staging/gdm724x/gdm_mux.c

/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

static struct workqueue_struct *mux_rx_wq;

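/* On-the-wire packet_type value used for each tty index. */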
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
        .match_flags = \
                USB_DEVICE_ID_MATCH_DEVICE |\
                USB_DEVICE_ID_MATCH_INT_CLASS |\
                USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
        .idVendor = vid,\
        .idProduct = pid,\
        .bInterfaceClass = USB_CLASS_COMM,\
        .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
        { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
        {}
};

MODULE_DEVICE_TABLE(usb, id_table);

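/* Map a received packet_type field back to its tty index; returns -1 if unknown. */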
static int packet_type_to_index(u16 packetType)
{
        int i;

        for (i = 0; i < TTY_MAX_COUNT; i++) {
                if (packet_type[i] == packetType)
                        return i;
        }

        return -1;
}

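/*
 * Allocate a TX descriptor (URB plus bounce buffer). GFP_ATOMIC is used
 * because the caller may hold mux_dev->write_lock with interrupts disabled.
 */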
static struct mux_tx *alloc_mux_tx(int len)
{
        struct mux_tx *t = NULL;

        t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
        if (!t)
                return NULL;

        t->urb = usb_alloc_urb(0, GFP_ATOMIC);
        t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
        if (!t->urb || !t->buf) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
                return NULL;
        }

        return t;
}

static void free_mux_tx(struct mux_tx *t)
{
        if (t) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
        }
}

static struct mux_rx *alloc_mux_rx(void)
{
        struct mux_rx *r = NULL;

        r = kzalloc(sizeof(struct mux_rx), GFP_KERNEL);
        if (!r)
                return NULL;

        r->urb = usb_alloc_urb(0, GFP_KERNEL);
        r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
        if (!r->urb || !r->buf) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
                return NULL;
        }

        return r;
}

static void free_mux_rx(struct mux_rx *r)
{
        if (r) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
        }
}

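/* RX descriptors are recycled through rx->rx_free_list under free_list_lock. */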
static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
        struct mux_rx *r;
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);

        if (list_empty(&rx->rx_free_list)) {
                spin_unlock_irqrestore(&rx->free_list_lock, flags);
                return NULL;
        }

        r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
        list_del(&r->free_list);

        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_add_tail(&r->free_list, &rx->rx_free_list);
        spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

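/*
 * Walk the concatenated MUX frames in a completed RX buffer. Each frame is a
 * mux_pkt_header (START_FLAG, seq_num, payload_size, packet_type) followed by
 * the payload, padded so the whole frame is 4-byte aligned. Valid payloads are
 * handed to the tty layer through r->callback, and a TO_HOST_* status is
 * returned to the caller.
 */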
static int up_to_host(struct mux_rx *r)
{
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct mux_pkt_header *mux_header;
        unsigned int start_flag;
        unsigned int payload_size;
        unsigned short packet_type;
        int dummy_cnt;
        u32 packet_size_sum = r->offset;
        int index;
        int ret = TO_HOST_INVALID_PACKET;
        int len = r->len;

        while (1) {
                mux_header = (struct mux_pkt_header *)(r->buf +
                                                       packet_size_sum);
                start_flag = __le32_to_cpu(mux_header->start_flag);
                payload_size = __le32_to_cpu(mux_header->payload_size);
                packet_type = __le16_to_cpu(mux_header->packet_type);

                if (start_flag != START_FLAG) {
                        pr_err("invalid START_FLAG %x\n", start_flag);
                        break;
                }

                dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4) -
                            (MUX_HEADER_SIZE + payload_size);

                if (len - packet_size_sum <
                        MUX_HEADER_SIZE + payload_size + dummy_cnt) {
                        pr_err("invalid payload : %d %d %04x\n",
                               payload_size, len, packet_type);
                        break;
                }

                index = packet_type_to_index(packet_type);
                if (index < 0) {
                        pr_err("invalid index %d\n", index);
                        break;
                }

                ret = r->callback(mux_header->data,
                                payload_size,
                                index,
                                mux_dev->tty_dev,
                                RECV_PACKET_PROCESS_CONTINUE
                                );
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
                        r->offset += packet_size_sum;
                        break;
                }

                packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
                if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
                        ret = r->callback(NULL,
                                        0,
                                        index,
                                        mux_dev->tty_dev,
                                        RECV_PACKET_PROCESS_COMPLETE
                                        );
                        break;
                }
        }

        return ret;
}

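/* Work item: drain rx->to_host_list, parse each completed buffer and push it up. */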
static void do_rx(struct work_struct *work)
{
        struct mux_dev *mux_dev =
                container_of(work, struct mux_dev, work_rx.work);
        struct mux_rx *r;
        struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
        unsigned long flags;
        int ret = 0;

        while (1) {
                spin_lock_irqsave(&rx->to_host_lock, flags);
                if (list_empty(&rx->to_host_list)) {
                        spin_unlock_irqrestore(&rx->to_host_lock, flags);
                        break;
                }
                r = list_entry(rx->to_host_list.next, struct mux_rx,
                               to_host_list);
                list_del(&r->to_host_list);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);

                ret = up_to_host(r);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
                        pr_err("failed to send mux data to host\n");
                else
                        put_rx_struct(rx, r);
        }
}

static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
        unsigned long flags;
        struct mux_rx   *r_remove, *r_remove_next;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
                                 rx_submit_list) {
                if (r == r_remove)
                        list_del(&r->rx_submit_list);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

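/*
 * Bulk-in completion handler (may run in interrupt context): on success the
 * buffer is queued on to_host_list and the rx work is scheduled; on error the
 * descriptor goes straight back to the free pool.
 */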
static void gdm_mux_rcv_complete(struct urb *urb)
{
        struct mux_rx *r = urb->context;
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;

        remove_rx_submit_list(r, rx);

        if (urb->status) {
                if (mux_dev->usb_state == PM_NORMAL)
                        pr_err("%s: urb status error %d\n",
                               __func__, urb->status);
                put_rx_struct(rx, r);
        } else {
                r->len = r->urb->actual_length;
                spin_lock_irqsave(&rx->to_host_lock, flags);
                list_add_tail(&r->to_host_list, &rx->to_host_list);
                queue_work(mux_rx_wq, &mux_dev->work_rx.work);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);
        }
}

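/*
 * Queue one bulk-in URB on the data endpoint and register the tty receive
 * callback. The descriptor stays on rx_submit_list until the URB completes.
 */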
static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
                        int tty_index, struct tty_dev *tty_dev, int complete))
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;
        int ret;

        if (!usbdev) {
                pr_err("device is disconnected\n");
                return -ENODEV;
        }

        r = get_rx_struct(rx);
        if (!r) {
                pr_err("get_rx_struct fail\n");
                return -ENOMEM;
        }

        r->offset = 0;
        r->mux_dev = (void *)mux_dev;
        r->callback = cb;
        mux_dev->rx_cb = cb;

        usb_fill_bulk_urb(r->urb,
                          usbdev,
                          usb_rcvbulkpipe(usbdev, 0x86),
                          r->buf,
                          MUX_RX_MAX_SIZE,
                          gdm_mux_rcv_complete,
                          r);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        ret = usb_submit_urb(r->urb, GFP_KERNEL);

        if (ret) {
                spin_lock_irqsave(&rx->submit_list_lock, flags);
                list_del(&r->rx_submit_list);
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);

                put_rx_struct(rx, r);

                pr_err("usb_submit_urb ret=%d\n", ret);
        }

        usb_mark_last_busy(usbdev);

        return ret;
}

static void gdm_mux_send_complete(struct urb *urb)
{
        struct mux_tx *t = urb->context;

        if (urb->status == -ECONNRESET) {
                pr_info("CONNRESET\n");
                free_mux_tx(t);
                return;
        }

        if (t->callback)
                t->callback(t->cb_data);

        free_mux_tx(t);
}

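/*
 * Frame one tty write as a MUX packet: prepend the header (START_FLAG,
 * sequence number, payload size, packet type), zero-pad the frame to a
 * 4-byte boundary and submit it on the bulk-out endpoint.
 */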
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
                        void (*cb)(void *data), void *cb_data)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_pkt_header *mux_header;
        struct mux_tx *t = NULL;
        static u32 seq_num = 1;
        int dummy_cnt;
        int total_len;
        int ret;
        unsigned long flags;

        if (mux_dev->usb_state == PM_SUSPEND) {
                ret = usb_autopm_get_interface(mux_dev->intf);
                if (!ret)
                        usb_autopm_put_interface(mux_dev->intf);
        }

        spin_lock_irqsave(&mux_dev->write_lock, flags);

        dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4) -
                    (MUX_HEADER_SIZE + len);

        total_len = len + MUX_HEADER_SIZE + dummy_cnt;

        t = alloc_mux_tx(total_len);
        if (!t) {
                pr_err("alloc_mux_tx fail\n");
                spin_unlock_irqrestore(&mux_dev->write_lock, flags);
                return -ENOMEM;
        }

        mux_header = (struct mux_pkt_header *)t->buf;
        mux_header->start_flag = __cpu_to_le32(START_FLAG);
        mux_header->seq_num = __cpu_to_le32(seq_num++);
        mux_header->payload_size = __cpu_to_le32((u32)len);
        mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

        memcpy(t->buf+MUX_HEADER_SIZE, data, len);
        memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);

        t->len = total_len;
        t->callback = cb;
        t->cb_data = cb_data;

        usb_fill_bulk_urb(t->urb,
                          usbdev,
                          usb_sndbulkpipe(usbdev, 5),
                          t->buf,
                          total_len,
                          gdm_mux_send_complete,
                          t);

        ret = usb_submit_urb(t->urb, GFP_ATOMIC);

        spin_unlock_irqrestore(&mux_dev->write_lock, flags);

        if (ret)
                pr_err("usb_submit_urb Error: %d\n", ret);

        usb_mark_last_busy(usbdev);

        return ret;
}

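/* Send a CDC ACM class control request to the device (wIndex 2 = this interface). */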
static int gdm_mux_send_control(void *priv_dev, int request, int value,
                                void *buf, int len)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        int ret;

        ret = usb_control_msg(usbdev,
                              usb_sndctrlpipe(usbdev, 0),
                              request,
                              USB_RT_ACM,
                              value,
                              2,
                              buf,
                              len,
                              5000
                             );

        if (ret < 0)
                pr_err("usb_control_msg error: %d\n", ret);

        return ret < 0 ? ret : 0;
}

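/*
 * Tear down the RX machinery: cancel the rx work, kill any in-flight URBs
 * (dropping the lock around usb_kill_urb, which may sleep), then free every
 * descriptor left on the free and to_host lists.
 */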
static void release_usb(struct mux_dev *mux_dev)
{
        struct rx_cxt           *rx = &mux_dev->rx;
        struct mux_rx           *r, *r_next;
        unsigned long           flags;

        cancel_delayed_work(&mux_dev->work_rx);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
                                 rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
                list_del(&r->free_list);
                free_mux_rx(r);
        }
        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        spin_lock_irqsave(&rx->to_host_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
                if (r->mux_dev == (void *)mux_dev) {
                        list_del(&r->to_host_list);
                        free_mux_rx(r);
                }
        }
        spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

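/* Initialise the locks and lists and pre-allocate MAX_ISSUE_NUM * 2 RX descriptors. */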
static int init_usb(struct mux_dev *mux_dev)
{
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        int ret = 0;
        int i;

        spin_lock_init(&mux_dev->write_lock);
        INIT_LIST_HEAD(&rx->to_host_list);
        INIT_LIST_HEAD(&rx->rx_submit_list);
        INIT_LIST_HEAD(&rx->rx_free_list);
        spin_lock_init(&rx->to_host_lock);
        spin_lock_init(&rx->submit_list_lock);
        spin_lock_init(&rx->free_list_lock);

        for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
                r = alloc_mux_rx();
                if (r == NULL) {
                        ret = -ENOMEM;
                        break;
                }

                list_add(&r->free_list, &rx->rx_free_list);
        }

        INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

        return ret;
}

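/*
 * Bind only to interface number 2 of the listed devices; allocate the mux
 * and tty state, set up the RX pool and register the LTE tty device.
 */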
static int gdm_mux_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        struct mux_dev *mux_dev;
        struct tty_dev *tty_dev;
        u16 idVendor, idProduct;
        int bInterfaceNumber;
        int ret;
        int i;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

        idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
        idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

        pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

        if (bInterfaceNumber != 2)
                return -ENODEV;

        mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
        if (!mux_dev)
                return -ENOMEM;

        tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
        if (!tty_dev) {
                ret = -ENOMEM;
                goto err_free_mux;
        }

        mux_dev->usbdev = usbdev;
        mux_dev->control_intf = intf;

        ret = init_usb(mux_dev);
        if (ret)
                goto err_free_usb;

        tty_dev->priv_dev = (void *)mux_dev;
        tty_dev->send_func = gdm_mux_send;
        tty_dev->recv_func = gdm_mux_recv;
        tty_dev->send_control = gdm_mux_send_control;

        ret = register_lte_tty_device(tty_dev, &intf->dev);
        if (ret)
                goto err_unregister_tty;

        for (i = 0; i < TTY_MAX_COUNT; i++)
                mux_dev->tty_dev = tty_dev;

        mux_dev->intf = intf;
        mux_dev->usb_state = PM_NORMAL;

        usb_get_dev(usbdev);
        usb_set_intfdata(intf, tty_dev);

        return 0;

err_unregister_tty:
        unregister_lte_tty_device(tty_dev);
err_free_usb:
        release_usb(mux_dev);
        kfree(tty_dev);
err_free_mux:
        kfree(mux_dev);

        return ret;
}

static void gdm_mux_disconnect(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        tty_dev = usb_get_intfdata(intf);

        mux_dev = tty_dev->priv_dev;

        release_usb(mux_dev);
        unregister_lte_tty_device(tty_dev);

        kfree(mux_dev);
        kfree(tty_dev);

        usb_put_dev(usbdev);
}

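/* Suspend hook: flag the device as suspended and kill any in-flight RX URBs. */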
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct rx_cxt *rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;
        rx = &mux_dev->rx;

        if (mux_dev->usb_state != PM_NORMAL) {
                pr_err("usb suspend - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_SUSPEND;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
                                 rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        return 0;
}

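/* Resume (and reset_resume) hook: return to PM_NORMAL and re-submit the RX URBs. */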
static int gdm_mux_resume(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        u8 i;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;

        if (mux_dev->usb_state != PM_SUSPEND) {
                pr_err("usb resume - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_NORMAL;

        for (i = 0; i < MAX_ISSUE_NUM; i++)
                gdm_mux_recv(mux_dev, mux_dev->rx_cb);

        return 0;
}

static struct usb_driver gdm_mux_driver = {
        .name = "gdm_mux",
        .probe = gdm_mux_probe,
        .disconnect = gdm_mux_disconnect,
        .id_table = id_table,
        .supports_autosuspend = 1,
        .suspend = gdm_mux_suspend,
        .resume = gdm_mux_resume,
        .reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
        mux_rx_wq = create_workqueue("mux_rx_wq");
        if (mux_rx_wq == NULL) {
                pr_err("work queue create fail\n");
                return -1;
        }

        register_lte_tty_driver();

        return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
        unregister_lte_tty_driver();

        if (mux_rx_wq) {
                flush_workqueue(mux_rx_wq);
                destroy_workqueue(mux_rx_wq);
        }

        usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");
