Linux/drivers/hv/vmbus_drv.c

  1 /*
  2  * Copyright (c) 2009, Microsoft Corporation.
  3  *
  4  * This program is free software; you can redistribute it and/or modify it
  5  * under the terms and conditions of the GNU General Public License,
  6  * version 2, as published by the Free Software Foundation.
  7  *
  8  * This program is distributed in the hope it will be useful, but WITHOUT
  9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11  * more details.
 12  *
 13  * You should have received a copy of the GNU General Public License along with
 14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 15  * Place - Suite 330, Boston, MA 02111-1307 USA.
 16  *
 17  * Authors:
 18  *   Haiyang Zhang <haiyangz@microsoft.com>
 19  *   Hank Janssen  <hjanssen@microsoft.com>
 20  *   K. Y. Srinivasan <kys@microsoft.com>
 21  *
 22  */
 23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 24 
 25 #include <linux/init.h>
 26 #include <linux/module.h>
 27 #include <linux/device.h>
 28 #include <linux/interrupt.h>
 29 #include <linux/sysctl.h>
 30 #include <linux/slab.h>
 31 #include <linux/acpi.h>
 32 #include <linux/completion.h>
 33 #include <linux/hyperv.h>
 34 #include <linux/kernel_stat.h>
 35 #include <linux/clockchips.h>
 36 #include <asm/hyperv.h>
 37 #include <asm/hypervisor.h>
 38 #include <asm/mshyperv.h>
 39 #include "hyperv_vmbus.h"
 40 
 41 static struct acpi_device  *hv_acpi_dev;
 42 
 43 static struct tasklet_struct msg_dpc;
 44 static struct completion probe_event;
 45 static int irq;
 46 
 47 struct resource hyperv_mmio = {
 48         .name  = "hyperv mmio",
 49         .flags = IORESOURCE_MEM,
 50 };
 51 EXPORT_SYMBOL_GPL(hyperv_mmio);
 52 
 53 static int vmbus_exists(void)
 54 {
 55         if (hv_acpi_dev == NULL)
 56                 return -ENODEV;
 57 
 58         return 0;
 59 }
 60 
 61 #define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
 62 static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
 63 {
 64         int i;
 65         for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
 66                 sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
 67 }
 68 
 69 static u8 channel_monitor_group(struct vmbus_channel *channel)
 70 {
 71         return (u8)channel->offermsg.monitorid / 32;
 72 }
 73 
 74 static u8 channel_monitor_offset(struct vmbus_channel *channel)
 75 {
 76         return (u8)channel->offermsg.monitorid % 32;
 77 }
 78 
 79 static u32 channel_pending(struct vmbus_channel *channel,
 80                            struct hv_monitor_page *monitor_page)
 81 {
 82         u8 monitor_group = channel_monitor_group(channel);
 83         return monitor_page->trigger_group[monitor_group].pending;
 84 }
 85 
 86 static u32 channel_latency(struct vmbus_channel *channel,
 87                            struct hv_monitor_page *monitor_page)
 88 {
 89         u8 monitor_group = channel_monitor_group(channel);
 90         u8 monitor_offset = channel_monitor_offset(channel);
 91         return monitor_page->latency[monitor_group][monitor_offset];
 92 }
 93 
 94 static u32 channel_conn_id(struct vmbus_channel *channel,
 95                            struct hv_monitor_page *monitor_page)
 96 {
 97         u8 monitor_group = channel_monitor_group(channel);
 98         u8 monitor_offset = channel_monitor_offset(channel);
 99         return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
100 }
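
/*
 * Editor's sketch, not part of the original file: a worked example of how
 * the helpers above decompose a channel's 8-bit monitor ID into the group
 * index and the offset within that group.  The monitor ID value is made up
 * and the function exists only for illustration.
 */
static void example_monitor_decompose(void)
{
        u8 monitorid = 37;              /* hypothetical value from a channel offer */
        u8 group = monitorid / 32;      /* -> 1: index into trigger_group[]/latency[][] */
        u8 offset = monitorid % 32;     /* -> 5: slot within that group */

        pr_debug("monitor id %u -> group %u, offset %u\n",
                 monitorid, group, offset);
}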
101 
102 static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
103                        char *buf)
104 {
105         struct hv_device *hv_dev = device_to_hv_device(dev);
106 
107         if (!hv_dev->channel)
108                 return -ENODEV;
109         return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
110 }
111 static DEVICE_ATTR_RO(id);
112 
113 static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
114                           char *buf)
115 {
116         struct hv_device *hv_dev = device_to_hv_device(dev);
117 
118         if (!hv_dev->channel)
119                 return -ENODEV;
120         return sprintf(buf, "%d\n", hv_dev->channel->state);
121 }
122 static DEVICE_ATTR_RO(state);
123 
124 static ssize_t monitor_id_show(struct device *dev,
125                                struct device_attribute *dev_attr, char *buf)
126 {
127         struct hv_device *hv_dev = device_to_hv_device(dev);
128 
129         if (!hv_dev->channel)
130                 return -ENODEV;
131         return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
132 }
133 static DEVICE_ATTR_RO(monitor_id);
134 
135 static ssize_t class_id_show(struct device *dev,
136                                struct device_attribute *dev_attr, char *buf)
137 {
138         struct hv_device *hv_dev = device_to_hv_device(dev);
139 
140         if (!hv_dev->channel)
141                 return -ENODEV;
142         return sprintf(buf, "{%pUl}\n",
143                        hv_dev->channel->offermsg.offer.if_type.b);
144 }
145 static DEVICE_ATTR_RO(class_id);
146 
147 static ssize_t device_id_show(struct device *dev,
148                               struct device_attribute *dev_attr, char *buf)
149 {
150         struct hv_device *hv_dev = device_to_hv_device(dev);
151 
152         if (!hv_dev->channel)
153                 return -ENODEV;
154         return sprintf(buf, "{%pUl}\n",
155                        hv_dev->channel->offermsg.offer.if_instance.b);
156 }
157 static DEVICE_ATTR_RO(device_id);
158 
159 static ssize_t modalias_show(struct device *dev,
160                              struct device_attribute *dev_attr, char *buf)
161 {
162         struct hv_device *hv_dev = device_to_hv_device(dev);
163         char alias_name[VMBUS_ALIAS_LEN + 1];
164 
165         print_alias_name(hv_dev, alias_name);
166         return sprintf(buf, "vmbus:%s\n", alias_name);
167 }
168 static DEVICE_ATTR_RO(modalias);
169 
170 static ssize_t server_monitor_pending_show(struct device *dev,
171                                            struct device_attribute *dev_attr,
172                                            char *buf)
173 {
174         struct hv_device *hv_dev = device_to_hv_device(dev);
175 
176         if (!hv_dev->channel)
177                 return -ENODEV;
178         return sprintf(buf, "%d\n",
179                        channel_pending(hv_dev->channel,
180                                        vmbus_connection.monitor_pages[1]));
181 }
182 static DEVICE_ATTR_RO(server_monitor_pending);
183 
184 static ssize_t client_monitor_pending_show(struct device *dev,
185                                            struct device_attribute *dev_attr,
186                                            char *buf)
187 {
188         struct hv_device *hv_dev = device_to_hv_device(dev);
189 
190         if (!hv_dev->channel)
191                 return -ENODEV;
192         return sprintf(buf, "%d\n",
193                        channel_pending(hv_dev->channel,
194                                        vmbus_connection.monitor_pages[1]));
195 }
196 static DEVICE_ATTR_RO(client_monitor_pending);
197 
198 static ssize_t server_monitor_latency_show(struct device *dev,
199                                            struct device_attribute *dev_attr,
200                                            char *buf)
201 {
202         struct hv_device *hv_dev = device_to_hv_device(dev);
203 
204         if (!hv_dev->channel)
205                 return -ENODEV;
206         return sprintf(buf, "%d\n",
207                        channel_latency(hv_dev->channel,
208                                        vmbus_connection.monitor_pages[0]));
209 }
210 static DEVICE_ATTR_RO(server_monitor_latency);
211 
212 static ssize_t client_monitor_latency_show(struct device *dev,
213                                            struct device_attribute *dev_attr,
214                                            char *buf)
215 {
216         struct hv_device *hv_dev = device_to_hv_device(dev);
217 
218         if (!hv_dev->channel)
219                 return -ENODEV;
220         return sprintf(buf, "%d\n",
221                        channel_latency(hv_dev->channel,
222                                        vmbus_connection.monitor_pages[1]));
223 }
224 static DEVICE_ATTR_RO(client_monitor_latency);
225 
226 static ssize_t server_monitor_conn_id_show(struct device *dev,
227                                            struct device_attribute *dev_attr,
228                                            char *buf)
229 {
230         struct hv_device *hv_dev = device_to_hv_device(dev);
231 
232         if (!hv_dev->channel)
233                 return -ENODEV;
234         return sprintf(buf, "%d\n",
235                        channel_conn_id(hv_dev->channel,
236                                        vmbus_connection.monitor_pages[0]));
237 }
238 static DEVICE_ATTR_RO(server_monitor_conn_id);
239 
240 static ssize_t client_monitor_conn_id_show(struct device *dev,
241                                            struct device_attribute *dev_attr,
242                                            char *buf)
243 {
244         struct hv_device *hv_dev = device_to_hv_device(dev);
245 
246         if (!hv_dev->channel)
247                 return -ENODEV;
248         return sprintf(buf, "%d\n",
249                        channel_conn_id(hv_dev->channel,
250                                        vmbus_connection.monitor_pages[1]));
251 }
252 static DEVICE_ATTR_RO(client_monitor_conn_id);
253 
254 static ssize_t out_intr_mask_show(struct device *dev,
255                                   struct device_attribute *dev_attr, char *buf)
256 {
257         struct hv_device *hv_dev = device_to_hv_device(dev);
258         struct hv_ring_buffer_debug_info outbound;
259 
260         if (!hv_dev->channel)
261                 return -ENODEV;
262         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
263         return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
264 }
265 static DEVICE_ATTR_RO(out_intr_mask);
266 
267 static ssize_t out_read_index_show(struct device *dev,
268                                    struct device_attribute *dev_attr, char *buf)
269 {
270         struct hv_device *hv_dev = device_to_hv_device(dev);
271         struct hv_ring_buffer_debug_info outbound;
272 
273         if (!hv_dev->channel)
274                 return -ENODEV;
275         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
276         return sprintf(buf, "%d\n", outbound.current_read_index);
277 }
278 static DEVICE_ATTR_RO(out_read_index);
279 
280 static ssize_t out_write_index_show(struct device *dev,
281                                     struct device_attribute *dev_attr,
282                                     char *buf)
283 {
284         struct hv_device *hv_dev = device_to_hv_device(dev);
285         struct hv_ring_buffer_debug_info outbound;
286 
287         if (!hv_dev->channel)
288                 return -ENODEV;
289         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
290         return sprintf(buf, "%d\n", outbound.current_write_index);
291 }
292 static DEVICE_ATTR_RO(out_write_index);
293 
294 static ssize_t out_read_bytes_avail_show(struct device *dev,
295                                          struct device_attribute *dev_attr,
296                                          char *buf)
297 {
298         struct hv_device *hv_dev = device_to_hv_device(dev);
299         struct hv_ring_buffer_debug_info outbound;
300 
301         if (!hv_dev->channel)
302                 return -ENODEV;
303         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
304         return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
305 }
306 static DEVICE_ATTR_RO(out_read_bytes_avail);
307 
308 static ssize_t out_write_bytes_avail_show(struct device *dev,
309                                           struct device_attribute *dev_attr,
310                                           char *buf)
311 {
312         struct hv_device *hv_dev = device_to_hv_device(dev);
313         struct hv_ring_buffer_debug_info outbound;
314 
315         if (!hv_dev->channel)
316                 return -ENODEV;
317         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
318         return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
319 }
320 static DEVICE_ATTR_RO(out_write_bytes_avail);
321 
322 static ssize_t in_intr_mask_show(struct device *dev,
323                                  struct device_attribute *dev_attr, char *buf)
324 {
325         struct hv_device *hv_dev = device_to_hv_device(dev);
326         struct hv_ring_buffer_debug_info inbound;
327 
328         if (!hv_dev->channel)
329                 return -ENODEV;
330         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
331         return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
332 }
333 static DEVICE_ATTR_RO(in_intr_mask);
334 
335 static ssize_t in_read_index_show(struct device *dev,
336                                   struct device_attribute *dev_attr, char *buf)
337 {
338         struct hv_device *hv_dev = device_to_hv_device(dev);
339         struct hv_ring_buffer_debug_info inbound;
340 
341         if (!hv_dev->channel)
342                 return -ENODEV;
343         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
344         return sprintf(buf, "%d\n", inbound.current_read_index);
345 }
346 static DEVICE_ATTR_RO(in_read_index);
347 
348 static ssize_t in_write_index_show(struct device *dev,
349                                    struct device_attribute *dev_attr, char *buf)
350 {
351         struct hv_device *hv_dev = device_to_hv_device(dev);
352         struct hv_ring_buffer_debug_info inbound;
353 
354         if (!hv_dev->channel)
355                 return -ENODEV;
356         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
357         return sprintf(buf, "%d\n", inbound.current_write_index);
358 }
359 static DEVICE_ATTR_RO(in_write_index);
360 
361 static ssize_t in_read_bytes_avail_show(struct device *dev,
362                                         struct device_attribute *dev_attr,
363                                         char *buf)
364 {
365         struct hv_device *hv_dev = device_to_hv_device(dev);
366         struct hv_ring_buffer_debug_info inbound;
367 
368         if (!hv_dev->channel)
369                 return -ENODEV;
370         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
371         return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
372 }
373 static DEVICE_ATTR_RO(in_read_bytes_avail);
374 
375 static ssize_t in_write_bytes_avail_show(struct device *dev,
376                                          struct device_attribute *dev_attr,
377                                          char *buf)
378 {
379         struct hv_device *hv_dev = device_to_hv_device(dev);
380         struct hv_ring_buffer_debug_info inbound;
381 
382         if (!hv_dev->channel)
383                 return -ENODEV;
384         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
385         return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
386 }
387 static DEVICE_ATTR_RO(in_write_bytes_avail);
388 
389 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
390 static struct attribute *vmbus_attrs[] = {
391         &dev_attr_id.attr,
392         &dev_attr_state.attr,
393         &dev_attr_monitor_id.attr,
394         &dev_attr_class_id.attr,
395         &dev_attr_device_id.attr,
396         &dev_attr_modalias.attr,
397         &dev_attr_server_monitor_pending.attr,
398         &dev_attr_client_monitor_pending.attr,
399         &dev_attr_server_monitor_latency.attr,
400         &dev_attr_client_monitor_latency.attr,
401         &dev_attr_server_monitor_conn_id.attr,
402         &dev_attr_client_monitor_conn_id.attr,
403         &dev_attr_out_intr_mask.attr,
404         &dev_attr_out_read_index.attr,
405         &dev_attr_out_write_index.attr,
406         &dev_attr_out_read_bytes_avail.attr,
407         &dev_attr_out_write_bytes_avail.attr,
408         &dev_attr_in_intr_mask.attr,
409         &dev_attr_in_read_index.attr,
410         &dev_attr_in_write_index.attr,
411         &dev_attr_in_read_bytes_avail.attr,
412         &dev_attr_in_write_bytes_avail.attr,
413         NULL,
414 };
415 ATTRIBUTE_GROUPS(vmbus);
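
/*
 * Editor's sketch, not part of the original file: the attributes above show
 * up as read-only files under /sys/bus/vmbus/devices/<device>/.  A minimal
 * userspace reader is shown inside this comment so the listing remains a
 * valid kernel source file; the device name "vmbus_0_1" is hypothetical and
 * follows the "vmbus_0_%d" naming used by vmbus_device_register() below.
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              char buf[64];
 *              FILE *f = fopen("/sys/bus/vmbus/devices/vmbus_0_1/class_id", "r");
 *
 *              if (!f)
 *                      return 1;
 *              if (fgets(buf, sizeof(buf), f))
 *                      printf("class_id: %s", buf);
 *              fclose(f);
 *              return 0;
 *      }
 */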
416 
417 /*
418  * vmbus_uevent - add uevent for our device
419  *
 420  * This routine is invoked when a device is added to or removed from the
 421  * vmbus, to generate a uevent for udev in userspace. udev then matches the
 422  * uevent against its rules to load the appropriate driver.
 423  *
 424  * The alias string will be of the form vmbus:guid, where guid is the string
 425  * representation of the device guid (each byte of the guid is represented
 426  * with two hex characters).
427  */
428 static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
429 {
430         struct hv_device *dev = device_to_hv_device(device);
431         int ret;
432         char alias_name[VMBUS_ALIAS_LEN + 1];
433 
434         print_alias_name(dev, alias_name);
435         ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
436         return ret;
437 }
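
/*
 * Editor's sketch, not part of the original file: the MODALIAS generated
 * above is matched against aliases that client drivers export with
 * MODULE_DEVICE_TABLE(vmbus, ...).  depmod turns each table entry into a
 * "vmbus:<32 hex chars>" line in modules.alias, so udev can modprobe the
 * right module for a hot-added device.  The GUID bytes below are made up.
 */
static const struct hv_vmbus_device_id example_id_table[] = {
        { .guid = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
                    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef } },
        { },    /* a null GUID terminates the table (see is_null_guid() below) */
};
MODULE_DEVICE_TABLE(vmbus, example_id_table);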
438 
439 static const uuid_le null_guid;
440 
441 static inline bool is_null_guid(const __u8 *guid)
442 {
443         if (memcmp(guid, &null_guid, sizeof(uuid_le)))
444                 return false;
445         return true;
446 }
447 
448 /*
449  * Return a matching hv_vmbus_device_id pointer.
450  * If there is no match, return NULL.
451  */
452 static const struct hv_vmbus_device_id *hv_vmbus_get_id(
453                                         const struct hv_vmbus_device_id *id,
454                                         const __u8 *guid)
455 {
456         for (; !is_null_guid(id->guid); id++)
457                 if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
458                         return id;
459 
460         return NULL;
461 }
462 
463 
464 
465 /*
466  * vmbus_match - Attempt to match the specified device to the specified driver
467  */
468 static int vmbus_match(struct device *device, struct device_driver *driver)
469 {
470         struct hv_driver *drv = drv_to_hv_drv(driver);
471         struct hv_device *hv_dev = device_to_hv_device(device);
472 
473         if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
474                 return 1;
475 
476         return 0;
477 }
478 
479 /*
480  * vmbus_probe - Add the new vmbus's child device
481  */
482 static int vmbus_probe(struct device *child_device)
483 {
484         int ret = 0;
485         struct hv_driver *drv =
486                         drv_to_hv_drv(child_device->driver);
487         struct hv_device *dev = device_to_hv_device(child_device);
488         const struct hv_vmbus_device_id *dev_id;
489 
490         dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
491         if (drv->probe) {
492                 ret = drv->probe(dev, dev_id);
493                 if (ret != 0)
494                         pr_err("probe failed for device %s (%d)\n",
495                                dev_name(child_device), ret);
496 
497         } else {
498                 pr_err("probe not set for driver %s\n",
499                        dev_name(child_device));
500                 ret = -ENODEV;
501         }
502         return ret;
503 }
504 
505 /*
506  * vmbus_remove - Remove a vmbus device
507  */
508 static int vmbus_remove(struct device *child_device)
509 {
510         struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
511         struct hv_device *dev = device_to_hv_device(child_device);
512 
513         if (drv->remove)
514                 drv->remove(dev);
515         else
516                 pr_err("remove not set for driver %s\n",
517                         dev_name(child_device));
518 
519         return 0;
520 }
521 
522 
523 /*
524  * vmbus_shutdown - Shutdown a vmbus device
525  */
526 static void vmbus_shutdown(struct device *child_device)
527 {
528         struct hv_driver *drv;
529         struct hv_device *dev = device_to_hv_device(child_device);
530 
531 
532         /* The device may not be attached yet */
533         if (!child_device->driver)
534                 return;
535 
536         drv = drv_to_hv_drv(child_device->driver);
537 
538         if (drv->shutdown)
539                 drv->shutdown(dev);
540 
541         return;
542 }
543 
544 
545 /*
546  * vmbus_device_release - Final callback release of the vmbus child device
547  */
548 static void vmbus_device_release(struct device *device)
549 {
550         struct hv_device *hv_dev = device_to_hv_device(device);
551 
552         kfree(hv_dev);
553 
554 }
555 
 556 /* The one and only VMBus bus type */
557 static struct bus_type  hv_bus = {
558         .name =         "vmbus",
559         .match =                vmbus_match,
560         .shutdown =             vmbus_shutdown,
561         .remove =               vmbus_remove,
562         .probe =                vmbus_probe,
563         .uevent =               vmbus_uevent,
564         .dev_groups =           vmbus_groups,
565 };
566 
567 struct onmessage_work_context {
568         struct work_struct work;
569         struct hv_message msg;
570 };
571 
572 static void vmbus_onmessage_work(struct work_struct *work)
573 {
574         struct onmessage_work_context *ctx;
575 
576         ctx = container_of(work, struct onmessage_work_context,
577                            work);
578         vmbus_onmessage(&ctx->msg);
579         kfree(ctx);
580 }
581 
582 static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
583 {
584         struct clock_event_device *dev = hv_context.clk_evt[cpu];
585 
586         if (dev->event_handler)
587                 dev->event_handler(dev);
588 
589         msg->header.message_type = HVMSG_NONE;
590 
591         /*
 592          * Make sure the write to MessageType (i.e. setting it to
 593          * HVMSG_NONE) happens before we read MessagePending and
 594          * write the EOM. Otherwise, the EOM will not cause the
 595          * hypervisor to deliver any more messages, since it sees
 596          * no empty slot.
597          */
598         mb();
599 
600         if (msg->header.message_flags.msg_pending) {
601                 /*
602                  * This will cause message queue rescan to
603                  * possibly deliver another msg from the
604                  * hypervisor
605                  */
606                 wrmsrl(HV_X64_MSR_EOM, 0);
607         }
608 }
609 
610 static void vmbus_on_msg_dpc(unsigned long data)
611 {
612         int cpu = smp_processor_id();
613         void *page_addr = hv_context.synic_message_page[cpu];
614         struct hv_message *msg = (struct hv_message *)page_addr +
615                                   VMBUS_MESSAGE_SINT;
616         struct onmessage_work_context *ctx;
617 
618         while (1) {
619                 if (msg->header.message_type == HVMSG_NONE) {
620                         /* no msg */
621                         break;
622                 } else {
623                         ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
624                         if (ctx == NULL)
625                                 continue;
626                         INIT_WORK(&ctx->work, vmbus_onmessage_work);
627                         memcpy(&ctx->msg, msg, sizeof(*msg));
628                         queue_work(vmbus_connection.work_queue, &ctx->work);
629                 }
630 
631                 msg->header.message_type = HVMSG_NONE;
632 
633                 /*
 634                  * Make sure the write to MessageType (i.e. setting it to
 635                  * HVMSG_NONE) happens before we read MessagePending and
 636                  * write the EOM. Otherwise, the EOM will not cause the
 637                  * hypervisor to deliver any more messages, since it sees
 638                  * no empty slot.
639                  */
640                 mb();
641 
642                 if (msg->header.message_flags.msg_pending) {
643                         /*
644                          * This will cause message queue rescan to
645                          * possibly deliver another msg from the
646                          * hypervisor
647                          */
648                         wrmsrl(HV_X64_MSR_EOM, 0);
649                 }
650         }
651 }
652 
653 static void vmbus_isr(void)
654 {
655         int cpu = smp_processor_id();
656         void *page_addr;
657         struct hv_message *msg;
658         union hv_synic_event_flags *event;
659         bool handled = false;
660 
661         page_addr = hv_context.synic_event_page[cpu];
662         if (page_addr == NULL)
663                 return;
664 
665         event = (union hv_synic_event_flags *)page_addr +
666                                          VMBUS_MESSAGE_SINT;
667         /*
668          * Check for events before checking for messages. This is the order
669          * in which events and messages are checked in Windows guests on
670          * Hyper-V, and the Windows team suggested we do the same.
671          */
672 
673         if ((vmbus_proto_version == VERSION_WS2008) ||
674                 (vmbus_proto_version == VERSION_WIN7)) {
675 
676                 /* Since we are a child, we only need to check bit 0 */
677                 if (sync_test_and_clear_bit(0,
678                         (unsigned long *) &event->flags32[0])) {
679                         handled = true;
680                 }
681         } else {
682                 /*
683                  * Our host is win8 or above. The signaling mechanism
684                  * has changed and we can directly look at the event page.
 685                  * If bit n is set then we have an interrupt on the channel
686                  * whose id is n.
687                  */
688                 handled = true;
689         }
690 
691         if (handled)
692                 tasklet_schedule(hv_context.event_dpc[cpu]);
693 
694 
695         page_addr = hv_context.synic_message_page[cpu];
696         msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
697 
698         /* Check if there are actual msgs to be processed */
699         if (msg->header.message_type != HVMSG_NONE) {
700                 if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
701                         hv_process_timer_expiration(msg, cpu);
702                 else
703                         tasklet_schedule(&msg_dpc);
704         }
705 }
706 
707 /*
 708  * vmbus_bus_init - Main vmbus driver initialization routine.
709  *
710  * Here, we
711  *      - initialize the vmbus driver context
712  *      - invoke the vmbus hv main init routine
713  *      - get the irq resource
714  *      - retrieve the channel offers
715  */
716 static int vmbus_bus_init(int irq)
717 {
718         int ret;
719 
 720         /* Hypervisor initialization: set up the hypercall page, etc. */
721         ret = hv_init();
722         if (ret != 0) {
723                 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
724                 return ret;
725         }
726 
727         tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
728 
729         ret = bus_register(&hv_bus);
730         if (ret)
731                 goto err_cleanup;
732 
733         hv_setup_vmbus_irq(vmbus_isr);
734 
735         ret = hv_synic_alloc();
736         if (ret)
737                 goto err_alloc;
738         /*
739          * Initialize the per-cpu interrupt state and
740          * connect to the host.
741          */
742         on_each_cpu(hv_synic_init, NULL, 1);
743         ret = vmbus_connect();
744         if (ret)
745                 goto err_alloc;
746 
747         vmbus_request_offers();
748 
749         return 0;
750 
751 err_alloc:
752         hv_synic_free();
753         hv_remove_vmbus_irq();
754 
755         bus_unregister(&hv_bus);
756 
757 err_cleanup:
758         hv_cleanup();
759 
760         return ret;
761 }
762 
763 /**
 764  * __vmbus_driver_register() - Register a vmbus driver
 765  * @hv_driver: Pointer to the driver structure you want to register
 766  * @owner: owner module of the driver
767  * @mod_name: module name string
768  *
769  * Registers the given driver with Linux through the 'driver_register()' call
770  * and sets up the hyper-v vmbus handling for this driver.
771  * It will return the state of the 'driver_register()' call.
772  *
773  */
774 int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
775 {
776         int ret;
777 
778         pr_info("registering driver %s\n", hv_driver->name);
779 
780         ret = vmbus_exists();
781         if (ret < 0)
782                 return ret;
783 
784         hv_driver->driver.name = hv_driver->name;
785         hv_driver->driver.owner = owner;
786         hv_driver->driver.mod_name = mod_name;
787         hv_driver->driver.bus = &hv_bus;
788 
789         ret = driver_register(&hv_driver->driver);
790 
791         return ret;
792 }
793 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
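
/*
 * Editor's sketch, not part of the original file: a client driver normally
 * reaches __vmbus_driver_register() through the vmbus_driver_register()
 * macro from <linux/hyperv.h>, which supplies THIS_MODULE and KBUILD_MODNAME.
 * Everything named "example_*" is hypothetical; the id table is the one
 * sketched after vmbus_uevent() above.  In a real client module, the init
 * and exit functions would be wired up with module_init()/module_exit().
 */
static int example_probe(struct hv_device *dev,
                         const struct hv_vmbus_device_id *dev_id)
{
        dev_info(&dev->device, "example vmbus device bound\n");
        return 0;
}

static struct hv_driver example_drv = {
        .name     = "hv_example",
        .id_table = example_id_table,
        .probe    = example_probe,
        /* .remove and .shutdown are optional; see vmbus_remove()/vmbus_shutdown() */
};

static int __init example_init(void)
{
        return vmbus_driver_register(&example_drv);
}

static void __exit example_exit(void)
{
        vmbus_driver_unregister(&example_drv);
}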
794 
795 /**
 796  * vmbus_driver_unregister() - Unregister a vmbus driver
 797  * @hv_driver: Pointer to the driver structure you want to unregister
 798  *
 799  * Unregisters the given driver that was previously registered with a call to
 800  * vmbus_driver_register().
801  */
802 void vmbus_driver_unregister(struct hv_driver *hv_driver)
803 {
804         pr_info("unregistering driver %s\n", hv_driver->name);
805 
806         if (!vmbus_exists())
807                 driver_unregister(&hv_driver->driver);
808 }
809 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
810 
811 /*
 812  * vmbus_device_create - Create a new child device object for the vmbus;
 813  * the device is registered separately via vmbus_device_register().
814  */
815 struct hv_device *vmbus_device_create(const uuid_le *type,
816                                       const uuid_le *instance,
817                                       struct vmbus_channel *channel)
818 {
819         struct hv_device *child_device_obj;
820 
821         child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
822         if (!child_device_obj) {
823                 pr_err("Unable to allocate device object for child device\n");
824                 return NULL;
825         }
826 
827         child_device_obj->channel = channel;
828         memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
829         memcpy(&child_device_obj->dev_instance, instance,
830                sizeof(uuid_le));
831 
832 
833         return child_device_obj;
834 }
835 
836 /*
837  * vmbus_device_register - Register the child device
838  */
839 int vmbus_device_register(struct hv_device *child_device_obj)
840 {
841         int ret = 0;
842 
843         static atomic_t device_num = ATOMIC_INIT(0);
844 
845         dev_set_name(&child_device_obj->device, "vmbus_0_%d",
846                      atomic_inc_return(&device_num));
847 
848         child_device_obj->device.bus = &hv_bus;
849         child_device_obj->device.parent = &hv_acpi_dev->dev;
850         child_device_obj->device.release = vmbus_device_release;
851 
852         /*
853          * Register with the LDM. This will kick off the driver/device
854          * binding...which will eventually call vmbus_match() and vmbus_probe()
855          */
856         ret = device_register(&child_device_obj->device);
857 
858         if (ret)
859                 pr_err("Unable to register child device\n");
860         else
861                 pr_debug("child device %s registered\n",
862                         dev_name(&child_device_obj->device));
863 
864         return ret;
865 }
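
/*
 * Editor's sketch, not part of the original file: how the channel-management
 * code is expected to use the two helpers above when a new channel offer
 * arrives (error handling abbreviated, names illustrative).
 */
static int example_offer_to_device(struct vmbus_channel *newchannel)
{
        struct hv_device *dev;

        dev = vmbus_device_create(&newchannel->offermsg.offer.if_type,
                                  &newchannel->offermsg.offer.if_instance,
                                  newchannel);
        if (!dev)
                return -ENOMEM;

        newchannel->device_obj = dev;
        /* Kicks off driver/device binding via vmbus_match()/vmbus_probe() */
        return vmbus_device_register(dev);
}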
866 
867 /*
868  * vmbus_device_unregister - Remove the specified child device
869  * from the vmbus.
870  */
871 void vmbus_device_unregister(struct hv_device *device_obj)
872 {
873         pr_debug("child device %s unregistered\n",
874                 dev_name(&device_obj->device));
875 
876         /*
877          * Kick off the process of unregistering the device.
878          * This will call vmbus_remove() and eventually vmbus_device_release()
879          */
880         device_unregister(&device_obj->device);
881 }
882 
883 
884 /*
 885  * VMBUS is an ACPI-enumerated device. Get the information we
 886  * need from the DSDT.
887  */
888 
889 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
890 {
891         switch (res->type) {
892         case ACPI_RESOURCE_TYPE_IRQ:
893                 irq = res->data.irq.interrupts[0];
894                 break;
895 
896         case ACPI_RESOURCE_TYPE_ADDRESS64:
897                 hyperv_mmio.start = res->data.address64.address.minimum;
898                 hyperv_mmio.end = res->data.address64.address.maximum;
899                 break;
900         }
901 
902         return AE_OK;
903 }
904 
905 static int vmbus_acpi_add(struct acpi_device *device)
906 {
907         acpi_status result;
908         int ret_val = -ENODEV;
909 
910         hv_acpi_dev = device;
911 
912         result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
913                                         vmbus_walk_resources, NULL);
914 
915         if (ACPI_FAILURE(result))
916                 goto acpi_walk_err;
917         /*
918          * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
919          * has the mmio ranges. Get that.
920          */
921         if (device->parent) {
922                 result = acpi_walk_resources(device->parent->handle,
923                                         METHOD_NAME__CRS,
924                                         vmbus_walk_resources, NULL);
925 
926                 if (ACPI_FAILURE(result))
927                         goto acpi_walk_err;
928                 if (hyperv_mmio.start && hyperv_mmio.end)
929                         request_resource(&iomem_resource, &hyperv_mmio);
930         }
931         ret_val = 0;
932 
933 acpi_walk_err:
934         complete(&probe_event);
935         return ret_val;
936 }
937 
938 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
939         {"VMBUS", 0},
940         {"VMBus", 0},
941         {"", 0},
942 };
943 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
944 
945 static struct acpi_driver vmbus_acpi_driver = {
946         .name = "vmbus",
947         .ids = vmbus_acpi_device_ids,
948         .ops = {
949                 .add = vmbus_acpi_add,
950         },
951 };
952 
953 static int __init hv_acpi_init(void)
954 {
955         int ret, t;
956 
957         if (x86_hyper != &x86_hyper_ms_hyperv)
958                 return -ENODEV;
959 
960         init_completion(&probe_event);
961 
962         /*
963          * Get irq resources first.
964          */
965         ret = acpi_bus_register_driver(&vmbus_acpi_driver);
966 
967         if (ret)
968                 return ret;
969 
970         t = wait_for_completion_timeout(&probe_event, 5*HZ);
971         if (t == 0) {
972                 ret = -ETIMEDOUT;
973                 goto cleanup;
974         }
975 
976         if (irq <= 0) {
977                 ret = -ENODEV;
978                 goto cleanup;
979         }
980 
981         ret = vmbus_bus_init(irq);
982         if (ret)
983                 goto cleanup;
984 
985         return 0;
986 
987 cleanup:
988         acpi_bus_unregister_driver(&vmbus_acpi_driver);
989         hv_acpi_dev = NULL;
990         return ret;
991 }
992 
993 static void __exit vmbus_exit(void)
994 {
995         hv_remove_vmbus_irq();
996         vmbus_free_channels();
997         bus_unregister(&hv_bus);
998         hv_cleanup();
999         acpi_bus_unregister_driver(&vmbus_acpi_driver);
1000 }
1001 
1002 
1003 MODULE_LICENSE("GPL");
1004 
1005 subsys_initcall(hv_acpi_init);
1006 module_exit(vmbus_exit);
1007 
