
Linux/drivers/scsi/virtio_scsi.c

  1 /*
  2  * Virtio SCSI HBA driver
  3  *
  4  * Copyright IBM Corp. 2010
  5  * Copyright Red Hat, Inc. 2011
  6  *
  7  * Authors:
  8  *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
  9  *  Paolo Bonzini   <pbonzini@redhat.com>
 10  *
 11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
 12  * See the COPYING file in the top-level directory.
 13  *
 14  */
 15 
 16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 17 
 18 #include <linux/module.h>
 19 #include <linux/slab.h>
 20 #include <linux/mempool.h>
 21 #include <linux/virtio.h>
 22 #include <linux/virtio_ids.h>
 23 #include <linux/virtio_config.h>
 24 #include <linux/virtio_scsi.h>
 25 #include <linux/cpu.h>
 26 #include <linux/blkdev.h>
 27 #include <scsi/scsi_host.h>
 28 #include <scsi/scsi_device.h>
 29 #include <scsi/scsi_cmnd.h>
 30 #include <scsi/scsi_tcq.h>
 31 #include <linux/seqlock.h>
 32 
 33 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 34 #define VIRTIO_SCSI_EVENT_LEN 8
 35 #define VIRTIO_SCSI_VQ_BASE 2
 36 
 37 /* Command queue element */
 38 struct virtio_scsi_cmd {
 39         struct scsi_cmnd *sc;
 40         struct completion *comp;
 41         union {
 42                 struct virtio_scsi_cmd_req       cmd;
 43                 struct virtio_scsi_cmd_req_pi    cmd_pi;
 44                 struct virtio_scsi_ctrl_tmf_req  tmf;
 45                 struct virtio_scsi_ctrl_an_req   an;
 46         } req;
 47         union {
 48                 struct virtio_scsi_cmd_resp      cmd;
 49                 struct virtio_scsi_ctrl_tmf_resp tmf;
 50                 struct virtio_scsi_ctrl_an_resp  an;
 51                 struct virtio_scsi_event         evt;
 52         } resp;
 53 } ____cacheline_aligned_in_smp;
 54 
 55 struct virtio_scsi_event_node {
 56         struct virtio_scsi *vscsi;
 57         struct virtio_scsi_event event;
 58         struct work_struct work;
 59 };
 60 
 61 struct virtio_scsi_vq {
 62         /* Protects vq */
 63         spinlock_t vq_lock;
 64 
 65         struct virtqueue *vq;
 66 };
 67 
 68 /*
 69  * Per-target queue state.
 70  *
 71  * This struct holds the data needed by the queue steering policy.  When a
 72  * target is sent multiple requests, we need to drive them to the same queue so
 73  * that FIFO processing order is kept.  However, if a target was idle, we can
 74  * choose a queue arbitrarily.  In this case the queue is chosen according to
 75  * the current VCPU, so the driver expects the number of request queues to be
 76  * equal to the number of VCPUs.  This makes it easy and fast to select the
 77  * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 78  * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 79  *
 80  * tgt_seq is held to serialize reading and writing req_vq.
 81  *
 82  * Decrements of reqs are never concurrent with writes of req_vq: before the
 83  * decrement reqs will be != 0; after the decrement the virtqueue completion
 84  * routine will not use the req_vq so it can be changed by a new request.
 85  * Thus they can happen outside the tgt_seq, provided of course we make reqs
 86  * an atomic_t.
 87  */
 88 struct virtio_scsi_target_state {
 89         seqcount_t tgt_seq;
 90 
 91         /* Count of outstanding requests. */
 92         atomic_t reqs;
 93 
 94         /* Currently active virtqueue for requests sent to this target. */
 95         struct virtio_scsi_vq *req_vq;
 96 };
 97 
 98 /* Driver instance state */
 99 struct virtio_scsi {
100         struct virtio_device *vdev;
101 
102         /* Get some buffers ready for event vq */
103         struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
104 
105         u32 num_queues;
106 
107         /* If the affinity hint is set for virtqueues */
108         bool affinity_hint_set;
109 
110         /* CPU hotplug notifier */
111         struct notifier_block nb;
112 
113         /* Protected by event_vq lock */
114         bool stop_events;
115 
116         struct virtio_scsi_vq ctrl_vq;
117         struct virtio_scsi_vq event_vq;
118         struct virtio_scsi_vq req_vqs[];
119 };
120 
121 static struct kmem_cache *virtscsi_cmd_cache;
122 static mempool_t *virtscsi_cmd_pool;
123 
124 static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
125 {
126         return vdev->priv;
127 }
128 
129 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
130 {
131         if (!resid)
132                 return;
133 
134         if (!scsi_bidi_cmnd(sc)) {
135                 scsi_set_resid(sc, resid);
136                 return;
137         }
138 
139         scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
140         scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
141 }
142 
143 /**
144  * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
145  *
146  * Called with vq_lock held.
147  */
148 static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
149 {
150         struct virtio_scsi_cmd *cmd = buf;
151         struct scsi_cmnd *sc = cmd->sc;
152         struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
153         struct virtio_scsi_target_state *tgt =
154                                 scsi_target(sc->device)->hostdata;
155 
156         dev_dbg(&sc->device->sdev_gendev,
157                 "cmd %p response %u status %#02x sense_len %u\n",
158                 sc, resp->response, resp->status, resp->sense_len);
159 
160         sc->result = resp->status;
161         virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
162         switch (resp->response) {
163         case VIRTIO_SCSI_S_OK:
164                 set_host_byte(sc, DID_OK);
165                 break;
166         case VIRTIO_SCSI_S_OVERRUN:
167                 set_host_byte(sc, DID_ERROR);
168                 break;
169         case VIRTIO_SCSI_S_ABORTED:
170                 set_host_byte(sc, DID_ABORT);
171                 break;
172         case VIRTIO_SCSI_S_BAD_TARGET:
173                 set_host_byte(sc, DID_BAD_TARGET);
174                 break;
175         case VIRTIO_SCSI_S_RESET:
176                 set_host_byte(sc, DID_RESET);
177                 break;
178         case VIRTIO_SCSI_S_BUSY:
179                 set_host_byte(sc, DID_BUS_BUSY);
180                 break;
181         case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
182                 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
183                 break;
184         case VIRTIO_SCSI_S_TARGET_FAILURE:
185                 set_host_byte(sc, DID_TARGET_FAILURE);
186                 break;
187         case VIRTIO_SCSI_S_NEXUS_FAILURE:
188                 set_host_byte(sc, DID_NEXUS_FAILURE);
189                 break;
190         default:
191                 scmd_printk(KERN_WARNING, sc, "Unknown response %d",
192                             resp->response);
193                 /* fall through */
194         case VIRTIO_SCSI_S_FAILURE:
195                 set_host_byte(sc, DID_ERROR);
196                 break;
197         }
198 
199         WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
200                 VIRTIO_SCSI_SENSE_SIZE);
201         if (sc->sense_buffer) {
202                 memcpy(sc->sense_buffer, resp->sense,
203                        min_t(u32,
204                              virtio32_to_cpu(vscsi->vdev, resp->sense_len),
205                              VIRTIO_SCSI_SENSE_SIZE));
206                 if (resp->sense_len)
207                         set_driver_byte(sc, DRIVER_SENSE);
208         }
209 
210         sc->scsi_done(sc);
211 
212         atomic_dec(&tgt->reqs);
213 }
214 
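     /*
      * Drain a virtqueue and hand each completed buffer to @fn.  The
      * disable_cb/get_buf/enable_cb loop re-checks for buffers that
      * completed while callbacks were being re-enabled, so no completion
      * is missed.  Takes the queue's vq_lock internally.
      */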
215 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
216                              struct virtio_scsi_vq *virtscsi_vq,
217                              void (*fn)(struct virtio_scsi *vscsi, void *buf))
218 {
219         void *buf;
220         unsigned int len;
221         unsigned long flags;
222         struct virtqueue *vq = virtscsi_vq->vq;
223 
224         spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
225         do {
226                 virtqueue_disable_cb(vq);
227                 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
228                         fn(vscsi, buf);
229 
230                 if (unlikely(virtqueue_is_broken(vq)))
231                         break;
232         } while (!virtqueue_enable_cb(vq));
233         spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
234 }
235 
236 static void virtscsi_req_done(struct virtqueue *vq)
237 {
238         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
239         struct virtio_scsi *vscsi = shost_priv(sh);
240         int index = vq->index - VIRTIO_SCSI_VQ_BASE;
241         struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
242 
243         virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 244 }
245 
246 static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
247 {
248         int i, num_vqs;
249 
250         num_vqs = vscsi->num_queues;
251         for (i = 0; i < num_vqs; i++)
252                 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
253                                  virtscsi_complete_cmd);
254 }
255 
256 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
257 {
258         struct virtio_scsi_cmd *cmd = buf;
259 
260         if (cmd->comp)
261                 complete_all(cmd->comp);
262 }
263 
264 static void virtscsi_ctrl_done(struct virtqueue *vq)
265 {
266         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
267         struct virtio_scsi *vscsi = shost_priv(sh);
268 
269         virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 270 }
271 
272 static void virtscsi_handle_event(struct work_struct *work);
273 
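     /*
      * Post one event buffer on the event virtqueue so the device can
      * report hotplug, transport-reset and parameter-change events.  The
      * buffer is re-posted from virtscsi_handle_event() once the event
      * has been consumed.
      */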
274 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
275                                struct virtio_scsi_event_node *event_node)
276 {
277         int err;
278         struct scatterlist sg;
279         unsigned long flags;
280 
281         INIT_WORK(&event_node->work, virtscsi_handle_event);
282         sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
283 
284         spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
285 
286         err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
287                                   GFP_ATOMIC);
288         if (!err)
289                 virtqueue_kick(vscsi->event_vq.vq);
290 
291         spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
292 
293         return err;
294 }
295 
296 static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
297 {
298         int i;
299 
300         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
301                 vscsi->event_list[i].vscsi = vscsi;
302                 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
303         }
304 
305         return 0;
306 }
307 
308 static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
309 {
310         int i;
311 
312         /* Stop scheduling work before calling cancel_work_sync.  */
313         spin_lock_irq(&vscsi->event_vq.vq_lock);
314         vscsi->stop_events = true;
315         spin_unlock_irq(&vscsi->event_vq.vq_lock);
316 
317         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
318                 cancel_work_sync(&vscsi->event_list[i].work);
319 }
320 
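     /*
      * Transport reset events signal hotplug: EVT_RESET_RESCAN means a
      * LUN appeared and is scanned in, EVT_RESET_REMOVED means it went
      * away and the corresponding scsi_device is removed.  event->lun
      * uses the same encoding as the request header (byte 1 is the
      * target, bytes 2-3 the LUN).
      */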
321 static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
322                                             struct virtio_scsi_event *event)
323 {
324         struct scsi_device *sdev;
325         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
326         unsigned int target = event->lun[1];
327         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
328 
329         switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
330         case VIRTIO_SCSI_EVT_RESET_RESCAN:
331                 scsi_add_device(shost, 0, target, lun);
332                 break;
333         case VIRTIO_SCSI_EVT_RESET_REMOVED:
334                 sdev = scsi_device_lookup(shost, 0, target, lun);
335                 if (sdev) {
336                         scsi_remove_device(sdev);
337                         scsi_device_put(sdev);
338                 } else {
339                         pr_err("SCSI device %d 0 %d %d not found\n",
340                                 shost->host_no, target, lun);
341                 }
342                 break;
343         default:
 344                 pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
345         }
346 }
347 
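     /*
      * Parameter change events carry the sense ASC/ASCQ pair in
      * event->reason; a capacity or mode-parameter change triggers a
      * rescan of the affected device.
      */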
348 static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
349                                          struct virtio_scsi_event *event)
350 {
351         struct scsi_device *sdev;
352         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
353         unsigned int target = event->lun[1];
354         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
355         u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
356         u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
357 
358         sdev = scsi_device_lookup(shost, 0, target, lun);
359         if (!sdev) {
360                 pr_err("SCSI device %d 0 %d %d not found\n",
361                         shost->host_no, target, lun);
362                 return;
363         }
364 
365         /* Handle "Parameters changed", "Mode parameters changed", and
366            "Capacity data has changed".  */
367         if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
368                 scsi_rescan_device(&sdev->sdev_gendev);
369 
370         scsi_device_put(sdev);
371 }
372 
373 static void virtscsi_handle_event(struct work_struct *work)
374 {
375         struct virtio_scsi_event_node *event_node =
376                 container_of(work, struct virtio_scsi_event_node, work);
377         struct virtio_scsi *vscsi = event_node->vscsi;
378         struct virtio_scsi_event *event = &event_node->event;
379 
380         if (event->event &
381             cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
382                 event->event &= ~cpu_to_virtio32(vscsi->vdev,
383                                                    VIRTIO_SCSI_T_EVENTS_MISSED);
384                 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
385         }
386 
387         switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
388         case VIRTIO_SCSI_T_NO_EVENT:
389                 break;
390         case VIRTIO_SCSI_T_TRANSPORT_RESET:
391                 virtscsi_handle_transport_reset(vscsi, event);
392                 break;
393         case VIRTIO_SCSI_T_PARAM_CHANGE:
394                 virtscsi_handle_param_change(vscsi, event);
395                 break;
396         default:
 397                 pr_err("Unsupported virtio scsi event %x\n", event->event);
398         }
399         virtscsi_kick_event(vscsi, event_node);
400 }
401 
402 static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
403 {
404         struct virtio_scsi_event_node *event_node = buf;
405 
406         if (!vscsi->stop_events)
407                 queue_work(system_freezable_wq, &event_node->work);
408 }
409 
410 static void virtscsi_event_done(struct virtqueue *vq)
411 {
412         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
413         struct virtio_scsi *vscsi = shost_priv(sh);
414 
415         virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 416 }
417 
418 /**
419  * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
420  * @vq          : the struct virtqueue we're talking about
421  * @cmd         : command structure
422  * @req_size    : size of the request buffer
423  * @resp_size   : size of the response buffer
424  */
425 static int virtscsi_add_cmd(struct virtqueue *vq,
426                             struct virtio_scsi_cmd *cmd,
427                             size_t req_size, size_t resp_size)
428 {
429         struct scsi_cmnd *sc = cmd->sc;
430         struct scatterlist *sgs[6], req, resp;
431         struct sg_table *out, *in;
432         unsigned out_num = 0, in_num = 0;
433 
434         out = in = NULL;
435 
436         if (sc && sc->sc_data_direction != DMA_NONE) {
437                 if (sc->sc_data_direction != DMA_FROM_DEVICE)
438                         out = &scsi_out(sc)->table;
439                 if (sc->sc_data_direction != DMA_TO_DEVICE)
440                         in = &scsi_in(sc)->table;
441         }
442 
443         /* Request header.  */
444         sg_init_one(&req, &cmd->req, req_size);
445         sgs[out_num++] = &req;
446 
447         /* Data-out buffer.  */
448         if (out) {
449                 /* Place WRITE protection SGLs before Data OUT payload */
450                 if (scsi_prot_sg_count(sc))
451                         sgs[out_num++] = scsi_prot_sglist(sc);
452                 sgs[out_num++] = out->sgl;
453         }
454 
455         /* Response header.  */
456         sg_init_one(&resp, &cmd->resp, resp_size);
457         sgs[out_num + in_num++] = &resp;
458 
459         /* Data-in buffer */
460         if (in) {
461                 /* Place READ protection SGLs before Data IN payload */
462                 if (scsi_prot_sg_count(sc))
463                         sgs[out_num + in_num++] = scsi_prot_sglist(sc);
464                 sgs[out_num + in_num++] = in->sgl;
465         }
466 
467         return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
468 }
469 
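     /*
      * Queue a command on @vq and notify the device if needed.  The
      * buffer is added and virtqueue_kick_prepare() runs under vq_lock,
      * while the potentially expensive virtqueue_notify() is issued
      * after the lock has been dropped.
      */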
470 static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
471                              struct virtio_scsi_cmd *cmd,
472                              size_t req_size, size_t resp_size)
473 {
474         unsigned long flags;
475         int err;
476         bool needs_kick = false;
477 
478         spin_lock_irqsave(&vq->vq_lock, flags);
479         err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
480         if (!err)
481                 needs_kick = virtqueue_kick_prepare(vq->vq);
482 
483         spin_unlock_irqrestore(&vq->vq_lock, flags);
484 
485         if (needs_kick)
486                 virtqueue_notify(vq->vq);
487         return err;
488 }
489 
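     /*
      * Fill the common virtio-scsi request header.  The 4-byte LUN field
      * is encoded as: byte 0 fixed to 1, byte 1 the target id, bytes 2-3
      * the LUN in SAM flat-space format (hence the 0x40 in the top bits).
      * The tag carries the scsi_cmnd pointer so a later abort can refer
      * to this command.
      */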
490 static void virtio_scsi_init_hdr(struct virtio_device *vdev,
491                                  struct virtio_scsi_cmd_req *cmd,
492                                  struct scsi_cmnd *sc)
493 {
494         cmd->lun[0] = 1;
495         cmd->lun[1] = sc->device->id;
496         cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
497         cmd->lun[3] = sc->device->lun & 0xff;
498         cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
499         cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
500         cmd->prio = 0;
501         cmd->crn = 0;
502 }
503 
504 static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
505                                     struct virtio_scsi_cmd_req_pi *cmd_pi,
506                                     struct scsi_cmnd *sc)
507 {
508         struct request *rq = sc->request;
509         struct blk_integrity *bi;
510 
511         virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
512 
513         if (!rq || !scsi_prot_sg_count(sc))
514                 return;
515 
516         bi = blk_get_integrity(rq->rq_disk);
517 
518         if (sc->sc_data_direction == DMA_TO_DEVICE)
519                 cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
520                                                         blk_rq_sectors(rq) *
521                                                         bi->tuple_size);
522         else if (sc->sc_data_direction == DMA_FROM_DEVICE)
523                 cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
524                                                        blk_rq_sectors(rq) *
525                                                        bi->tuple_size);
526 }
527 
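     /*
      * Build the request header (with or without protection information,
      * depending on VIRTIO_SCSI_F_T10_PI) and queue the command on the
      * chosen request virtqueue.  Any failure to queue (typically a full
      * ring) is reported back to the midlayer as SCSI_MLQUEUE_HOST_BUSY.
      */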
528 static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
529                                  struct virtio_scsi_vq *req_vq,
530                                  struct scsi_cmnd *sc)
531 {
532         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
533         struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
534         int req_size;
535 
536         BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
537 
538         /* TODO: check feature bit and fail if unsupported?  */
539         BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
540 
541         dev_dbg(&sc->device->sdev_gendev,
542                 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
543 
544         memset(cmd, 0, sizeof(*cmd));
545         cmd->sc = sc;
546 
547         BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
548 
549         if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
550                 virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
551                 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
552                 req_size = sizeof(cmd->req.cmd_pi);
553         } else {
554                 virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
555                 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
556                 req_size = sizeof(cmd->req.cmd);
557         }
558 
559         if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
560                 return SCSI_MLQUEUE_HOST_BUSY;
561         return 0;
562 }
563 
564 static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
565                                         struct scsi_cmnd *sc)
566 {
567         struct virtio_scsi *vscsi = shost_priv(sh);
568         struct virtio_scsi_target_state *tgt =
569                                 scsi_target(sc->device)->hostdata;
570 
571         atomic_inc(&tgt->reqs);
572         return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
573 }
574 
575 static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
576                                                   struct scsi_cmnd *sc)
577 {
578         u32 tag = blk_mq_unique_tag(sc->request);
579         u16 hwq = blk_mq_unique_tag_to_hwq(tag);
580 
581         return &vscsi->req_vqs[hwq];
582 }
583 
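     /*
      * Queue steering for the non-blk-mq case, implementing the policy
      * described above struct virtio_scsi_target_state: a busy target
      * keeps using its current req_vq so FIFO ordering is preserved,
      * while an idle target is re-steered to the queue matching the
      * current CPU.
      */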
584 static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
585                                                struct virtio_scsi_target_state *tgt)
586 {
587         struct virtio_scsi_vq *vq;
588         unsigned long flags;
589         u32 queue_num;
590 
591         local_irq_save(flags);
592         if (atomic_inc_return(&tgt->reqs) > 1) {
593                 unsigned long seq;
594 
595                 do {
596                         seq = read_seqcount_begin(&tgt->tgt_seq);
597                         vq = tgt->req_vq;
598                 } while (read_seqcount_retry(&tgt->tgt_seq, seq));
599         } else {
600                 /* no writes can be concurrent because of atomic_t */
601                 write_seqcount_begin(&tgt->tgt_seq);
602 
603                 /* keep previous req_vq if a reader just arrived */
604                 if (unlikely(atomic_read(&tgt->reqs) > 1)) {
605                         vq = tgt->req_vq;
606                         goto unlock;
607                 }
608 
609                 queue_num = smp_processor_id();
610                 while (unlikely(queue_num >= vscsi->num_queues))
611                         queue_num -= vscsi->num_queues;
612                 tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
613  unlock:
614                 write_seqcount_end(&tgt->tgt_seq);
615         }
616         local_irq_restore(flags);
617 
618         return vq;
619 }
620 
621 static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
622                                        struct scsi_cmnd *sc)
623 {
624         struct virtio_scsi *vscsi = shost_priv(sh);
625         struct virtio_scsi_target_state *tgt =
626                                 scsi_target(sc->device)->hostdata;
627         struct virtio_scsi_vq *req_vq;
628 
629         if (shost_use_blk_mq(sh))
630                 req_vq = virtscsi_pick_vq_mq(vscsi, sc);
631         else
632                 req_vq = virtscsi_pick_vq(vscsi, tgt);
633 
634         return virtscsi_queuecommand(vscsi, req_vq, sc);
635 }
636 
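     /*
      * Send a task management function on the control virtqueue and wait
      * for the device to complete it.  Returns SUCCESS or FAILED as
      * expected by the SCSI error-handling callbacks; the command is
      * returned to the mempool in either case.
      */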
637 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
638 {
639         DECLARE_COMPLETION_ONSTACK(comp);
640         int ret = FAILED;
641 
642         cmd->comp = &comp;
643         if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
644                               sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
645                 goto out;
646 
647         wait_for_completion(&comp);
648         if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
649             cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
650                 ret = SUCCESS;
651 
652         /*
653          * The spec guarantees that all requests related to the TMF have
654          * been completed, but the callback might not have run yet if
655          * we're using independent interrupts (e.g. MSI).  Poll the
656          * virtqueues once.
657          *
658          * In the abort case, sc->scsi_done will do nothing, because
659          * the block layer must have detected a timeout and as a result
660          * REQ_ATOM_COMPLETE has been set.
661          */
662         virtscsi_poll_requests(vscsi);
663 
664 out:
665         mempool_free(cmd, virtscsi_cmd_pool);
666         return ret;
667 }
668 
669 static int virtscsi_device_reset(struct scsi_cmnd *sc)
670 {
671         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
672         struct virtio_scsi_cmd *cmd;
673 
674         sdev_printk(KERN_INFO, sc->device, "device reset\n");
675         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
676         if (!cmd)
677                 return FAILED;
678 
679         memset(cmd, 0, sizeof(*cmd));
680         cmd->sc = sc;
681         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
682                 .type = VIRTIO_SCSI_T_TMF,
683                 .subtype = cpu_to_virtio32(vscsi->vdev,
684                                              VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
685                 .lun[0] = 1,
686                 .lun[1] = sc->device->id,
687                 .lun[2] = (sc->device->lun >> 8) | 0x40,
688                 .lun[3] = sc->device->lun & 0xff,
689         };
690         return virtscsi_tmf(vscsi, cmd);
691 }
692 
693 /**
694  * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
695  * @sdev:       Virtscsi target whose queue depth to change
696  * @qdepth:     New queue depth
697  */
698 static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
699 {
700         struct Scsi_Host *shost = sdev->host;
701         int max_depth = shost->cmd_per_lun;
702 
703         return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
704 }
705 
706 static int virtscsi_abort(struct scsi_cmnd *sc)
707 {
708         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
709         struct virtio_scsi_cmd *cmd;
710 
711         scmd_printk(KERN_INFO, sc, "abort\n");
712         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
713         if (!cmd)
714                 return FAILED;
715 
716         memset(cmd, 0, sizeof(*cmd));
717         cmd->sc = sc;
718         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
719                 .type = VIRTIO_SCSI_T_TMF,
720                 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
721                 .lun[0] = 1,
722                 .lun[1] = sc->device->id,
723                 .lun[2] = (sc->device->lun >> 8) | 0x40,
724                 .lun[3] = sc->device->lun & 0xff,
725                 .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
726         };
727         return virtscsi_tmf(vscsi, cmd);
728 }
729 
730 static int virtscsi_target_alloc(struct scsi_target *starget)
731 {
732         struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
733         struct virtio_scsi *vscsi = shost_priv(sh);
734 
735         struct virtio_scsi_target_state *tgt =
736                                 kmalloc(sizeof(*tgt), GFP_KERNEL);
737         if (!tgt)
738                 return -ENOMEM;
739 
740         seqcount_init(&tgt->tgt_seq);
741         atomic_set(&tgt->reqs, 0);
742         tgt->req_vq = &vscsi->req_vqs[0];
743 
744         starget->hostdata = tgt;
745         return 0;
746 }
747 
748 static void virtscsi_target_destroy(struct scsi_target *starget)
749 {
750         struct virtio_scsi_target_state *tgt = starget->hostdata;
751         kfree(tgt);
752 }
753 
754 static struct scsi_host_template virtscsi_host_template_single = {
755         .module = THIS_MODULE,
756         .name = "Virtio SCSI HBA",
757         .proc_name = "virtio_scsi",
758         .this_id = -1,
759         .cmd_size = sizeof(struct virtio_scsi_cmd),
760         .queuecommand = virtscsi_queuecommand_single,
761         .change_queue_depth = virtscsi_change_queue_depth,
762         .eh_abort_handler = virtscsi_abort,
763         .eh_device_reset_handler = virtscsi_device_reset,
764 
765         .can_queue = 1024,
766         .dma_boundary = UINT_MAX,
767         .use_clustering = ENABLE_CLUSTERING,
768         .target_alloc = virtscsi_target_alloc,
769         .target_destroy = virtscsi_target_destroy,
770         .track_queue_depth = 1,
771 };
772 
773 static struct scsi_host_template virtscsi_host_template_multi = {
774         .module = THIS_MODULE,
775         .name = "Virtio SCSI HBA",
776         .proc_name = "virtio_scsi",
777         .this_id = -1,
778         .cmd_size = sizeof(struct virtio_scsi_cmd),
779         .queuecommand = virtscsi_queuecommand_multi,
780         .change_queue_depth = virtscsi_change_queue_depth,
781         .eh_abort_handler = virtscsi_abort,
782         .eh_device_reset_handler = virtscsi_device_reset,
783 
784         .can_queue = 1024,
785         .dma_boundary = UINT_MAX,
786         .use_clustering = ENABLE_CLUSTERING,
787         .target_alloc = virtscsi_target_alloc,
788         .target_destroy = virtscsi_target_destroy,
789         .track_queue_depth = 1,
790 };
791 
792 #define virtscsi_config_get(vdev, fld) \
793         ({ \
794                 typeof(((struct virtio_scsi_config *)0)->fld) __val; \
795                 virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
796                 __val; \
797         })
798 
799 #define virtscsi_config_set(vdev, fld, val) \
800         do { \
801                 typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
802                 virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
803         } while(0)
804 
805 static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
806 {
807         int i;
808         int cpu;
809 
 810         /* In multiqueue mode, when the number of CPUs equals the
 811          * number of request queues, we make each queue private to
 812          * one CPU by setting the affinity hint, which eliminates
 813          * contention on the queues.
 814          */
815         if ((vscsi->num_queues == 1 ||
816              vscsi->num_queues != num_online_cpus()) && affinity) {
817                 if (vscsi->affinity_hint_set)
818                         affinity = false;
819                 else
820                         return;
821         }
822 
823         if (affinity) {
824                 i = 0;
825                 for_each_online_cpu(cpu) {
826                         virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
827                         i++;
828                 }
829 
830                 vscsi->affinity_hint_set = true;
831         } else {
832                 for (i = 0; i < vscsi->num_queues; i++) {
833                         if (!vscsi->req_vqs[i].vq)
834                                 continue;
835 
836                         virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
837                 }
838 
839                 vscsi->affinity_hint_set = false;
840         }
841 }
842 
843 static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
844 {
845         get_online_cpus();
846         __virtscsi_set_affinity(vscsi, affinity);
847         put_online_cpus();
848 }
849 
850 static int virtscsi_cpu_callback(struct notifier_block *nfb,
851                                  unsigned long action, void *hcpu)
852 {
853         struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
854         switch(action) {
855         case CPU_ONLINE:
856         case CPU_ONLINE_FROZEN:
857         case CPU_DEAD:
858         case CPU_DEAD_FROZEN:
859                 __virtscsi_set_affinity(vscsi, true);
860                 break;
861         default:
862                 break;
863         }
864         return NOTIFY_OK;
865 }
866 
867 static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
868                              struct virtqueue *vq)
869 {
870         spin_lock_init(&virtscsi_vq->vq_lock);
871         virtscsi_vq->vq = vq;
872 }
873 
874 static void virtscsi_remove_vqs(struct virtio_device *vdev)
875 {
876         struct Scsi_Host *sh = virtio_scsi_host(vdev);
877         struct virtio_scsi *vscsi = shost_priv(sh);
878 
879         virtscsi_set_affinity(vscsi, false);
880 
881         /* Stop all the virtqueues. */
882         vdev->config->reset(vdev);
883 
884         vdev->config->del_vqs(vdev);
885 }
886 
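     /*
      * Discover and initialize the virtqueues: one control queue, one
      * event queue, and num_queues request queues.  Also writes the
      * driver's CDB and sense buffer sizes to the device configuration
      * space.
      */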
887 static int virtscsi_init(struct virtio_device *vdev,
888                          struct virtio_scsi *vscsi)
889 {
890         int err;
891         u32 i;
892         u32 num_vqs;
893         vq_callback_t **callbacks;
894         const char **names;
895         struct virtqueue **vqs;
896 
897         num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
898         vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
899         callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
900         names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
901 
902         if (!callbacks || !vqs || !names) {
903                 err = -ENOMEM;
904                 goto out;
905         }
906 
907         callbacks[0] = virtscsi_ctrl_done;
908         callbacks[1] = virtscsi_event_done;
909         names[0] = "control";
910         names[1] = "event";
911         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
912                 callbacks[i] = virtscsi_req_done;
913                 names[i] = "request";
914         }
915 
916         /* Discover virtqueues and write information to configuration.  */
917         err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
918         if (err)
919                 goto out;
920 
921         virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
922         virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
923         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
924                 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
925                                  vqs[i]);
926 
927         virtscsi_set_affinity(vscsi, true);
928 
929         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
930         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
931 
932         err = 0;
933 
934 out:
935         kfree(names);
936         kfree(callbacks);
937         kfree(vqs);
938         if (err)
939                 virtscsi_remove_vqs(vdev);
940         return err;
941 }
942 
943 static int virtscsi_probe(struct virtio_device *vdev)
944 {
945         struct Scsi_Host *shost;
946         struct virtio_scsi *vscsi;
947         int err, host_prot;
948         u32 sg_elems, num_targets;
949         u32 cmd_per_lun;
950         u32 num_queues;
951         struct scsi_host_template *hostt;
952 
953         if (!vdev->config->get) {
954                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
955                         __func__);
956                 return -EINVAL;
957         }
958 
959         /* We need to know how many queues before we allocate. */
960         num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
961 
962         num_targets = virtscsi_config_get(vdev, max_target) + 1;
963 
964         if (num_queues == 1)
965                 hostt = &virtscsi_host_template_single;
966         else
967                 hostt = &virtscsi_host_template_multi;
968 
969         shost = scsi_host_alloc(hostt,
970                 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
971         if (!shost)
972                 return -ENOMEM;
973 
974         sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
975         shost->sg_tablesize = sg_elems;
976         vscsi = shost_priv(shost);
977         vscsi->vdev = vdev;
978         vscsi->num_queues = num_queues;
979         vdev->priv = shost;
980 
981         err = virtscsi_init(vdev, vscsi);
982         if (err)
983                 goto virtscsi_init_failed;
984 
985         vscsi->nb.notifier_call = &virtscsi_cpu_callback;
986         err = register_hotcpu_notifier(&vscsi->nb);
987         if (err) {
988                 pr_err("registering cpu notifier failed\n");
989                 goto scsi_add_host_failed;
990         }
991 
992         cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
993         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
994         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
995 
996         /* LUNs > 256 are reported with format 1, so they go in the range
997          * 16640-32767.
998          */
999         shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
1000         shost->max_id = num_targets;
1001         shost->max_channel = 0;
1002         shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
1003         shost->nr_hw_queues = num_queues;
1004 
1005         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1006                 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1007                             SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
1008                             SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
1009 
1010                 scsi_host_set_prot(shost, host_prot);
1011                 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
1012         }
1013 
1014         err = scsi_add_host(shost, &vdev->dev);
1015         if (err)
1016                 goto scsi_add_host_failed;
1017 
1018         virtio_device_ready(vdev);
1019 
1020         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1021                 virtscsi_kick_event_all(vscsi);
1022 
1023         scsi_scan_host(shost);
1024         return 0;
1025 
1026 scsi_add_host_failed:
1027         vdev->config->del_vqs(vdev);
1028 virtscsi_init_failed:
1029         scsi_host_put(shost);
1030         return err;
1031 }
1032 
1033 static void virtscsi_remove(struct virtio_device *vdev)
1034 {
1035         struct Scsi_Host *shost = virtio_scsi_host(vdev);
1036         struct virtio_scsi *vscsi = shost_priv(shost);
1037 
1038         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1039                 virtscsi_cancel_event_work(vscsi);
1040 
1041         scsi_remove_host(shost);
1042 
1043         unregister_hotcpu_notifier(&vscsi->nb);
1044 
1045         virtscsi_remove_vqs(vdev);
1046         scsi_host_put(shost);
1047 }
1048 
1049 #ifdef CONFIG_PM_SLEEP
1050 static int virtscsi_freeze(struct virtio_device *vdev)
1051 {
1052         struct Scsi_Host *sh = virtio_scsi_host(vdev);
1053         struct virtio_scsi *vscsi = shost_priv(sh);
1054 
1055         unregister_hotcpu_notifier(&vscsi->nb);
1056         virtscsi_remove_vqs(vdev);
1057         return 0;
1058 }
1059 
1060 static int virtscsi_restore(struct virtio_device *vdev)
1061 {
1062         struct Scsi_Host *sh = virtio_scsi_host(vdev);
1063         struct virtio_scsi *vscsi = shost_priv(sh);
1064         int err;
1065 
1066         err = virtscsi_init(vdev, vscsi);
1067         if (err)
1068                 return err;
1069 
1070         err = register_hotcpu_notifier(&vscsi->nb);
1071         if (err) {
1072                 vdev->config->del_vqs(vdev);
1073                 return err;
1074         }
1075 
1076         virtio_device_ready(vdev);
1077 
1078         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1079                 virtscsi_kick_event_all(vscsi);
1080 
1081         return err;
1082 }
1083 #endif
1084 
1085 static struct virtio_device_id id_table[] = {
1086         { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
1087         { 0 },
1088 };
1089 
1090 static unsigned int features[] = {
1091         VIRTIO_SCSI_F_HOTPLUG,
1092         VIRTIO_SCSI_F_CHANGE,
1093         VIRTIO_SCSI_F_T10_PI,
1094 };
1095 
1096 static struct virtio_driver virtio_scsi_driver = {
1097         .feature_table = features,
1098         .feature_table_size = ARRAY_SIZE(features),
1099         .driver.name = KBUILD_MODNAME,
1100         .driver.owner = THIS_MODULE,
1101         .id_table = id_table,
1102         .probe = virtscsi_probe,
1103 #ifdef CONFIG_PM_SLEEP
1104         .freeze = virtscsi_freeze,
1105         .restore = virtscsi_restore,
1106 #endif
1107         .remove = virtscsi_remove,
1108 };
1109 
1110 static int __init init(void)
1111 {
1112         int ret = -ENOMEM;
1113 
1114         virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
1115         if (!virtscsi_cmd_cache) {
1116                 pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
1117                 goto error;
1118         }
1119 
1120 
1121         virtscsi_cmd_pool =
1122                 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
1123                                          virtscsi_cmd_cache);
1124         if (!virtscsi_cmd_pool) {
1125                 pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
1126                 goto error;
1127         }
1128         ret = register_virtio_driver(&virtio_scsi_driver);
1129         if (ret < 0)
1130                 goto error;
1131 
1132         return 0;
1133 
1134 error:
1135         if (virtscsi_cmd_pool) {
1136                 mempool_destroy(virtscsi_cmd_pool);
1137                 virtscsi_cmd_pool = NULL;
1138         }
1139         if (virtscsi_cmd_cache) {
1140                 kmem_cache_destroy(virtscsi_cmd_cache);
1141                 virtscsi_cmd_cache = NULL;
1142         }
1143         return ret;
1144 }
1145 
1146 static void __exit fini(void)
1147 {
1148         unregister_virtio_driver(&virtio_scsi_driver);
1149         mempool_destroy(virtscsi_cmd_pool);
1150         kmem_cache_destroy(virtscsi_cmd_cache);
1151 }
1152 module_init(init);
1153 module_exit(fini);
1154 
1155 MODULE_DEVICE_TABLE(virtio, id_table);
1156 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
1157 MODULE_LICENSE("GPL");
1158 
