
Linux/drivers/scsi/virtio_scsi.c

  1 /*
  2  * Virtio SCSI HBA driver
  3  *
  4  * Copyright IBM Corp. 2010
  5  * Copyright Red Hat, Inc. 2011
  6  *
  7  * Authors:
  8  *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
  9  *  Paolo Bonzini   <pbonzini@redhat.com>
 10  *
 11  * This work is licensed under the terms of the GNU GPL, version 2 or later.
 12  * See the COPYING file in the top-level directory.
 13  *
 14  */
 15 
 16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 17 
 18 #include <linux/module.h>
 19 #include <linux/slab.h>
 20 #include <linux/mempool.h>
 21 #include <linux/virtio.h>
 22 #include <linux/virtio_ids.h>
 23 #include <linux/virtio_config.h>
 24 #include <linux/virtio_scsi.h>
 25 #include <linux/cpu.h>
 26 #include <linux/blkdev.h>
 27 #include <scsi/scsi_host.h>
 28 #include <scsi/scsi_device.h>
 29 #include <scsi/scsi_cmnd.h>
 30 #include <scsi/scsi_tcq.h>
 31 #include <linux/seqlock.h>
 32 
 33 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 34 #define VIRTIO_SCSI_EVENT_LEN 8
 35 #define VIRTIO_SCSI_VQ_BASE 2
 36 
 37 /* Command queue element */
 38 struct virtio_scsi_cmd {
 39         struct scsi_cmnd *sc;
 40         struct completion *comp;
 41         union {
 42                 struct virtio_scsi_cmd_req       cmd;
 43                 struct virtio_scsi_cmd_req_pi    cmd_pi;
 44                 struct virtio_scsi_ctrl_tmf_req  tmf;
 45                 struct virtio_scsi_ctrl_an_req   an;
 46         } req;
 47         union {
 48                 struct virtio_scsi_cmd_resp      cmd;
 49                 struct virtio_scsi_ctrl_tmf_resp tmf;
 50                 struct virtio_scsi_ctrl_an_resp  an;
 51                 struct virtio_scsi_event         evt;
 52         } resp;
 53 } ____cacheline_aligned_in_smp;
 54 
 55 struct virtio_scsi_event_node {
 56         struct virtio_scsi *vscsi;
 57         struct virtio_scsi_event event;
 58         struct work_struct work;
 59 };
 60 
 61 struct virtio_scsi_vq {
 62         /* Protects vq */
 63         spinlock_t vq_lock;
 64 
 65         struct virtqueue *vq;
 66 };
 67 
 68 /*
 69  * Per-target queue state.
 70  *
 71  * This struct holds the data needed by the queue steering policy.  When a
 72  * target is sent multiple requests, we need to drive them to the same queue so
 73  * that FIFO processing order is kept.  However, if a target was idle, we can
 74  * choose a queue arbitrarily.  In this case the queue is chosen according to
 75  * the current VCPU, so the driver expects the number of request queues to be
 76  * equal to the number of VCPUs.  This makes it easy and fast to select the
 77  * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 78  * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 79  *
 80  * tgt_seq is held to serialize reading and writing req_vq.
 81  *
 82  * Decrements of reqs are never concurrent with writes of req_vq: before the
 83  * decrement reqs will be != 0; after the decrement the virtqueue completion
 84  * routine will not use the req_vq so it can be changed by a new request.
 85  * Thus they can happen outside the tgt_seq, provided of course we make reqs
 86  * an atomic_t.
 87  */
 88 struct virtio_scsi_target_state {
 89         seqcount_t tgt_seq;
 90 
 91         /* Count of outstanding requests. */
 92         atomic_t reqs;
 93 
 94         /* Currently active virtqueue for requests sent to this target. */
 95         struct virtio_scsi_vq *req_vq;
 96 };
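
/*
 * Illustrative sketch (not part of the driver code itself): the read side of
 * the policy described above follows the usual seqcount retry pattern,
 * roughly
 *
 *	do {
 *		seq = read_seqcount_begin(&tgt->tgt_seq);
 *		vq = tgt->req_vq;
 *	} while (read_seqcount_retry(&tgt->tgt_seq, seq));
 *
 * while the write side (an idle target picking a new queue) is bracketed by
 * write_seqcount_begin()/write_seqcount_end().  See virtscsi_pick_vq() below
 * for the actual implementation.
 */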
 97 
 98 /* Driver instance state */
 99 struct virtio_scsi {
100         struct virtio_device *vdev;
101 
102         /* Get some buffers ready for event vq */
103         struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
104 
105         u32 num_queues;
106 
107         /* If the affinity hint is set for virtqueues */
108         bool affinity_hint_set;
109 
110         /* CPU hotplug notifier */
111         struct notifier_block nb;
112 
113         /* Protected by event_vq lock */
114         bool stop_events;
115 
116         struct virtio_scsi_vq ctrl_vq;
117         struct virtio_scsi_vq event_vq;
118         struct virtio_scsi_vq req_vqs[];
119 };
120 
121 static struct kmem_cache *virtscsi_cmd_cache;
122 static mempool_t *virtscsi_cmd_pool;
123 
124 static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
125 {
126         return vdev->priv;
127 }
128 
129 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
130 {
131         if (!resid)
132                 return;
133 
134         if (!scsi_bidi_cmnd(sc)) {
135                 scsi_set_resid(sc, resid);
136                 return;
137         }
138 
139         scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
140         scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
141 }
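
/*
 * Worked example with hypothetical values: for a bidirectional command with
 * resid = 100 and a 60-byte data-in buffer, the code above reports an
 * in-residual of min(100, 60) = 60 and an out-residual of 100 - 60 = 40, so
 * the two directions together account for the whole residual count.
 */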
142 
143 /**
144  * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
145  *
146  * Called with vq_lock held.
147  */
148 static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
149 {
150         struct virtio_scsi_cmd *cmd = buf;
151         struct scsi_cmnd *sc = cmd->sc;
152         struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
153         struct virtio_scsi_target_state *tgt =
154                                 scsi_target(sc->device)->hostdata;
155 
156         dev_dbg(&sc->device->sdev_gendev,
157                 "cmd %p response %u status %#02x sense_len %u\n",
158                 sc, resp->response, resp->status, resp->sense_len);
159 
160         sc->result = resp->status;
161         virtscsi_compute_resid(sc, resp->resid);
162         switch (resp->response) {
163         case VIRTIO_SCSI_S_OK:
164                 set_host_byte(sc, DID_OK);
165                 break;
166         case VIRTIO_SCSI_S_OVERRUN:
167                 set_host_byte(sc, DID_ERROR);
168                 break;
169         case VIRTIO_SCSI_S_ABORTED:
170                 set_host_byte(sc, DID_ABORT);
171                 break;
172         case VIRTIO_SCSI_S_BAD_TARGET:
173                 set_host_byte(sc, DID_BAD_TARGET);
174                 break;
175         case VIRTIO_SCSI_S_RESET:
176                 set_host_byte(sc, DID_RESET);
177                 break;
178         case VIRTIO_SCSI_S_BUSY:
179                 set_host_byte(sc, DID_BUS_BUSY);
180                 break;
181         case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
182                 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
183                 break;
184         case VIRTIO_SCSI_S_TARGET_FAILURE:
185                 set_host_byte(sc, DID_TARGET_FAILURE);
186                 break;
187         case VIRTIO_SCSI_S_NEXUS_FAILURE:
188                 set_host_byte(sc, DID_NEXUS_FAILURE);
189                 break;
190         default:
191                 scmd_printk(KERN_WARNING, sc, "Unknown response %d",
192                             resp->response);
193                 /* fall through */
194         case VIRTIO_SCSI_S_FAILURE:
195                 set_host_byte(sc, DID_ERROR);
196                 break;
197         }
198 
199         WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
200         if (sc->sense_buffer) {
201                 memcpy(sc->sense_buffer, resp->sense,
202                        min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
203                 if (resp->sense_len)
204                         set_driver_byte(sc, DRIVER_SENSE);
205         }
206 
207         sc->scsi_done(sc);
208 
209         atomic_dec(&tgt->reqs);
210 }
211 
212 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
213                              struct virtio_scsi_vq *virtscsi_vq,
214                              void (*fn)(struct virtio_scsi *vscsi, void *buf))
215 {
216         void *buf;
217         unsigned int len;
218         unsigned long flags;
219         struct virtqueue *vq = virtscsi_vq->vq;
220 
221         spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
222         do {
223                 virtqueue_disable_cb(vq);
224                 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
225                         fn(vscsi, buf);
226 
227                 if (unlikely(virtqueue_is_broken(vq)))
228                         break;
229         } while (!virtqueue_enable_cb(vq));
230         spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
231 }
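
/*
 * The loop above is the usual virtio completion pattern: callbacks are
 * disabled while used buffers are drained with virtqueue_get_buf(), and a
 * false return from virtqueue_enable_cb() means more buffers arrived in the
 * meantime, so the queue is drained again rather than risking a missed
 * interrupt.
 */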
232 
233 static void virtscsi_req_done(struct virtqueue *vq)
234 {
235         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
236         struct virtio_scsi *vscsi = shost_priv(sh);
237         int index = vq->index - VIRTIO_SCSI_VQ_BASE;
238         struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
239 
240         virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 241 }
242 
243 static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
244 {
245         int i, num_vqs;
246 
247         num_vqs = vscsi->num_queues;
248         for (i = 0; i < num_vqs; i++)
249                 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
250                                  virtscsi_complete_cmd);
251 }
252 
253 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
254 {
255         struct virtio_scsi_cmd *cmd = buf;
256 
257         if (cmd->comp)
258                 complete_all(cmd->comp);
259 }
260 
261 static void virtscsi_ctrl_done(struct virtqueue *vq)
262 {
263         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
264         struct virtio_scsi *vscsi = shost_priv(sh);
265 
266         virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 267 }
268 
269 static void virtscsi_handle_event(struct work_struct *work);
270 
271 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
272                                struct virtio_scsi_event_node *event_node)
273 {
274         int err;
275         struct scatterlist sg;
276         unsigned long flags;
277 
278         INIT_WORK(&event_node->work, virtscsi_handle_event);
279         sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
280 
281         spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
282 
283         err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
284                                   GFP_ATOMIC);
285         if (!err)
286                 virtqueue_kick(vscsi->event_vq.vq);
287 
288         spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
289 
290         return err;
291 }
292 
293 static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
294 {
295         int i;
296 
297         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
298                 vscsi->event_list[i].vscsi = vscsi;
299                 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
300         }
301 
302         return 0;
303 }
304 
305 static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
306 {
307         int i;
308 
309         /* Stop scheduling work before calling cancel_work_sync.  */
310         spin_lock_irq(&vscsi->event_vq.vq_lock);
311         vscsi->stop_events = true;
312         spin_unlock_irq(&vscsi->event_vq.vq_lock);
313 
314         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
315                 cancel_work_sync(&vscsi->event_list[i].work);
316 }
317 
318 static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
319                                             struct virtio_scsi_event *event)
320 {
321         struct scsi_device *sdev;
322         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
323         unsigned int target = event->lun[1];
324         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
325 
326         switch (event->reason) {
327         case VIRTIO_SCSI_EVT_RESET_RESCAN:
328                 scsi_add_device(shost, 0, target, lun);
329                 break;
330         case VIRTIO_SCSI_EVT_RESET_REMOVED:
331                 sdev = scsi_device_lookup(shost, 0, target, lun);
332                 if (sdev) {
333                         scsi_remove_device(sdev);
334                         scsi_device_put(sdev);
335                 } else {
336                         pr_err("SCSI device %d 0 %d %d not found\n",
337                                 shost->host_no, target, lun);
338                 }
339                 break;
340         default:
 341                 pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
342         }
343 }
344 
345 static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
346                                          struct virtio_scsi_event *event)
347 {
348         struct scsi_device *sdev;
349         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
350         unsigned int target = event->lun[1];
351         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
352         u8 asc = event->reason & 255;
353         u8 ascq = event->reason >> 8;
354 
355         sdev = scsi_device_lookup(shost, 0, target, lun);
356         if (!sdev) {
357                 pr_err("SCSI device %d 0 %d %d not found\n",
358                         shost->host_no, target, lun);
359                 return;
360         }
361 
362         /* Handle "Parameters changed", "Mode parameters changed", and
363            "Capacity data has changed".  */
364         if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
365                 scsi_rescan_device(&sdev->sdev_gendev);
366 
367         scsi_device_put(sdev);
368 }
369 
370 static void virtscsi_handle_event(struct work_struct *work)
371 {
372         struct virtio_scsi_event_node *event_node =
373                 container_of(work, struct virtio_scsi_event_node, work);
374         struct virtio_scsi *vscsi = event_node->vscsi;
375         struct virtio_scsi_event *event = &event_node->event;
376 
377         if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
378                 event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
379                 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
380         }
381 
382         switch (event->event) {
383         case VIRTIO_SCSI_T_NO_EVENT:
384                 break;
385         case VIRTIO_SCSI_T_TRANSPORT_RESET:
386                 virtscsi_handle_transport_reset(vscsi, event);
387                 break;
388         case VIRTIO_SCSI_T_PARAM_CHANGE:
389                 virtscsi_handle_param_change(vscsi, event);
390                 break;
391         default:
 392                 pr_err("Unsupported virtio scsi event %x\n", event->event);
393         }
394         virtscsi_kick_event(vscsi, event_node);
395 }
396 
397 static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
398 {
399         struct virtio_scsi_event_node *event_node = buf;
400 
401         if (!vscsi->stop_events)
402                 queue_work(system_freezable_wq, &event_node->work);
403 }
404 
405 static void virtscsi_event_done(struct virtqueue *vq)
406 {
407         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
408         struct virtio_scsi *vscsi = shost_priv(sh);
409 
410         virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 411 }
412 
413 /**
414  * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
415  * @vq          : the struct virtqueue we're talking about
416  * @cmd         : command structure
417  * @req_size    : size of the request buffer
418  * @resp_size   : size of the response buffer
419  */
420 static int virtscsi_add_cmd(struct virtqueue *vq,
421                             struct virtio_scsi_cmd *cmd,
422                             size_t req_size, size_t resp_size)
423 {
424         struct scsi_cmnd *sc = cmd->sc;
425         struct scatterlist *sgs[6], req, resp;
426         struct sg_table *out, *in;
427         unsigned out_num = 0, in_num = 0;
428 
429         out = in = NULL;
430 
431         if (sc && sc->sc_data_direction != DMA_NONE) {
432                 if (sc->sc_data_direction != DMA_FROM_DEVICE)
433                         out = &scsi_out(sc)->table;
434                 if (sc->sc_data_direction != DMA_TO_DEVICE)
435                         in = &scsi_in(sc)->table;
436         }
437 
438         /* Request header.  */
439         sg_init_one(&req, &cmd->req, req_size);
440         sgs[out_num++] = &req;
441 
442         /* Data-out buffer.  */
443         if (out) {
444                 /* Place WRITE protection SGLs before Data OUT payload */
445                 if (scsi_prot_sg_count(sc))
446                         sgs[out_num++] = scsi_prot_sglist(sc);
447                 sgs[out_num++] = out->sgl;
448         }
449 
450         /* Response header.  */
451         sg_init_one(&resp, &cmd->resp, resp_size);
452         sgs[out_num + in_num++] = &resp;
453 
454         /* Data-in buffer */
455         if (in) {
456                 /* Place READ protection SGLs before Data IN payload */
457                 if (scsi_prot_sg_count(sc))
458                         sgs[out_num + in_num++] = scsi_prot_sglist(sc);
459                 sgs[out_num + in_num++] = in->sgl;
460         }
461 
462         return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
463 }
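
/*
 * Layout note: sgs[] above holds at most six entries, in this order:
 * request header, WRITE protection SGL (if any), data-out payload,
 * response header, READ protection SGL (if any), data-in payload.
 * out_num counts the device-readable entries and in_num the
 * device-writable ones, matching the virtqueue_add_sgs() convention.
 */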
464 
465 static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
466                              struct virtio_scsi_cmd *cmd,
467                              size_t req_size, size_t resp_size)
468 {
469         unsigned long flags;
470         int err;
471         bool needs_kick = false;
472 
473         spin_lock_irqsave(&vq->vq_lock, flags);
474         err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
475         if (!err)
476                 needs_kick = virtqueue_kick_prepare(vq->vq);
477 
478         spin_unlock_irqrestore(&vq->vq_lock, flags);
479 
480         if (needs_kick)
481                 virtqueue_notify(vq->vq);
482         return err;
483 }
484 
485 static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
486                                  struct scsi_cmnd *sc)
487 {
488         cmd->lun[0] = 1;
489         cmd->lun[1] = sc->device->id;
490         cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
491         cmd->lun[3] = sc->device->lun & 0xff;
492         cmd->tag = (unsigned long)sc;
493         cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
494         cmd->prio = 0;
495         cmd->crn = 0;
496 }
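
/*
 * Example of the LUN encoding above (hypothetical values): target 2, LUN 5
 * yields lun[] = { 1, 2, 0x40, 0x05 }.  Byte 0 is the fixed value required
 * by the virtio-scsi LUN format, byte 1 is the target id, and bytes 2-3
 * carry the LUN in SAM flat addressing (0x4000 | lun), which is why
 * virtscsi_probe() below places max_lun in the 0x4000 range.
 */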
497 
498 static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
499                                     struct scsi_cmnd *sc)
500 {
501         struct request *rq = sc->request;
502         struct blk_integrity *bi;
503 
504         virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
505 
506         if (!rq || !scsi_prot_sg_count(sc))
507                 return;
508 
509         bi = blk_get_integrity(rq->rq_disk);
510 
511         if (sc->sc_data_direction == DMA_TO_DEVICE)
512                 cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
513         else if (sc->sc_data_direction == DMA_FROM_DEVICE)
514                 cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
515 }
516 
517 static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
518                                  struct virtio_scsi_vq *req_vq,
519                                  struct scsi_cmnd *sc)
520 {
521         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
522         struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
523         int req_size;
524 
525         BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
526 
527         /* TODO: check feature bit and fail if unsupported?  */
528         BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
529 
530         dev_dbg(&sc->device->sdev_gendev,
531                 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
532 
533         memset(cmd, 0, sizeof(*cmd));
534         cmd->sc = sc;
535 
536         BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
537 
538         if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
539                 virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
540                 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
541                 req_size = sizeof(cmd->req.cmd_pi);
542         } else {
543                 virtio_scsi_init_hdr(&cmd->req.cmd, sc);
544                 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
545                 req_size = sizeof(cmd->req.cmd);
546         }
547 
548         if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
549                 return SCSI_MLQUEUE_HOST_BUSY;
550         return 0;
551 }
552 
553 static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
554                                         struct scsi_cmnd *sc)
555 {
556         struct virtio_scsi *vscsi = shost_priv(sh);
557         struct virtio_scsi_target_state *tgt =
558                                 scsi_target(sc->device)->hostdata;
559 
560         atomic_inc(&tgt->reqs);
561         return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
562 }
563 
564 static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
565                                                struct virtio_scsi_target_state *tgt)
566 {
567         struct virtio_scsi_vq *vq;
568         unsigned long flags;
569         u32 queue_num;
570 
571         local_irq_save(flags);
572         if (atomic_inc_return(&tgt->reqs) > 1) {
573                 unsigned long seq;
574 
575                 do {
576                         seq = read_seqcount_begin(&tgt->tgt_seq);
577                         vq = tgt->req_vq;
578                 } while (read_seqcount_retry(&tgt->tgt_seq, seq));
579         } else {
580                 /* no writes can be concurrent because of atomic_t */
581                 write_seqcount_begin(&tgt->tgt_seq);
582 
583                 /* keep previous req_vq if a reader just arrived */
584                 if (unlikely(atomic_read(&tgt->reqs) > 1)) {
585                         vq = tgt->req_vq;
586                         goto unlock;
587                 }
588 
589                 queue_num = smp_processor_id();
590                 while (unlikely(queue_num >= vscsi->num_queues))
591                         queue_num -= vscsi->num_queues;
592                 tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
593  unlock:
594                 write_seqcount_end(&tgt->tgt_seq);
595         }
596         local_irq_restore(flags);
597 
598         return vq;
599 }
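
/*
 * Note on the queue selection above: the subtraction loop is just
 * queue_num % vscsi->num_queues spelled out, so a CPU id larger than the
 * number of request queues still maps onto a valid queue.  When the queue
 * count equals the number of online CPUs the mapping is the identity, which
 * matches the per-CPU affinity hints set up in __virtscsi_set_affinity().
 */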
600 
601 static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
602                                        struct scsi_cmnd *sc)
603 {
604         struct virtio_scsi *vscsi = shost_priv(sh);
605         struct virtio_scsi_target_state *tgt =
606                                 scsi_target(sc->device)->hostdata;
607         struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
608 
609         return virtscsi_queuecommand(vscsi, req_vq, sc);
610 }
611 
612 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
613 {
614         DECLARE_COMPLETION_ONSTACK(comp);
615         int ret = FAILED;
616 
617         cmd->comp = &comp;
618         if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
619                               sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
620                 goto out;
621 
622         wait_for_completion(&comp);
623         if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
624             cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
625                 ret = SUCCESS;
626 
627         /*
628          * The spec guarantees that all requests related to the TMF have
629          * been completed, but the callback might not have run yet if
630          * we're using independent interrupts (e.g. MSI).  Poll the
631          * virtqueues once.
632          *
633          * In the abort case, sc->scsi_done will do nothing, because
634          * the block layer must have detected a timeout and as a result
635          * REQ_ATOM_COMPLETE has been set.
636          */
637         virtscsi_poll_requests(vscsi);
638 
639 out:
640         mempool_free(cmd, virtscsi_cmd_pool);
641         return ret;
642 }
643 
644 static int virtscsi_device_reset(struct scsi_cmnd *sc)
645 {
646         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
647         struct virtio_scsi_cmd *cmd;
648 
649         sdev_printk(KERN_INFO, sc->device, "device reset\n");
650         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
651         if (!cmd)
652                 return FAILED;
653 
654         memset(cmd, 0, sizeof(*cmd));
655         cmd->sc = sc;
656         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
657                 .type = VIRTIO_SCSI_T_TMF,
658                 .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
659                 .lun[0] = 1,
660                 .lun[1] = sc->device->id,
661                 .lun[2] = (sc->device->lun >> 8) | 0x40,
662                 .lun[3] = sc->device->lun & 0xff,
663         };
664         return virtscsi_tmf(vscsi, cmd);
665 }
666 
667 /**
668  * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
669  * @sdev:       Virtscsi target whose queue depth to change
670  * @qdepth:     New queue depth
671  * @reason:     Reason for the queue depth change.
672  */
673 static int virtscsi_change_queue_depth(struct scsi_device *sdev,
674                                        int qdepth,
675                                        int reason)
676 {
677         struct Scsi_Host *shost = sdev->host;
678         int max_depth = shost->cmd_per_lun;
679 
680         switch (reason) {
681         case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
682                 scsi_track_queue_full(sdev, qdepth);
683                 break;
684         case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
685         case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
686                 scsi_adjust_queue_depth(sdev,
687                                         scsi_get_tag_type(sdev),
688                                         min(max_depth, qdepth));
689                 break;
690         default:
691                 return -EOPNOTSUPP;
692         }
693 
694         return sdev->queue_depth;
695 }
696 
697 static int virtscsi_abort(struct scsi_cmnd *sc)
698 {
699         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
700         struct virtio_scsi_cmd *cmd;
701 
702         scmd_printk(KERN_INFO, sc, "abort\n");
703         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
704         if (!cmd)
705                 return FAILED;
706 
707         memset(cmd, 0, sizeof(*cmd));
708         cmd->sc = sc;
709         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
710                 .type = VIRTIO_SCSI_T_TMF,
711                 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
712                 .lun[0] = 1,
713                 .lun[1] = sc->device->id,
714                 .lun[2] = (sc->device->lun >> 8) | 0x40,
715                 .lun[3] = sc->device->lun & 0xff,
716                 .tag = (unsigned long)sc,
717         };
718         return virtscsi_tmf(vscsi, cmd);
719 }
720 
721 static int virtscsi_target_alloc(struct scsi_target *starget)
722 {
723         struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
724         struct virtio_scsi *vscsi = shost_priv(sh);
725 
726         struct virtio_scsi_target_state *tgt =
727                                 kmalloc(sizeof(*tgt), GFP_KERNEL);
728         if (!tgt)
729                 return -ENOMEM;
730 
731         seqcount_init(&tgt->tgt_seq);
732         atomic_set(&tgt->reqs, 0);
733         tgt->req_vq = &vscsi->req_vqs[0];
734 
735         starget->hostdata = tgt;
736         return 0;
737 }
738 
739 static void virtscsi_target_destroy(struct scsi_target *starget)
740 {
741         struct virtio_scsi_target_state *tgt = starget->hostdata;
742         kfree(tgt);
743 }
744 
745 static struct scsi_host_template virtscsi_host_template_single = {
746         .module = THIS_MODULE,
747         .name = "Virtio SCSI HBA",
748         .proc_name = "virtio_scsi",
749         .this_id = -1,
750         .cmd_size = sizeof(struct virtio_scsi_cmd),
751         .queuecommand = virtscsi_queuecommand_single,
752         .change_queue_depth = virtscsi_change_queue_depth,
753         .eh_abort_handler = virtscsi_abort,
754         .eh_device_reset_handler = virtscsi_device_reset,
755 
756         .can_queue = 1024,
757         .dma_boundary = UINT_MAX,
758         .use_clustering = ENABLE_CLUSTERING,
759         .target_alloc = virtscsi_target_alloc,
760         .target_destroy = virtscsi_target_destroy,
761 };
762 
763 static struct scsi_host_template virtscsi_host_template_multi = {
764         .module = THIS_MODULE,
765         .name = "Virtio SCSI HBA",
766         .proc_name = "virtio_scsi",
767         .this_id = -1,
768         .cmd_size = sizeof(struct virtio_scsi_cmd),
769         .queuecommand = virtscsi_queuecommand_multi,
770         .change_queue_depth = virtscsi_change_queue_depth,
771         .eh_abort_handler = virtscsi_abort,
772         .eh_device_reset_handler = virtscsi_device_reset,
773 
774         .can_queue = 1024,
775         .dma_boundary = UINT_MAX,
776         .use_clustering = ENABLE_CLUSTERING,
777         .target_alloc = virtscsi_target_alloc,
778         .target_destroy = virtscsi_target_destroy,
779 };
780 
781 #define virtscsi_config_get(vdev, fld) \
782         ({ \
783                 typeof(((struct virtio_scsi_config *)0)->fld) __val; \
784                 virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
785                 __val; \
786         })
787 
788 #define virtscsi_config_set(vdev, fld, val) \
789         do { \
790                 typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
791                 virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
 792         } while (0)
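
/*
 * Usage sketch, equivalent to the calls made in virtscsi_init() and
 * virtscsi_probe() below:
 *
 *	u32 n = virtscsi_config_get(vdev, num_queues);
 *	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
 *
 * Each macro expands to a type-correct virtio_cread()/virtio_cwrite() of a
 * single field of struct virtio_scsi_config.
 */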
793 
794 static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
795 {
796         int i;
797         int cpu;
798 
 799         /* In multiqueue mode, when the number of CPUs is equal
 800          * to the number of request queues, we let each queue be
 801          * private to one CPU by setting the affinity hint, in
 802          * order to eliminate contention.
 803          */
804         if ((vscsi->num_queues == 1 ||
805              vscsi->num_queues != num_online_cpus()) && affinity) {
806                 if (vscsi->affinity_hint_set)
807                         affinity = false;
808                 else
809                         return;
810         }
811 
812         if (affinity) {
813                 i = 0;
814                 for_each_online_cpu(cpu) {
815                         virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
816                         i++;
817                 }
818 
819                 vscsi->affinity_hint_set = true;
820         } else {
821                 for (i = 0; i < vscsi->num_queues; i++) {
822                         if (!vscsi->req_vqs[i].vq)
823                                 continue;
824 
825                         virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
826                 }
827 
828                 vscsi->affinity_hint_set = false;
829         }
830 }
831 
832 static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
833 {
834         get_online_cpus();
835         __virtscsi_set_affinity(vscsi, affinity);
836         put_online_cpus();
837 }
838 
839 static int virtscsi_cpu_callback(struct notifier_block *nfb,
840                                  unsigned long action, void *hcpu)
841 {
842         struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
843         switch(action) {
844         case CPU_ONLINE:
845         case CPU_ONLINE_FROZEN:
846         case CPU_DEAD:
847         case CPU_DEAD_FROZEN:
848                 __virtscsi_set_affinity(vscsi, true);
849                 break;
850         default:
851                 break;
852         }
853         return NOTIFY_OK;
854 }
855 
856 static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
857                              struct virtqueue *vq)
858 {
859         spin_lock_init(&virtscsi_vq->vq_lock);
860         virtscsi_vq->vq = vq;
861 }
862 
863 static void virtscsi_remove_vqs(struct virtio_device *vdev)
864 {
865         struct Scsi_Host *sh = virtio_scsi_host(vdev);
866         struct virtio_scsi *vscsi = shost_priv(sh);
867 
868         virtscsi_set_affinity(vscsi, false);
869 
870         /* Stop all the virtqueues. */
871         vdev->config->reset(vdev);
872 
873         vdev->config->del_vqs(vdev);
874 }
875 
876 static int virtscsi_init(struct virtio_device *vdev,
877                          struct virtio_scsi *vscsi)
878 {
879         int err;
880         u32 i;
881         u32 num_vqs;
882         vq_callback_t **callbacks;
883         const char **names;
884         struct virtqueue **vqs;
885 
886         num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
887         vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
888         callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
889         names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
890 
891         if (!callbacks || !vqs || !names) {
892                 err = -ENOMEM;
893                 goto out;
894         }
895 
896         callbacks[0] = virtscsi_ctrl_done;
897         callbacks[1] = virtscsi_event_done;
898         names[0] = "control";
899         names[1] = "event";
900         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
901                 callbacks[i] = virtscsi_req_done;
902                 names[i] = "request";
903         }
904 
905         /* Discover virtqueues and write information to configuration.  */
906         err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
907         if (err)
908                 goto out;
909 
910         virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
911         virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
912         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
913                 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
914                                  vqs[i]);
915 
916         virtscsi_set_affinity(vscsi, true);
917 
918         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
919         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
920 
921         err = 0;
922 
923 out:
924         kfree(names);
925         kfree(callbacks);
926         kfree(vqs);
927         if (err)
928                 virtscsi_remove_vqs(vdev);
929         return err;
930 }
931 
932 static int virtscsi_probe(struct virtio_device *vdev)
933 {
934         struct Scsi_Host *shost;
935         struct virtio_scsi *vscsi;
936         int err, host_prot;
937         u32 sg_elems, num_targets;
938         u32 cmd_per_lun;
939         u32 num_queues;
940         struct scsi_host_template *hostt;
941 
942         /* We need to know how many queues before we allocate. */
943         num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
944 
945         num_targets = virtscsi_config_get(vdev, max_target) + 1;
946 
947         if (num_queues == 1)
948                 hostt = &virtscsi_host_template_single;
949         else
950                 hostt = &virtscsi_host_template_multi;
951 
952         shost = scsi_host_alloc(hostt,
953                 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
954         if (!shost)
955                 return -ENOMEM;
956 
957         sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
958         shost->sg_tablesize = sg_elems;
959         vscsi = shost_priv(shost);
960         vscsi->vdev = vdev;
961         vscsi->num_queues = num_queues;
962         vdev->priv = shost;
963 
964         err = virtscsi_init(vdev, vscsi);
965         if (err)
966                 goto virtscsi_init_failed;
967 
968         vscsi->nb.notifier_call = &virtscsi_cpu_callback;
969         err = register_hotcpu_notifier(&vscsi->nb);
970         if (err) {
971                 pr_err("registering cpu notifier failed\n");
972                 goto scsi_add_host_failed;
973         }
974 
975         cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
976         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
977         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
978 
979         /* LUNs > 256 are reported with format 1, so they go in the range
980          * 16640-32767.
981          */
982         shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
983         shost->max_id = num_targets;
984         shost->max_channel = 0;
985         shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
986 
987         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
988                 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
989                             SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
990                             SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
991 
992                 scsi_host_set_prot(shost, host_prot);
993                 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
994         }
995 
996         err = scsi_add_host(shost, &vdev->dev);
997         if (err)
998                 goto scsi_add_host_failed;
999 
1000         virtio_device_ready(vdev);
1001 
1002         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1003                 virtscsi_kick_event_all(vscsi);
1004 
1005         scsi_scan_host(shost);
1006         return 0;
1007 
1008 scsi_add_host_failed:
1009         vdev->config->del_vqs(vdev);
1010 virtscsi_init_failed:
1011         scsi_host_put(shost);
1012         return err;
1013 }
1014 
1015 static void virtscsi_remove(struct virtio_device *vdev)
1016 {
1017         struct Scsi_Host *shost = virtio_scsi_host(vdev);
1018         struct virtio_scsi *vscsi = shost_priv(shost);
1019 
1020         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1021                 virtscsi_cancel_event_work(vscsi);
1022 
1023         scsi_remove_host(shost);
1024 
1025         unregister_hotcpu_notifier(&vscsi->nb);
1026 
1027         virtscsi_remove_vqs(vdev);
1028         scsi_host_put(shost);
1029 }
1030 
1031 #ifdef CONFIG_PM_SLEEP
1032 static int virtscsi_freeze(struct virtio_device *vdev)
1033 {
1034         struct Scsi_Host *sh = virtio_scsi_host(vdev);
1035         struct virtio_scsi *vscsi = shost_priv(sh);
1036 
1037         unregister_hotcpu_notifier(&vscsi->nb);
1038         virtscsi_remove_vqs(vdev);
1039         return 0;
1040 }
1041 
1042 static int virtscsi_restore(struct virtio_device *vdev)
1043 {
1044         struct Scsi_Host *sh = virtio_scsi_host(vdev);
1045         struct virtio_scsi *vscsi = shost_priv(sh);
1046         int err;
1047 
1048         err = virtscsi_init(vdev, vscsi);
1049         if (err)
1050                 return err;
1051 
1052         err = register_hotcpu_notifier(&vscsi->nb);
1053         if (err) {
1054                 vdev->config->del_vqs(vdev);
1055                 return err;
1056         }
1057 
1058         virtio_device_ready(vdev);
1059 
1060         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
1061                 virtscsi_kick_event_all(vscsi);
1062 
1063         return err;
1064 }
1065 #endif
1066 
1067 static struct virtio_device_id id_table[] = {
1068         { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
1069         { 0 },
1070 };
1071 
1072 static unsigned int features[] = {
1073         VIRTIO_SCSI_F_HOTPLUG,
1074         VIRTIO_SCSI_F_CHANGE,
1075         VIRTIO_SCSI_F_T10_PI,
1076 };
1077 
1078 static struct virtio_driver virtio_scsi_driver = {
1079         .feature_table = features,
1080         .feature_table_size = ARRAY_SIZE(features),
1081         .driver.name = KBUILD_MODNAME,
1082         .driver.owner = THIS_MODULE,
1083         .id_table = id_table,
1084         .probe = virtscsi_probe,
1085 #ifdef CONFIG_PM_SLEEP
1086         .freeze = virtscsi_freeze,
1087         .restore = virtscsi_restore,
1088 #endif
1089         .remove = virtscsi_remove,
1090 };
1091 
1092 static int __init init(void)
1093 {
1094         int ret = -ENOMEM;
1095 
1096         virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
1097         if (!virtscsi_cmd_cache) {
1098                 pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
1099                 goto error;
1100         }
1101 
1102 
1103         virtscsi_cmd_pool =
1104                 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
1105                                          virtscsi_cmd_cache);
1106         if (!virtscsi_cmd_pool) {
1107                 pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
1108                 goto error;
1109         }
1110         ret = register_virtio_driver(&virtio_scsi_driver);
1111         if (ret < 0)
1112                 goto error;
1113 
1114         return 0;
1115 
1116 error:
1117         if (virtscsi_cmd_pool) {
1118                 mempool_destroy(virtscsi_cmd_pool);
1119                 virtscsi_cmd_pool = NULL;
1120         }
1121         if (virtscsi_cmd_cache) {
1122                 kmem_cache_destroy(virtscsi_cmd_cache);
1123                 virtscsi_cmd_cache = NULL;
1124         }
1125         return ret;
1126 }
1127 
1128 static void __exit fini(void)
1129 {
1130         unregister_virtio_driver(&virtio_scsi_driver);
1131         mempool_destroy(virtscsi_cmd_pool);
1132         kmem_cache_destroy(virtscsi_cmd_cache);
1133 }
1134 module_init(init);
1135 module_exit(fini);
1136 
1137 MODULE_DEVICE_TABLE(virtio, id_table);
1138 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
1139 MODULE_LICENSE("GPL");
1140 
