XXXX introduce drv_sectohz
--- old/usr/src/uts/common/io/vioblk/vioblk.c
+++ new/usr/src/uts/common/io/vioblk/vioblk.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2012, Alexey Zaytsev <alexey.zaytsev@gmail.com>
25 25 */
26 26
27 27
28 28 #include <sys/modctl.h>
29 29 #include <sys/blkdev.h>
30 30 #include <sys/types.h>
31 31 #include <sys/errno.h>
32 32 #include <sys/param.h>
33 33 #include <sys/stropts.h>
34 34 #include <sys/stream.h>
35 35 #include <sys/strsubr.h>
36 36 #include <sys/kmem.h>
37 37 #include <sys/conf.h>
38 38 #include <sys/devops.h>
39 39 #include <sys/ksynch.h>
40 40 #include <sys/stat.h>
41 41 #include <sys/modctl.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/pci.h>
44 44 #include <sys/sysmacros.h>
45 45 #include "virtiovar.h"
46 46 #include "virtioreg.h"
47 47
48 48 /* Feature bits */
49 49 #define VIRTIO_BLK_F_BARRIER (1<<0)
50 50 #define VIRTIO_BLK_F_SIZE_MAX (1<<1)
51 51 #define VIRTIO_BLK_F_SEG_MAX (1<<2)
52 52 #define VIRTIO_BLK_F_GEOMETRY (1<<4)
53 53 #define VIRTIO_BLK_F_RO (1<<5)
54 54 #define VIRTIO_BLK_F_BLK_SIZE (1<<6)
55 55 #define VIRTIO_BLK_F_SCSI (1<<7)
56 56 #define VIRTIO_BLK_F_FLUSH (1<<9)
57 57 #define VIRTIO_BLK_F_TOPOLOGY (1<<10)
58 58
59 59 /* Configuration registers */
60 60 #define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
61 61 #define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
62 62 #define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
63 63 #define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
64 64 #define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
65 65 #define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
66 66 #define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
67 67 #define VIRTIO_BLK_CONFIG_TOPO_PBEXP 24 /* 8bit */
68 68 #define VIRTIO_BLK_CONFIG_TOPO_ALIGN 25 /* 8bit */
69 69 #define VIRTIO_BLK_CONFIG_TOPO_MIN_SZ 26 /* 16bit */
70 70 #define VIRTIO_BLK_CONFIG_TOPO_OPT_SZ 28 /* 32bit */
71 71
72 72 /* Command */
73 73 #define VIRTIO_BLK_T_IN 0
74 74 #define VIRTIO_BLK_T_OUT 1
75 75 #define VIRTIO_BLK_T_SCSI_CMD 2
76 76 #define VIRTIO_BLK_T_SCSI_CMD_OUT 3
77 77 #define VIRTIO_BLK_T_FLUSH 4
78 78 #define VIRTIO_BLK_T_FLUSH_OUT 5
79 79 #define VIRTIO_BLK_T_GET_ID 8
80 80 #define VIRTIO_BLK_T_BARRIER 0x80000000
81 81
82 82 #define VIRTIO_BLK_ID_BYTES 20 /* devid */
83 83
84 84 /* Statuses */
85 85 #define VIRTIO_BLK_S_OK 0
86 86 #define VIRTIO_BLK_S_IOERR 1
87 87 #define VIRTIO_BLK_S_UNSUPP 2
88 88
89 89 #define DEF_MAXINDIRECT (128)
90 90 #define DEF_MAXSECTOR (4096)
91 91
92 92 #define VIOBLK_POISON 0xdead0001dead0001
93 93
94 94 /*
95 95 * Static Variables.
96 96 */
97 97 static char vioblk_ident[] = "VirtIO block driver";
98 98
99 99 /* Request header structure */
100 100 struct vioblk_req_hdr {
101 101 uint32_t type; /* VIRTIO_BLK_T_* */
102 102 uint32_t ioprio;
103 103 uint64_t sector;
104 104 };
105 105
106 106 struct vioblk_req {
107 107 struct vioblk_req_hdr hdr;
108 108 uint8_t status;
109 109 uint8_t unused[3];
110 110 unsigned int ndmac;
111 111 ddi_dma_handle_t dmah;
112 112 ddi_dma_handle_t bd_dmah;
113 113 ddi_dma_cookie_t dmac;
114 114 bd_xfer_t *xfer;
115 115 };
116 116
117 117 struct vioblk_stats {
118 118 struct kstat_named sts_rw_outofmemory;
119 119 struct kstat_named sts_rw_badoffset;
120 120 struct kstat_named sts_rw_queuemax;
121 121 struct kstat_named sts_rw_cookiesmax;
122 122 struct kstat_named sts_rw_cacheflush;
123 123 struct kstat_named sts_intr_queuemax;
124 124 struct kstat_named sts_intr_total;
125 125 struct kstat_named sts_io_errors;
126 126 struct kstat_named sts_unsupp_errors;
127 127 struct kstat_named sts_nxio_errors;
128 128 };
129 129
130 130 struct vioblk_lstats {
131 131 uint64_t rw_cacheflush;
132 132 uint64_t intr_total;
133 133 unsigned int rw_cookiesmax;
134 134 unsigned int intr_queuemax;
135 135 unsigned int io_errors;
136 136 unsigned int unsupp_errors;
137 137 unsigned int nxio_errors;
138 138 };
139 139
140 140 struct vioblk_softc {
141 141 dev_info_t *sc_dev; /* mirrors virtio_softc->sc_dev */
142 142 struct virtio_softc sc_virtio;
143 143 struct virtqueue *sc_vq;
144 144 bd_handle_t bd_h;
145 145 struct vioblk_req *sc_reqs;
146 146 struct vioblk_stats *ks_data;
147 147 kstat_t *sc_intrstat;
148 148 uint64_t sc_capacity;
149 149 uint64_t sc_nblks;
150 150 struct vioblk_lstats sc_stats;
151 151 short sc_blkflags;
152 152 boolean_t sc_in_poll_mode;
153 153 boolean_t sc_readonly;
154 154 int sc_blk_size;
155 155 int sc_pblk_size;
156 156 int sc_seg_max;
157 157 int sc_seg_size_max;
158 158 kmutex_t lock_devid;
159 159 kcondvar_t cv_devid;
160 160 char devid[VIRTIO_BLK_ID_BYTES + 1];
161 161 };
162 162
163 163 static int vioblk_read(void *arg, bd_xfer_t *xfer);
164 164 static int vioblk_write(void *arg, bd_xfer_t *xfer);
165 165 static int vioblk_flush(void *arg, bd_xfer_t *xfer);
166 166 static void vioblk_driveinfo(void *arg, bd_drive_t *drive);
167 167 static int vioblk_mediainfo(void *arg, bd_media_t *media);
168 168 static int vioblk_devid_init(void *, dev_info_t *, ddi_devid_t *);
169 169 uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2);
170 170
171 171 static bd_ops_t vioblk_ops = {
172 172 BD_OPS_VERSION_0,
173 173 vioblk_driveinfo,
174 174 vioblk_mediainfo,
175 175 vioblk_devid_init,
176 176 vioblk_flush,
177 177 vioblk_read,
178 178 vioblk_write,
179 179 };
180 180
181 181 static int vioblk_quiesce(dev_info_t *);
182 182 static int vioblk_attach(dev_info_t *, ddi_attach_cmd_t);
183 183 static int vioblk_detach(dev_info_t *, ddi_detach_cmd_t);
184 184
185 185 static struct dev_ops vioblk_dev_ops = {
186 186 DEVO_REV,
187 187 0,
188 188 ddi_no_info,
189 189 nulldev, /* identify */
190 190 nulldev, /* probe */
191 191 vioblk_attach, /* attach */
192 192 vioblk_detach, /* detach */
193 193 nodev, /* reset */
194 194 NULL, /* cb_ops */
195 195 NULL, /* bus_ops */
196 196 NULL, /* power */
197 197 vioblk_quiesce /* quiesce */
198 198 };
199 199
200 200
201 201
 202  202 /* Standard module linkage initialization */
203 203 extern struct mod_ops mod_driverops;
204 204
205 205 static struct modldrv modldrv = {
206 206 &mod_driverops, /* Type of module. This one is a driver */
207 207 vioblk_ident, /* short description */
208 208 &vioblk_dev_ops /* driver specific ops */
209 209 };
210 210
211 211 static struct modlinkage modlinkage = {
212 212 MODREV_1,
213 213 {
214 214 (void *)&modldrv,
215 215 NULL,
216 216 },
217 217 };
218 218
219 219 ddi_device_acc_attr_t vioblk_attr = {
220 220 DDI_DEVICE_ATTR_V0,
221 221 DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
222 222 DDI_STORECACHING_OK_ACC,
223 223 DDI_DEFAULT_ACC
224 224 };
225 225
226 226 /* DMA attr for the header/status blocks. */
227 227 static ddi_dma_attr_t vioblk_req_dma_attr = {
228 228 DMA_ATTR_V0, /* dma_attr version */
229 229 0, /* dma_attr_addr_lo */
230 230 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
231 231 0x00000000FFFFFFFFull, /* dma_attr_count_max */
232 232 1, /* dma_attr_align */
233 233 1, /* dma_attr_burstsizes */
234 234 1, /* dma_attr_minxfer */
235 235 0xFFFFFFFFull, /* dma_attr_maxxfer */
236 236 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
237 237 1, /* dma_attr_sgllen */
238 238 1, /* dma_attr_granular */
239 239 0, /* dma_attr_flags */
240 240 };
241 241
242 242 /* DMA attr for the data blocks. */
243 243 static ddi_dma_attr_t vioblk_bd_dma_attr = {
244 244 DMA_ATTR_V0, /* dma_attr version */
245 245 0, /* dma_attr_addr_lo */
246 246 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
247 247 0x00000000FFFFFFFFull, /* dma_attr_count_max */
248 248 1, /* dma_attr_align */
249 249 1, /* dma_attr_burstsizes */
250 250 1, /* dma_attr_minxfer */
251 251 0, /* dma_attr_maxxfer, set in attach */
252 252 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
253 253 0, /* dma_attr_sgllen, set in attach */
254 254 1, /* dma_attr_granular */
255 255 0, /* dma_attr_flags */
256 256 };
257 257
258 258 static int
259 259 vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
260 260 uint32_t len)
261 261 {
262 262 struct vioblk_req *req;
263 263 struct vq_entry *ve_hdr;
264 264 int total_cookies, write;
265 265
266 266 write = (type == VIRTIO_BLK_T_OUT ||
267 267 type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
268 268 total_cookies = 2;
269 269
270 270 if ((xfer->x_blkno + xfer->x_nblks) > sc->sc_nblks) {
271 271 sc->ks_data->sts_rw_badoffset.value.ui64++;
272 272 return (EINVAL);
273 273 }
274 274
275 275 /* allocate top entry */
276 276 ve_hdr = vq_alloc_entry(sc->sc_vq);
277 277 if (!ve_hdr) {
278 278 sc->ks_data->sts_rw_outofmemory.value.ui64++;
279 279 return (ENOMEM);
280 280 }
281 281
282 282 /* getting request */
283 283 req = &sc->sc_reqs[ve_hdr->qe_index];
284 284 req->hdr.type = type;
285 285 req->hdr.ioprio = 0;
286 286 req->hdr.sector = xfer->x_blkno;
287 287 req->xfer = xfer;
288 288
289 289 /* Header */
290 290 virtio_ve_add_indirect_buf(ve_hdr, req->dmac.dmac_laddress,
291 291 sizeof (struct vioblk_req_hdr), B_TRUE);
292 292
293 293 /* Payload */
294 294 if (len > 0) {
295 295 virtio_ve_add_cookie(ve_hdr, xfer->x_dmah, xfer->x_dmac,
296 296 xfer->x_ndmac, write ? B_TRUE : B_FALSE);
297 297 total_cookies += xfer->x_ndmac;
298 298 }
299 299
300 300 /* Status */
301 301 virtio_ve_add_indirect_buf(ve_hdr,
302 302 req->dmac.dmac_laddress + sizeof (struct vioblk_req_hdr),
303 303 sizeof (uint8_t), B_FALSE);
304 304
305 305 /* sending the whole chain to the device */
306 306 virtio_push_chain(ve_hdr, B_TRUE);
307 307
308 308 if (sc->sc_stats.rw_cookiesmax < total_cookies)
309 309 sc->sc_stats.rw_cookiesmax = total_cookies;
310 310
311 311 return (DDI_SUCCESS);
312 312 }
313 313
314 314 /*
315 315 * Now in polling mode. Interrupts are off, so we
316 316 * 1) poll for the already queued requests to complete.
317 317 * 2) push our request.
318 318 * 3) wait for our request to complete.
319 319 */
320 320 static int
321 321 vioblk_rw_poll(struct vioblk_softc *sc, bd_xfer_t *xfer,
322 322 int type, uint32_t len)
323 323 {
324 324 clock_t tmout;
325 325 int ret;
326 326
327 327 ASSERT(xfer->x_flags & BD_XFER_POLL);
328 328
329 329 /* Prevent a hard hang. */
330 330 tmout = drv_usectohz(30000000);
331 331
332 332 /* Poll for an empty queue */
333 333 while (vq_num_used(sc->sc_vq)) {
334 334 /* Check if any pending requests completed. */
335 335 ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
336 336 if (ret != DDI_INTR_CLAIMED) {
337 337 drv_usecwait(10);
338 338 tmout -= 10;
 339  339 			if (tmout < 0) return (ETIMEDOUT);
340 340 }
341 341 }
342 342
343 343 ret = vioblk_rw(sc, xfer, type, len);
344 344 if (ret)
345 345 return (ret);
346 346
347 347 tmout = drv_usectohz(30000000);
348 348 /* Poll for an empty queue again. */
349 349 while (vq_num_used(sc->sc_vq)) {
350 350 /* Check if any pending requests completed. */
351 351 ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
352 352 if (ret != DDI_INTR_CLAIMED) {
353 353 drv_usecwait(10);
354 354 tmout -= 10;
 355  355 			if (tmout < 0) return (ETIMEDOUT);
356 356 }
357 357 }
358 358
359 359 return (DDI_SUCCESS);
360 360 }
361 361
362 362 static int
363 363 vioblk_read(void *arg, bd_xfer_t *xfer)
364 364 {
365 365 int ret;
366 366 struct vioblk_softc *sc = (void *)arg;
367 367
368 368 if (xfer->x_flags & BD_XFER_POLL) {
369 369 if (!sc->sc_in_poll_mode) {
370 370 virtio_stop_vq_intr(sc->sc_vq);
371 371 sc->sc_in_poll_mode = 1;
372 372 }
373 373
374 374 ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_IN,
375 375 xfer->x_nblks * DEV_BSIZE);
376 376 } else {
377 377 if (sc->sc_in_poll_mode) {
378 378 virtio_start_vq_intr(sc->sc_vq);
379 379 sc->sc_in_poll_mode = 0;
380 380 }
381 381
382 382 ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_IN,
383 383 xfer->x_nblks * DEV_BSIZE);
384 384 }
385 385
386 386 return (ret);
387 387 }
388 388
389 389 static int
390 390 vioblk_write(void *arg, bd_xfer_t *xfer)
391 391 {
392 392 int ret;
393 393 struct vioblk_softc *sc = (void *)arg;
394 394
395 395 if (xfer->x_flags & BD_XFER_POLL) {
396 396 if (!sc->sc_in_poll_mode) {
397 397 virtio_stop_vq_intr(sc->sc_vq);
398 398 sc->sc_in_poll_mode = 1;
399 399 }
400 400
401 401 ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_OUT,
402 402 xfer->x_nblks * DEV_BSIZE);
403 403 } else {
404 404 if (sc->sc_in_poll_mode) {
405 405 virtio_start_vq_intr(sc->sc_vq);
406 406 sc->sc_in_poll_mode = 0;
407 407 }
408 408
409 409 ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_OUT,
410 410 xfer->x_nblks * DEV_BSIZE);
411 411 }
412 412 return (ret);
413 413 }
414 414
415 415 static int
416 416 vioblk_flush(void *arg, bd_xfer_t *xfer)
417 417 {
418 418 int ret;
419 419 struct vioblk_softc *sc = (void *)arg;
420 420
421 421 ASSERT((xfer->x_flags & BD_XFER_POLL) == 0);
422 422
423 423 ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_FLUSH_OUT,
424 424 xfer->x_nblks * DEV_BSIZE);
425 425
426 426 if (!ret)
427 427 sc->sc_stats.rw_cacheflush++;
428 428
429 429 return (ret);
430 430 }
431 431
432 432
433 433 static void
434 434 vioblk_driveinfo(void *arg, bd_drive_t *drive)
435 435 {
436 436 struct vioblk_softc *sc = (void *)arg;
437 437
438 438 drive->d_qsize = sc->sc_vq->vq_num;
439 439 drive->d_removable = B_FALSE;
440 440 drive->d_hotpluggable = B_TRUE;
441 441 drive->d_target = 0;
442 442 drive->d_lun = 0;
443 443 }
444 444
445 445 static int
446 446 vioblk_mediainfo(void *arg, bd_media_t *media)
447 447 {
448 448 struct vioblk_softc *sc = (void *)arg;
449 449
450 450 media->m_nblks = sc->sc_nblks;
451 451 media->m_blksize = sc->sc_blk_size;
452 452 media->m_readonly = sc->sc_readonly;
453 453 media->m_pblksize = sc->sc_pblk_size;
454 454 return (0);
455 455 }
456 456
457 457 static int
458 458 vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
459 459 {
460 460 struct vioblk_softc *sc = (void *)arg;
461 461 clock_t deadline;
462 462 int ret;
463 463 bd_xfer_t xfer;
464 464
465 - deadline = ddi_get_lbolt() + (clock_t)drv_usectohz(3 * 1000000);
465 + deadline = ddi_get_lbolt() + drv_sectohz(3);
466 466 (void) memset(&xfer, 0, sizeof (bd_xfer_t));
467 467 xfer.x_nblks = 1;
468 468
469 469 ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
470 470 DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
471 471 if (ret != DDI_SUCCESS)
472 472 goto out_alloc;
473 473
474 474 ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
475 475 VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
476 476 DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
477 477 if (ret != DDI_DMA_MAPPED) {
478 478 ret = DDI_FAILURE;
479 479 goto out_map;
480 480 }
481 481
482 482 mutex_enter(&sc->lock_devid);
483 483
484 484 ret = vioblk_rw(sc, &xfer, VIRTIO_BLK_T_GET_ID,
485 485 VIRTIO_BLK_ID_BYTES);
486 486 if (ret) {
487 487 mutex_exit(&sc->lock_devid);
488 488 goto out_rw;
489 489 }
490 490
491 491 /* wait for reply */
492 492 ret = cv_timedwait(&sc->cv_devid, &sc->lock_devid, deadline);
493 493 mutex_exit(&sc->lock_devid);
494 494
495 495 (void) ddi_dma_unbind_handle(xfer.x_dmah);
496 496 ddi_dma_free_handle(&xfer.x_dmah);
497 497
498 498 /* timeout */
499 499 if (ret < 0) {
500 500 dev_err(devinfo, CE_WARN, "Cannot get devid from the device");
501 501 return (DDI_FAILURE);
502 502 }
503 503
504 504 ret = ddi_devid_init(devinfo, DEVID_ATA_SERIAL,
505 505 VIRTIO_BLK_ID_BYTES, sc->devid, devid);
506 506 if (ret != DDI_SUCCESS) {
507 507 dev_err(devinfo, CE_WARN, "Cannot build devid from the device");
508 508 return (ret);
509 509 }
510 510
511 511 dev_debug(sc->sc_dev, CE_NOTE,
512 512 "devid %x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x",
513 513 sc->devid[0], sc->devid[1], sc->devid[2], sc->devid[3],
514 514 sc->devid[4], sc->devid[5], sc->devid[6], sc->devid[7],
515 515 sc->devid[8], sc->devid[9], sc->devid[10], sc->devid[11],
516 516 sc->devid[12], sc->devid[13], sc->devid[14], sc->devid[15],
517 517 sc->devid[16], sc->devid[17], sc->devid[18], sc->devid[19]);
518 518
519 519 return (0);
520 520
521 521 out_rw:
522 522 (void) ddi_dma_unbind_handle(xfer.x_dmah);
523 523 out_map:
524 524 ddi_dma_free_handle(&xfer.x_dmah);
525 525 out_alloc:
526 526 return (ret);
527 527 }
528 528
529 529 static void
530 530 vioblk_show_features(struct vioblk_softc *sc, const char *prefix,
531 531 uint32_t features)
532 532 {
533 533 char buf[512];
534 534 char *bufp = buf;
535 535 char *bufend = buf + sizeof (buf);
536 536
537 537 /* LINTED E_PTRDIFF_OVERFLOW */
538 538 bufp += snprintf(bufp, bufend - bufp, prefix);
539 539
540 540 /* LINTED E_PTRDIFF_OVERFLOW */
541 541 bufp += virtio_show_features(features, bufp, bufend - bufp);
542 542
543 543
544 544 /* LINTED E_PTRDIFF_OVERFLOW */
545 545 bufp += snprintf(bufp, bufend - bufp, "Vioblk ( ");
546 546
547 547 if (features & VIRTIO_BLK_F_BARRIER)
548 548 /* LINTED E_PTRDIFF_OVERFLOW */
549 549 bufp += snprintf(bufp, bufend - bufp, "BARRIER ");
550 550 if (features & VIRTIO_BLK_F_SIZE_MAX)
551 551 /* LINTED E_PTRDIFF_OVERFLOW */
552 552 bufp += snprintf(bufp, bufend - bufp, "SIZE_MAX ");
553 553 if (features & VIRTIO_BLK_F_SEG_MAX)
554 554 /* LINTED E_PTRDIFF_OVERFLOW */
555 555 bufp += snprintf(bufp, bufend - bufp, "SEG_MAX ");
556 556 if (features & VIRTIO_BLK_F_GEOMETRY)
557 557 /* LINTED E_PTRDIFF_OVERFLOW */
558 558 bufp += snprintf(bufp, bufend - bufp, "GEOMETRY ");
559 559 if (features & VIRTIO_BLK_F_RO)
560 560 /* LINTED E_PTRDIFF_OVERFLOW */
561 561 bufp += snprintf(bufp, bufend - bufp, "RO ");
562 562 if (features & VIRTIO_BLK_F_BLK_SIZE)
563 563 /* LINTED E_PTRDIFF_OVERFLOW */
564 564 bufp += snprintf(bufp, bufend - bufp, "BLK_SIZE ");
565 565 if (features & VIRTIO_BLK_F_SCSI)
566 566 /* LINTED E_PTRDIFF_OVERFLOW */
567 567 bufp += snprintf(bufp, bufend - bufp, "SCSI ");
568 568 if (features & VIRTIO_BLK_F_FLUSH)
569 569 /* LINTED E_PTRDIFF_OVERFLOW */
570 570 bufp += snprintf(bufp, bufend - bufp, "FLUSH ");
571 571 if (features & VIRTIO_BLK_F_TOPOLOGY)
572 572 /* LINTED E_PTRDIFF_OVERFLOW */
573 573 bufp += snprintf(bufp, bufend - bufp, "TOPOLOGY ");
574 574
575 575 /* LINTED E_PTRDIFF_OVERFLOW */
576 576 bufp += snprintf(bufp, bufend - bufp, ")");
577 577 *bufp = '\0';
578 578
579 579 dev_debug(sc->sc_dev, CE_NOTE, "%s", buf);
580 580 }
581 581
582 582 static int
583 583 vioblk_dev_features(struct vioblk_softc *sc)
584 584 {
585 585 uint32_t host_features;
586 586
587 587 host_features = virtio_negotiate_features(&sc->sc_virtio,
588 588 VIRTIO_BLK_F_RO |
589 589 VIRTIO_BLK_F_GEOMETRY |
590 590 VIRTIO_BLK_F_BLK_SIZE |
591 591 VIRTIO_BLK_F_FLUSH |
592 592 VIRTIO_BLK_F_TOPOLOGY |
593 593 VIRTIO_BLK_F_SEG_MAX |
594 594 VIRTIO_BLK_F_SIZE_MAX |
595 595 VIRTIO_F_RING_INDIRECT_DESC);
596 596
597 597 vioblk_show_features(sc, "Host features: ", host_features);
598 598 vioblk_show_features(sc, "Negotiated features: ",
599 599 sc->sc_virtio.sc_features);
600 600
601 601 if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
602 602 dev_err(sc->sc_dev, CE_NOTE,
603 603 "Host does not support RING_INDIRECT_DESC, bye.");
604 604 return (DDI_FAILURE);
605 605 }
606 606
607 607 return (DDI_SUCCESS);
608 608 }
609 609
610 610 /* ARGSUSED */
611 611 uint_t
612 612 vioblk_int_handler(caddr_t arg1, caddr_t arg2)
613 613 {
614 614 struct virtio_softc *vsc = (void *)arg1;
615 615 struct vioblk_softc *sc = container_of(vsc,
616 616 struct vioblk_softc, sc_virtio);
617 617 struct vq_entry *ve;
618 618 uint32_t len;
619 619 int i = 0, error;
620 620
621 621 while ((ve = virtio_pull_chain(sc->sc_vq, &len))) {
622 622 struct vioblk_req *req = &sc->sc_reqs[ve->qe_index];
623 623 bd_xfer_t *xfer = req->xfer;
624 624 uint8_t status = req->status;
625 625 uint32_t type = req->hdr.type;
626 626
627 627 if (req->xfer == (void *)VIOBLK_POISON) {
628 628 dev_err(sc->sc_dev, CE_WARN, "Poisoned descriptor!");
629 629 virtio_free_chain(ve);
630 630 return (DDI_INTR_CLAIMED);
631 631 }
632 632
633 633 req->xfer = (void *) VIOBLK_POISON;
634 634
635 635 /* Note: blkdev tears down the payload mapping for us. */
636 636 virtio_free_chain(ve);
637 637
638 638 /* returning payload back to blkdev */
639 639 switch (status) {
640 640 case VIRTIO_BLK_S_OK:
641 641 error = 0;
642 642 break;
643 643 case VIRTIO_BLK_S_IOERR:
644 644 error = EIO;
645 645 sc->sc_stats.io_errors++;
646 646 break;
647 647 case VIRTIO_BLK_S_UNSUPP:
648 648 sc->sc_stats.unsupp_errors++;
649 649 error = ENOTTY;
650 650 break;
651 651 default:
652 652 sc->sc_stats.nxio_errors++;
653 653 error = ENXIO;
654 654 break;
655 655 }
656 656
657 657 if (type == VIRTIO_BLK_T_GET_ID) {
658 658 /* notify devid_init */
659 659 mutex_enter(&sc->lock_devid);
660 660 cv_broadcast(&sc->cv_devid);
661 661 mutex_exit(&sc->lock_devid);
662 662 } else
663 663 bd_xfer_done(xfer, error);
664 664
665 665 i++;
666 666 }
667 667
668 668 /* update stats */
669 669 if (sc->sc_stats.intr_queuemax < i)
670 670 sc->sc_stats.intr_queuemax = i;
671 671 sc->sc_stats.intr_total++;
672 672
673 673 return (DDI_INTR_CLAIMED);
674 674 }
675 675
676 676 /* ARGSUSED */
677 677 uint_t
678 678 vioblk_config_handler(caddr_t arg1, caddr_t arg2)
679 679 {
680 680 return (DDI_INTR_CLAIMED);
681 681 }
682 682
683 683 static int
684 684 vioblk_register_ints(struct vioblk_softc *sc)
685 685 {
686 686 int ret;
687 687
688 688 struct virtio_int_handler vioblk_conf_h = {
689 689 vioblk_config_handler
690 690 };
691 691
692 692 struct virtio_int_handler vioblk_vq_h[] = {
693 693 { vioblk_int_handler },
694 694 { NULL },
695 695 };
696 696
697 697 ret = virtio_register_ints(&sc->sc_virtio,
698 698 &vioblk_conf_h, vioblk_vq_h);
699 699
700 700 return (ret);
701 701 }
702 702
703 703 static void
704 704 vioblk_free_reqs(struct vioblk_softc *sc)
705 705 {
706 706 int i, qsize;
707 707
708 708 qsize = sc->sc_vq->vq_num;
709 709
710 710 for (i = 0; i < qsize; i++) {
711 711 struct vioblk_req *req = &sc->sc_reqs[i];
712 712
713 713 if (req->ndmac)
714 714 (void) ddi_dma_unbind_handle(req->dmah);
715 715
716 716 if (req->dmah)
717 717 ddi_dma_free_handle(&req->dmah);
718 718 }
719 719
720 720 kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
721 721 }
722 722
723 723 static int
724 724 vioblk_alloc_reqs(struct vioblk_softc *sc)
725 725 {
726 726 int i, qsize;
727 727 int ret;
728 728
729 729 qsize = sc->sc_vq->vq_num;
730 730
731 731 sc->sc_reqs = kmem_zalloc(sizeof (struct vioblk_req) * qsize, KM_SLEEP);
732 732
733 733 for (i = 0; i < qsize; i++) {
734 734 struct vioblk_req *req = &sc->sc_reqs[i];
735 735
736 736 ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_req_dma_attr,
737 737 DDI_DMA_SLEEP, NULL, &req->dmah);
738 738 if (ret != DDI_SUCCESS) {
739 739
740 740 dev_err(sc->sc_dev, CE_WARN,
741 741 "Can't allocate dma handle for req "
742 742 "buffer %d", i);
743 743 goto exit;
744 744 }
745 745
746 746 ret = ddi_dma_addr_bind_handle(req->dmah, NULL,
747 747 (caddr_t)&req->hdr,
748 748 sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
749 749 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
750 750 NULL, &req->dmac, &req->ndmac);
751 751 if (ret != DDI_DMA_MAPPED) {
752 752 dev_err(sc->sc_dev, CE_WARN,
753 753 "Can't bind req buffer %d", i);
754 754 goto exit;
755 755 }
756 756 }
757 757
758 758 return (0);
759 759
760 760 exit:
761 761 vioblk_free_reqs(sc);
762 762 return (ENOMEM);
763 763 }
764 764
765 765
766 766 static int
767 767 vioblk_ksupdate(kstat_t *ksp, int rw)
768 768 {
769 769 struct vioblk_softc *sc = ksp->ks_private;
770 770
771 771 if (rw == KSTAT_WRITE)
772 772 return (EACCES);
773 773
774 774 sc->ks_data->sts_rw_cookiesmax.value.ui32 = sc->sc_stats.rw_cookiesmax;
775 775 sc->ks_data->sts_intr_queuemax.value.ui32 = sc->sc_stats.intr_queuemax;
776 776 sc->ks_data->sts_unsupp_errors.value.ui32 = sc->sc_stats.unsupp_errors;
777 777 sc->ks_data->sts_nxio_errors.value.ui32 = sc->sc_stats.nxio_errors;
778 778 sc->ks_data->sts_io_errors.value.ui32 = sc->sc_stats.io_errors;
779 779 sc->ks_data->sts_rw_cacheflush.value.ui64 = sc->sc_stats.rw_cacheflush;
780 780 sc->ks_data->sts_intr_total.value.ui64 = sc->sc_stats.intr_total;
781 781
782 782
783 783 return (0);
784 784 }
785 785
786 786 static int
787 787 vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
788 788 {
789 789 int ret = DDI_SUCCESS;
790 790 int instance;
791 791 struct vioblk_softc *sc;
792 792 struct virtio_softc *vsc;
793 793 struct vioblk_stats *ks_data;
794 794
795 795 instance = ddi_get_instance(devinfo);
796 796
797 797 switch (cmd) {
798 798 case DDI_ATTACH:
799 799 break;
800 800
801 801 case DDI_RESUME:
802 802 case DDI_PM_RESUME:
803 803 dev_err(devinfo, CE_WARN, "resume not supported yet");
804 804 ret = DDI_FAILURE;
805 805 goto exit;
806 806
807 807 default:
808 808 dev_err(devinfo, CE_WARN, "cmd 0x%x not recognized", cmd);
809 809 ret = DDI_FAILURE;
810 810 goto exit;
811 811 }
812 812
813 813 sc = kmem_zalloc(sizeof (struct vioblk_softc), KM_SLEEP);
814 814 ddi_set_driver_private(devinfo, sc);
815 815
816 816 vsc = &sc->sc_virtio;
817 817
818 818 /* Duplicate for faster access / less typing */
819 819 sc->sc_dev = devinfo;
820 820 vsc->sc_dev = devinfo;
821 821
822 822 cv_init(&sc->cv_devid, NULL, CV_DRIVER, NULL);
823 823 mutex_init(&sc->lock_devid, NULL, MUTEX_DRIVER, NULL);
824 824
825 825 /*
826 826 * Initialize interrupt kstat. This should not normally fail, since
827 827 * we don't use a persistent stat. We do it this way to avoid having
828 828 * to test for it at run time on the hot path.
829 829 */
830 830 sc->sc_intrstat = kstat_create("vioblk", instance,
831 831 "intrs", "controller", KSTAT_TYPE_NAMED,
832 832 sizeof (struct vioblk_stats) / sizeof (kstat_named_t),
833 833 KSTAT_FLAG_PERSISTENT);
834 834 if (sc->sc_intrstat == NULL) {
835 835 dev_err(devinfo, CE_WARN, "kstat_create failed");
836 836 goto exit_intrstat;
837 837 }
838 838 ks_data = (struct vioblk_stats *)sc->sc_intrstat->ks_data;
839 839 kstat_named_init(&ks_data->sts_rw_outofmemory,
840 840 "total_rw_outofmemory", KSTAT_DATA_UINT64);
841 841 kstat_named_init(&ks_data->sts_rw_badoffset,
842 842 "total_rw_badoffset", KSTAT_DATA_UINT64);
843 843 kstat_named_init(&ks_data->sts_intr_total,
844 844 "total_intr", KSTAT_DATA_UINT64);
845 845 kstat_named_init(&ks_data->sts_io_errors,
846 846 "total_io_errors", KSTAT_DATA_UINT32);
847 847 kstat_named_init(&ks_data->sts_unsupp_errors,
848 848 "total_unsupp_errors", KSTAT_DATA_UINT32);
849 849 kstat_named_init(&ks_data->sts_nxio_errors,
850 850 "total_nxio_errors", KSTAT_DATA_UINT32);
851 851 kstat_named_init(&ks_data->sts_rw_cacheflush,
852 852 "total_rw_cacheflush", KSTAT_DATA_UINT64);
853 853 kstat_named_init(&ks_data->sts_rw_cookiesmax,
854 854 "max_rw_cookies", KSTAT_DATA_UINT32);
855 855 kstat_named_init(&ks_data->sts_intr_queuemax,
856 856 "max_intr_queue", KSTAT_DATA_UINT32);
857 857 sc->ks_data = ks_data;
858 858 sc->sc_intrstat->ks_private = sc;
859 859 sc->sc_intrstat->ks_update = vioblk_ksupdate;
860 860 kstat_install(sc->sc_intrstat);
861 861
862 862 /* map BAR0 */
863 863 ret = ddi_regs_map_setup(devinfo, 1,
864 864 (caddr_t *)&sc->sc_virtio.sc_io_addr,
865 865 0, 0, &vioblk_attr, &sc->sc_virtio.sc_ioh);
866 866 if (ret != DDI_SUCCESS) {
867 867 dev_err(devinfo, CE_WARN, "unable to map bar0: [%d]", ret);
868 868 goto exit_map;
869 869 }
870 870
871 871 virtio_device_reset(&sc->sc_virtio);
872 872 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
873 873 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
874 874
875 875 if (vioblk_register_ints(sc)) {
876 876 dev_err(devinfo, CE_WARN, "Unable to add interrupt");
877 877 goto exit_int;
878 878 }
879 879
880 880 ret = vioblk_dev_features(sc);
881 881 if (ret)
882 882 goto exit_features;
883 883
884 884 if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_RO)
885 885 sc->sc_readonly = B_TRUE;
886 886 else
887 887 sc->sc_readonly = B_FALSE;
888 888
889 889 sc->sc_capacity = virtio_read_device_config_8(&sc->sc_virtio,
890 890 VIRTIO_BLK_CONFIG_CAPACITY);
891 891 sc->sc_nblks = sc->sc_capacity;
892 892
893 893 sc->sc_blk_size = DEV_BSIZE;
894 894 if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_BLK_SIZE) {
895 895 sc->sc_blk_size = virtio_read_device_config_4(&sc->sc_virtio,
896 896 VIRTIO_BLK_CONFIG_BLK_SIZE);
897 897 }
898 898
899 899 sc->sc_pblk_size = sc->sc_blk_size;
900 900 if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_TOPOLOGY) {
901 901 sc->sc_pblk_size <<= virtio_read_device_config_1(&sc->sc_virtio,
902 902 VIRTIO_BLK_CONFIG_TOPO_PBEXP);
903 903 }
904 904
905 905 /* Flushing is not supported. */
906 906 if (!(sc->sc_virtio.sc_features & VIRTIO_BLK_F_FLUSH)) {
907 907 vioblk_ops.o_sync_cache = NULL;
908 908 }
909 909
910 910 sc->sc_seg_max = DEF_MAXINDIRECT;
911 911 /* The max number of segments (cookies) in a request */
912 912 if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SEG_MAX) {
913 913 sc->sc_seg_max = virtio_read_device_config_4(&sc->sc_virtio,
914 914 VIRTIO_BLK_CONFIG_SEG_MAX);
915 915
916 916 /* That's what Linux does. */
917 917 if (!sc->sc_seg_max)
918 918 sc->sc_seg_max = 1;
919 919
920 920 /*
921 921 * SEG_MAX corresponds to the number of _data_
922 922 * blocks in a request
923 923 */
924 924 sc->sc_seg_max += 2;
925 925 }
926 926 /* 2 descriptors taken for header/status */
927 927 vioblk_bd_dma_attr.dma_attr_sgllen = sc->sc_seg_max - 2;
928 928
929 929
930 930 /* The maximum size for a cookie in a request. */
931 931 sc->sc_seg_size_max = DEF_MAXSECTOR;
932 932 if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SIZE_MAX) {
933 933 sc->sc_seg_size_max = virtio_read_device_config_4(
934 934 &sc->sc_virtio, VIRTIO_BLK_CONFIG_SIZE_MAX);
935 935 }
936 936
937 937 /* The maximum request size */
938 938 vioblk_bd_dma_attr.dma_attr_maxxfer =
939 939 vioblk_bd_dma_attr.dma_attr_sgllen * sc->sc_seg_size_max;
940 940
941 941 dev_debug(devinfo, CE_NOTE,
942 942 "nblks=%" PRIu64 " blksize=%d (%d) num_seg=%d, "
943 943 "seg_size=%d, maxxfer=%" PRIu64,
944 944 sc->sc_nblks, sc->sc_blk_size, sc->sc_pblk_size,
945 945 vioblk_bd_dma_attr.dma_attr_sgllen,
946 946 sc->sc_seg_size_max,
947 947 vioblk_bd_dma_attr.dma_attr_maxxfer);
948 948
949 949
950 950 sc->sc_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
951 951 sc->sc_seg_max, "I/O request");
952 952 if (sc->sc_vq == NULL) {
953 953 goto exit_alloc1;
954 954 }
955 955
956 956 ret = vioblk_alloc_reqs(sc);
957 957 if (ret) {
958 958 goto exit_alloc2;
959 959 }
960 960
961 961 sc->bd_h = bd_alloc_handle(sc, &vioblk_ops, &vioblk_bd_dma_attr,
962 962 KM_SLEEP);
963 963
964 964
965 965 virtio_set_status(&sc->sc_virtio,
966 966 VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
967 967 virtio_start_vq_intr(sc->sc_vq);
968 968
969 969 ret = virtio_enable_ints(&sc->sc_virtio);
970 970 if (ret)
971 971 goto exit_enable_ints;
972 972
973 973 ret = bd_attach_handle(devinfo, sc->bd_h);
974 974 if (ret != DDI_SUCCESS) {
975 975 dev_err(devinfo, CE_WARN, "Failed to attach blkdev");
976 976 goto exit_attach_bd;
977 977 }
978 978
979 979 return (DDI_SUCCESS);
980 980
981 981 exit_attach_bd:
982 982 /*
983 983 * There is no virtio_disable_ints(), it's done in virtio_release_ints.
984 984 * If they ever get split, don't forget to add a call here.
985 985 */
986 986 exit_enable_ints:
987 987 virtio_stop_vq_intr(sc->sc_vq);
988 988 bd_free_handle(sc->bd_h);
989 989 vioblk_free_reqs(sc);
990 990 exit_alloc2:
991 991 virtio_free_vq(sc->sc_vq);
992 992 exit_alloc1:
993 993 exit_features:
994 994 virtio_release_ints(&sc->sc_virtio);
995 995 exit_int:
996 996 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
997 997 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
998 998 exit_map:
999 999 kstat_delete(sc->sc_intrstat);
1000 1000 exit_intrstat:
1001 1001 mutex_destroy(&sc->lock_devid);
1002 1002 cv_destroy(&sc->cv_devid);
1003 1003 kmem_free(sc, sizeof (struct vioblk_softc));
1004 1004 exit:
1005 1005 return (ret);
1006 1006 }
1007 1007
1008 1008 static int
1009 1009 vioblk_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1010 1010 {
1011 1011 struct vioblk_softc *sc = ddi_get_driver_private(devinfo);
1012 1012
1013 1013 switch (cmd) {
1014 1014 case DDI_DETACH:
1015 1015 break;
1016 1016
1017 1017 case DDI_PM_SUSPEND:
1018 1018 cmn_err(CE_WARN, "suspend not supported yet");
1019 1019 return (DDI_FAILURE);
1020 1020
1021 1021 default:
1022 1022 cmn_err(CE_WARN, "cmd 0x%x unrecognized", cmd);
1023 1023 return (DDI_FAILURE);
1024 1024 }
1025 1025
1026 1026 (void) bd_detach_handle(sc->bd_h);
1027 1027 virtio_stop_vq_intr(sc->sc_vq);
1028 1028 virtio_release_ints(&sc->sc_virtio);
1029 1029 vioblk_free_reqs(sc);
1030 1030 virtio_free_vq(sc->sc_vq);
1031 1031 virtio_device_reset(&sc->sc_virtio);
1032 1032 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1033 1033 kstat_delete(sc->sc_intrstat);
1034 1034 kmem_free(sc, sizeof (struct vioblk_softc));
1035 1035
1036 1036 return (DDI_SUCCESS);
1037 1037 }
1038 1038
1039 1039 static int
1040 1040 vioblk_quiesce(dev_info_t *devinfo)
1041 1041 {
1042 1042 struct vioblk_softc *sc = ddi_get_driver_private(devinfo);
1043 1043
1044 1044 virtio_stop_vq_intr(sc->sc_vq);
1045 1045 virtio_device_reset(&sc->sc_virtio);
1046 1046
1047 1047 return (DDI_SUCCESS);
1048 1048 }
1049 1049
1050 1050 int
1051 1051 _init(void)
1052 1052 {
1053 1053 int rv;
1054 1054
1055 1055 bd_mod_init(&vioblk_dev_ops);
1056 1056
1057 1057 if ((rv = mod_install(&modlinkage)) != 0) {
1058 1058 bd_mod_fini(&vioblk_dev_ops);
1059 1059 }
1060 1060
1061 1061 return (rv);
1062 1062 }
1063 1063
1064 1064 int
1065 1065 _fini(void)
1066 1066 {
1067 1067 int rv;
1068 1068
1069 1069 if ((rv = mod_remove(&modlinkage)) == 0) {
1070 1070 bd_mod_fini(&vioblk_dev_ops);
1071 1071 }
1072 1072
1073 1073 return (rv);
1074 1074 }
1075 1075
1076 1076 int
1077 1077 _info(struct modinfo *modinfop)
1078 1078 {
1079 1079 return (mod_info(&modlinkage, modinfop));
1080 1080 }
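
The hunk above shows only the call site; the definition of drv_sectohz arrives in
another file of this changeset (presumably alongside drv_usectohz(9F) in
sys/sunddi.h, which this webrev does not include). As a hedged sketch, assuming
the new routine is a plain seconds-to-ticks wrapper and that MICROSEC (1000000,
from <sys/time.h>) is in scope, it could be as simple as:

	/*
	 * Sketch only, not a hunk from this changeset: convert seconds to
	 * clock ticks by deferring to the existing drv_usectohz(9F).
	 */
	#define	drv_sectohz(x)	(drv_usectohz((clock_t)(x) * MICROSEC))

With a definition along these lines, the devid deadline in vioblk_devid_init()
reads as "three seconds from now" directly, instead of repeating the
(clock_t)drv_usectohz(3 * 1000000) cast-and-multiply at the call site.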
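
For context on how that deadline is consumed: cv_timedwait(9F) takes an absolute
lbolt value and returns -1 once that time is reached without the condition being
signaled, which is exactly what the devid path checks for after the wait. A
stripped-down sketch of the same pattern (the lock, cv, and done flag here are
illustrative, not the driver's own fields):

	kmutex_t	lock;		/* protects "done" */
	kcondvar_t	cv;
	boolean_t	done;

	clock_t deadline = ddi_get_lbolt() + drv_sectohz(3);

	mutex_enter(&lock);
	while (!done) {
		/* cv_timedwait(9F) wants absolute ticks ... */
		if (cv_timedwait(&cv, &lock, deadline) < 0)
			break;		/* ... and returns -1 on timeout */
	}
	mutex_exit(&lock);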