Print this page
XXXX introduce drv_sectohz: add a seconds-to-clock-ticks helper so callers can write drv_sectohz(3) instead of the error-prone drv_usectohz(3 * 1000000) idiom


/*
 * Report media geometry to the caller by copying the values cached in
 * the per-device softc into the supplied bd_media_t.  Always succeeds.
 *
 * NOTE(review): presumably registered as the blkdev o_media_info
 * callback in this driver's bd_ops table -- confirm against the full
 * source; only this function body is visible here.
 */
 445 static int
 446 vioblk_mediainfo(void *arg, bd_media_t *media)
 447 {
 448         struct vioblk_softc *sc = (void *)arg;
 449 
 450         media->m_nblks = sc->sc_nblks;
 451         media->m_blksize = sc->sc_blk_size;
 452         media->m_readonly = sc->sc_readonly;
 453         media->m_pblksize = sc->sc_pblk_size;
 454         return (0);
 455 }
 456 
 457 static int
 458 vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
 459 {
 460         struct vioblk_softc *sc = (void *)arg;
 461         clock_t deadline;
 462         int ret;
 463         bd_xfer_t xfer;
 464 
 465         deadline = ddi_get_lbolt() + (clock_t)drv_usectohz(3 * 1000000);
 466         (void) memset(&xfer, 0, sizeof (bd_xfer_t));
 467         xfer.x_nblks = 1;
 468 
 469         ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
 470             DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
 471         if (ret != DDI_SUCCESS)
 472                 goto out_alloc;
 473 
 474         ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
 475             VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
 476             DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
 477         if (ret != DDI_DMA_MAPPED) {
 478                 ret = DDI_FAILURE;
 479                 goto out_map;
 480         }
 481 
 482         mutex_enter(&sc->lock_devid);
 483 
 484         ret = vioblk_rw(sc, &xfer, VIRTIO_BLK_T_GET_ID,
 485             VIRTIO_BLK_ID_BYTES);




/*
 * Report media geometry to the caller by copying the values cached in
 * the per-device softc into the supplied bd_media_t.  Always succeeds.
 *
 * NOTE(review): presumably registered as the blkdev o_media_info
 * callback in this driver's bd_ops table -- confirm against the full
 * source; only this function body is visible here.
 */
 445 static int
 446 vioblk_mediainfo(void *arg, bd_media_t *media)
 447 {
 448         struct vioblk_softc *sc = (void *)arg;
 449 
 450         media->m_nblks = sc->sc_nblks;
 451         media->m_blksize = sc->sc_blk_size;
 452         media->m_readonly = sc->sc_readonly;
 453         media->m_pblksize = sc->sc_pblk_size;
 454         return (0);
 455 }
 456 
 457 static int
 458 vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
 459 {
 460         struct vioblk_softc *sc = (void *)arg;
 461         clock_t deadline;
 462         int ret;
 463         bd_xfer_t xfer;
 464 
 465         deadline = ddi_get_lbolt() + drv_sectohz(3);
 466         (void) memset(&xfer, 0, sizeof (bd_xfer_t));
 467         xfer.x_nblks = 1;
 468 
 469         ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
 470             DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
 471         if (ret != DDI_SUCCESS)
 472                 goto out_alloc;
 473 
 474         ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
 475             VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
 476             DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
 477         if (ret != DDI_DMA_MAPPED) {
 478                 ret = DDI_FAILURE;
 479                 goto out_map;
 480         }
 481 
 482         mutex_enter(&sc->lock_devid);
 483 
 484         ret = vioblk_rw(sc, &xfer, VIRTIO_BLK_T_GET_ID,
 485             VIRTIO_BLK_ID_BYTES);