5255 uts shouldn't open-code ISP2

Old version (open-coded power-of-two tests):

1519 }
1520 
1521 /*ARGSUSED*/
1522 void
1523 i_ddi_devacc_to_hatacc(ddi_device_acc_attr_t *devaccp, uint_t *hataccp)
1524 {
1525 }
1526 
1527 /*
1528  * Check if the specified cache attribute is supported on the platform.
1529  * This function must be called before i_ddi_cacheattr_to_hatacc().
1530  */
1531 boolean_t
1532 i_ddi_check_cache_attr(uint_t flags)
1533 {
1534         /*
1535          * The cache attributes are mutually exclusive. Any combination of
1536          * the attributes leads to a failure.
1537          */
1538         uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1539         if ((cache_attr != 0) && ((cache_attr & (cache_attr - 1)) != 0))
1540                 return (B_FALSE);
1541 
1542         /* All cache attributes are supported on X86/X64 */
1543         if (cache_attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_CACHED |
1544             IOMEM_DATA_UC_WR_COMBINE))
1545                 return (B_TRUE);
1546 
1547         /* undefined attributes */
1548         return (B_FALSE);
1549 }
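
An aside on the open-coded test above: x & (x - 1) clears the lowest set bit of x, so the expression is 0 exactly when x has at most one bit set, i.e. when x is 0 or a power of two. That zero case is why the code guards with cache_attr != 0 first. A minimal standalone demo (open_coded_isp2 is a hypothetical helper for illustration, not part of the source):

#include <stdio.h>

/*
 * Open-coded power-of-two test, as in the old code above.
 * x & (x - 1) clears the lowest set bit of x, so the result is 0
 * exactly when x has at most one bit set (x == 0 or a power of two).
 */
static int
open_coded_isp2(unsigned int x)
{
	return ((x & (x - 1)) == 0);
}

int
main(void)
{
	printf("8  -> %d\n", open_coded_isp2(8));	/* 1: 8 & 7 == 0 */
	printf("12 -> %d\n", open_coded_isp2(12));	/* 0: 12 & 11 == 8 */
	printf("0  -> %d\n", open_coded_isp2(0));	/* 1: hence the cache_attr != 0 guard */
	return (0);
}
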
1550 
1551 /* set HAT cache attributes from the cache attributes */
1552 void
1553 i_ddi_cacheattr_to_hatacc(uint_t flags, uint_t *hataccp)
1554 {
1555         uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1556         static char *fname = "i_ddi_cacheattr_to_hatacc";
1557 
1558         /*
1559          * If write-combining is not supported, then it falls back

[ lines 1560-1609 omitted ]

1610         ddi_device_acc_attr_t *accattrp, caddr_t *kaddrp,
1611         size_t *real_length, ddi_acc_hdl_t *ap)
1612 {
1613         caddr_t a;
1614         int iomin;
1615         ddi_acc_impl_t *iap;
1616         int physcontig = 0;
1617         pgcnt_t npages;
1618         pgcnt_t minctg;
1619         uint_t order;
1620         int e;
1621 
1622         /*
1623          * Check legality of arguments
1624          */
1625         if (length == 0 || kaddrp == NULL || attr == NULL) {
1626                 return (DDI_FAILURE);
1627         }
1628 
1629         if (attr->dma_attr_minxfer == 0 || attr->dma_attr_align == 0 ||
1630             (attr->dma_attr_align & (attr->dma_attr_align - 1)) ||
1631             (attr->dma_attr_minxfer & (attr->dma_attr_minxfer - 1))) {
1632                         return (DDI_FAILURE);
1633         }
1634 
1635         /*
1636          * figure out most restrictive alignment requirement
1637          */
1638         iomin = attr->dma_attr_minxfer;
1639         iomin = maxbit(iomin, attr->dma_attr_align);
1640         if (iomin == 0)
1641                 return (DDI_FAILURE);
1642 
1643         ASSERT((iomin & (iomin - 1)) == 0);
1644 
1645         /*
1646          * If we allocate memory with IOMEM_DATA_UNCACHED or
1647          * IOMEM_DATA_UC_WR_COMBINE, make sure we allocate page-aligned
1648          * memory that ends on a page boundary.
1649          * We don't want two different cache mappings to the same
1650          * physical page.
1651          */
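
The comment above is the reason uncached and write-combining allocations are grown to whole pages: HAT cache attributes apply per page, so such a buffer must not share its first or last physical page with data mapped under a different attribute. The allocation code itself is elided from this excerpt; the sketch below only illustrates the page rounding, using P2ROUNDUP() as defined in illumos's <sys/sysmacros.h> (the PAGESIZE of 4096 is an assumption about the x86 base page size):

#include <stdio.h>
#include <stddef.h>

/*
 * Round x up to the next multiple of align; align must be a power
 * of two. This mirrors P2ROUNDUP() from <sys/sysmacros.h>.
 */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

#define	PAGESIZE	4096	/* assumption: 4 KB base pages on x86 */

int
main(void)
{
	size_t length = 6000;	/* hypothetical caller-requested size */
	size_t real_length = P2ROUNDUP(length, (size_t)PAGESIZE);

	/*
	 * Growing the allocation to whole pages guarantees that no
	 * other mapping with a different cache attribute can share
	 * the buffer's last physical page.
	 */
	printf("%zu -> %zu\n", length, real_length);	/* 6000 -> 8192 */
	return (0);
}
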




New version (using ISP2()):

1519 }
1520 
1521 /*ARGSUSED*/
1522 void
1523 i_ddi_devacc_to_hatacc(ddi_device_acc_attr_t *devaccp, uint_t *hataccp)
1524 {
1525 }
1526 
1527 /*
1528  * Check if the specified cache attribute is supported on the platform.
1529  * This function must be called before i_ddi_cacheattr_to_hatacc().
1530  */
1531 boolean_t
1532 i_ddi_check_cache_attr(uint_t flags)
1533 {
1534         /*
1535          * The cache attributes are mutually exclusive. Any combination of
1536          * the attributes leads to a failure.
1537          */
1538         uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1539         if ((cache_attr != 0) && !ISP2(cache_attr))
1540                 return (B_FALSE);
1541 
1542         /* All cache attributes are supported on X86/X64 */
1543         if (cache_attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_CACHED |
1544             IOMEM_DATA_UC_WR_COMBINE))
1545                 return (B_TRUE);
1546 
1547         /* undefined attributes */
1548         return (B_FALSE);
1549 }
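
The rewrite replaces the bit trick with the ISP2() macro from <sys/sysmacros.h>. The sketch below checks that the two forms agree for all nonzero inputs; the ISP2 definition shown is my recollection of the illumos one and should be verified against the actual header:

#include <stdio.h>

/*
 * Local copy for illustration only -- ISP2() really comes from
 * <sys/sysmacros.h>; check this definition against the header.
 */
#define	ISP2(x)	(((x) ^ ((x) - 1)) == (((x) << 1) - 1))

int
main(void)
{
	unsigned int x;

	/*
	 * For every nonzero x, ISP2(x) agrees with the open-coded
	 * (x & (x - 1)) == 0 test used by the old code. (Note that
	 * both forms also accept 0.)
	 */
	for (x = 1; x < 1024; x++) {
		if (ISP2(x) != ((x & (x - 1)) == 0))
			printf("mismatch at %u\n", x);
	}
	printf("check done\n");
	return (0);
}
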
1550 
1551 /* set HAT cache attributes from the cache attributes */
1552 void
1553 i_ddi_cacheattr_to_hatacc(uint_t flags, uint_t *hataccp)
1554 {
1555         uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1556         static char *fname = "i_ddi_cacheattr_to_hatacc";
1557 
1558         /*
1559          * If write-combining is not supported, then it falls back

[ lines 1560-1609 omitted ]

1610         ddi_device_acc_attr_t *accattrp, caddr_t *kaddrp,
1611         size_t *real_length, ddi_acc_hdl_t *ap)
1612 {
1613         caddr_t a;
1614         int iomin;
1615         ddi_acc_impl_t *iap;
1616         int physcontig = 0;
1617         pgcnt_t npages;
1618         pgcnt_t minctg;
1619         uint_t order;
1620         int e;
1621 
1622         /*
1623          * Check legality of arguments
1624          */
1625         if (length == 0 || kaddrp == NULL || attr == NULL) {
1626                 return (DDI_FAILURE);
1627         }
1628 
1629         if (attr->dma_attr_minxfer == 0 || attr->dma_attr_align == 0 ||
1630             !ISP2(attr->dma_attr_align) || !ISP2(attr->dma_attr_minxfer)) {
1631                 return (DDI_FAILURE);
1632         }
1633 
1634         /*
1635          * figure out most restrictive alignment requirement
1636          */
1637         iomin = attr->dma_attr_minxfer;
1638         iomin = maxbit(iomin, attr->dma_attr_align);
1639         if (iomin == 0)
1640                 return (DDI_FAILURE);
1641 
1642         ASSERT((iomin & (iomin - 1)) == 0);
1643 
1644         /*
1645          * If we allocate memory with IOMEM_DATA_UNCACHED or
1646          * IOMEM_DATA_UC_WR_COMBINE, make sure we allocate page-aligned
1647          * memory that ends on a page boundary.
1648          * We don't want two different cache mappings to the same
1649          * physical page.
1650          */
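
A final note on the "most restrictive alignment" step (file lines 1637-1641 in the new version, identical in both): since dma_attr_minxfer and dma_attr_align have both just been verified to be powers of two, combining them amounts to taking the larger one. maxbit() itself is not shown in this excerpt, so the stand-in below is an assumption about its behavior here, not the real implementation:

#include <stdio.h>

/*
 * Hypothetical stand-in for maxbit(): once both arguments are known
 * to be powers of two, the most restrictive alignment is simply the
 * larger of the two. The real maxbit() is elided from this excerpt.
 */
static int
maxbit_sketch(int i, int j)
{
	return (i > j ? i : j);
}

int
main(void)
{
	int minxfer = 4;	/* hypothetical dma_attr_minxfer */
	int align = 64;		/* hypothetical dma_attr_align */
	int iomin = maxbit_sketch(minxfer, align);

	/* Still a power of two, matching the ASSERT above. */
	printf("iomin = %d\n", iomin);	/* 64 */
	return (0);
}
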