1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 
  27 /*
  28  * Universal Host Controller Driver (UHCI)
  29  *
  30  * The UHCI driver interfaces between the Universal Serial Bus
  31  * Driver (USBA) framework and the Host Controller (HC). The interface
  32  * to the Host Controller is defined by the UHCI specification.
  33  * This file contains miscellaneous functions.
  34  */
  35 #include <sys/usb/hcd/uhci/uhcid.h>
  36 #include <sys/usb/hcd/uhci/uhciutil.h>
  37 #include <sys/usb/hcd/uhci/uhcipolled.h>
  38 
  39 #include <sys/disp.h>
  40 
  41 /* Globals */
  42 extern uint_t   uhci_td_pool_size;                      /* Num TDs */
  43 extern uint_t   uhci_qh_pool_size;                      /* Num QHs */
  44 extern ushort_t uhci_tree_bottom_nodes[];
  45 extern void     *uhci_statep;
  46 
  47 /* function prototypes */
  48 static void     uhci_build_interrupt_lattice(uhci_state_t *uhcip);
  49 static int      uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);
  50 
  51 static uint_t   uhci_lattice_height(uint_t bandwidth);
  52 static uint_t   uhci_lattice_parent(uint_t node);
  53 static uint_t   uhci_leftmost_leaf(uint_t node, uint_t height);
  54 static uint_t   uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
  55                     usb_port_status_t port_status);
  56 
  57 static int      uhci_bandwidth_adjust(uhci_state_t *uhcip,
  58                     usb_ep_descr_t *endpoint, usb_port_status_t port_status);
  59 
  60 static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
  61 static void     uhci_fill_in_td(uhci_state_t *uhcip,
  62                     uhci_td_t *td, uhci_td_t *current_dummy,
  63                     uint32_t buffer_offset, size_t length,
  64                     uhci_pipe_private_t *pp, uchar_t PID,
  65                     usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
  66 static uint32_t uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
  67                     uint32_t buffer_offset, size_t length,
  68                     uhci_trans_wrapper_t *tw);
  69 static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
  70                     uhci_state_t *uhcip, uhci_pipe_private_t *pp,
  71                     size_t length, usb_flags_t usb_flags);
  72 static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
  73                     uhci_state_t *uhcip, uhci_pipe_private_t *pp,
  74                     usb_isoc_req_t *req, size_t length,
  75                     usb_flags_t usb_flags);
  76 
  77 static int      uhci_create_setup_pkt(uhci_state_t *uhcip,
  78                     uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
  79 static void     uhci_insert_ctrl_qh(uhci_state_t *uhcip,
  80                     uhci_pipe_private_t *pp);
  81 static void     uhci_remove_ctrl_qh(uhci_state_t *uhcip,
  82                     uhci_pipe_private_t *pp);
  83 static void     uhci_insert_intr_qh(uhci_state_t *uhcip,
  84                     uhci_pipe_private_t *pp);
  85 static void     uhci_remove_intr_qh(uhci_state_t *uhcip,
  86                     uhci_pipe_private_t *pp);
  87 static void     uhci_remove_bulk_qh(uhci_state_t *uhcip,
  88                     uhci_pipe_private_t *pp);
  89 static void     uhci_insert_bulk_qh(uhci_state_t *uhcip,
  90                     uhci_pipe_private_t *pp);
  91 static void     uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
  92 static int      uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
  93                     uhci_bulk_isoc_xfer_t *info);
  94 static int      uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
  95                     uhci_bulk_isoc_xfer_t *info);
  96 static void     uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
  97                     uhci_bulk_isoc_xfer_t *info, uint_t index,
  98                     uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
  99 static void     uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
 100                     uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
 101                     uhci_bulk_isoc_td_pool_t **td_pool_pp);
 102 
 103 static  int     uhci_handle_isoc_receive(uhci_state_t *uhcip,
 104                 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
 105 static void     uhci_delete_isoc_td(uhci_state_t *uhcip,
 106                     uhci_td_t *td);
 107 #ifdef DEBUG
 108 static void     uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
 109 static void     uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
 110 #endif
 111 
 112 
 113 /*
 114  * uhci_build_interrupt_lattice:
 115  *
 116  * Construct the interrupt lattice tree using static Queue Head pointers.
 117  * This interrupt lattice tree has a total of 63 queue heads, and the
 118  * Host Controller (HC) processes these queue heads every frame.
 119  */
 120 static void
 121 uhci_build_interrupt_lattice(uhci_state_t *uhcip)
 122 {
 123         int                     half_list = NUM_INTR_QH_LISTS / 2;
 124         uint16_t                i, j, k;
 125         uhci_td_t               *sof_td, *isoc_td;
 126         uintptr_t               addr;
 127         queue_head_t            *list_array = uhcip->uhci_qh_pool_addr;
 128         queue_head_t            *tmp_qh;
 129         frame_lst_table_t       *frame_lst_tablep =
 130             uhcip->uhci_frame_lst_tablep;
 131 
 132         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 133             "uhci_build_interrupt_lattice:");
 134 
 135         /*
 136          * Reserve the first 63 queue head structures in the pool as static
 137          * queue heads; these are required for constructing the interrupt
 138          * lattice tree.
 139          */
 140         for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
 141                 SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
 142                 SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
 143                 list_array[i].qh_flag           = QUEUE_HEAD_FLAG_STATIC;
 144                 list_array[i].node              = i;
 145         }
 146 
 147         /* Build the interrupt lattice tree */
 148         for (i = 0; i < half_list - 1; i++) {
 149                 /*
 150                  * The next pointer in the host controller queue head
 151                  * descriptor must contain an iommu address. Calculate
 152                  * the offset from the cpu address and add it to the
 153                  * starting iommu address.
 154                  */
 155                 addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;
 156 
 157                 SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
 158                 SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
 159         }
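
             /*
              * Note on the indexing above: nodes 2*i + 1 and 2*i + 2 are the
              * children of node i, and each child's link_ptr points up at its
              * parent. The HC enters the lattice at a leaf every frame and
              * walks toward node 0, so a QH hung off a node nearer the root
              * is reached (and hence polled) more often than one hung off a
              * deeper node.
              */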
 160 
 161         /*
 162          * Initialize the interrupt list in the Frame list Table
 163          * so that it points to the bottom of the tree.
 164          */
 165         for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
 166                 addr = QH_PADDR(&list_array[half_list + i - 1]);
 167                 for (k = 0; k <  pow_2(VIRTUAL_TREE_HEIGHT); k++) {
 168                         SetFL32(uhcip,
 169                             frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
 170                             addr | HC_QUEUE_HEAD);
 171                 }
 172         }
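
             /*
              * With the 1024-entry frame list and the 32 leaf nodes of the
              * 63-node lattice, each leaf QH ends up referenced by 32 frame
              * list entries; this is what limits the longest supported
              * polling interval to 32ms (see uhci_bandwidth_adjust() below).
              */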
 173 
 174         /*
 175          * Create the control and bulk transfer queue heads.
 176          */
 177         uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
 178         tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;
 179 
 180         SetQH32(uhcip, list_array[0].link_ptr,
 181             (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));
 182 
 183         uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
 184         uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
 185         SetQH32(uhcip, tmp_qh->link_ptr,
 186             (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));
 187 
 188         SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);
 189 
 190         /*
 191          * Add a dummy TD to the static queue head 0. This TD is used
 192          * to generate an interrupt at the end of the frame.
 193          */
 194         sof_td = uhci_allocate_td_from_pool(uhcip);
 195 
 196         SetQH32(uhcip, list_array[0].element_ptr,
 197             TD_PADDR(sof_td) | HC_TD_HEAD);
 198         SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
 199         uhcip->uhci_sof_td = sof_td;
 200 
 201         /*
 202          * Add a dummy td that is used to generate an interrupt for
 203          * every 1024 frames.
 204          */
 205         isoc_td = uhci_allocate_td_from_pool(uhcip);
 206         SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
 207         uhcip->uhci_isoc_td = isoc_td;
 208 
 209         uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
 210         SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
 211             GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
 212         SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
 213         SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
 214             QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
 215 }
 216 
 217 
 218 /*
 219  * uhci_allocate_pools:
 220  *      Allocate the system memory for the Queue Head (QH) and the
 221  *      Transfer Descriptor (TD) pools. Both QH and TD structures
 222  *      must be aligned to a 16 byte boundary.
 223  */
 224 int
 225 uhci_allocate_pools(uhci_state_t *uhcip)
 226 {
 227         dev_info_t              *dip = uhcip->uhci_dip;
 228         size_t                  real_length;
 229         int                     i, result;
 230         uint_t                  ccount;
 231         ddi_device_acc_attr_t   dev_attr;
 232 
 233         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 234             "uhci_allocate_pools:");
 235 
 236         /* The host controller will be little endian */
 237         dev_attr.devacc_attr_version            = DDI_DEVICE_ATTR_V0;
 238         dev_attr.devacc_attr_endian_flags       = DDI_STRUCTURE_LE_ACC;
 239         dev_attr.devacc_attr_dataorder          = DDI_STRICTORDER_ACC;
 240 
 241         /* Allocate the TD pool DMA handle */
 242         if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
 243             &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {
 244 
 245                 return (USB_FAILURE);
 246         }
 247 
 248         /* Allocate the memory for the TD pool */
 249         if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
 250             uhci_td_pool_size * sizeof (uhci_td_t),
 251             &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
 252             (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
 253             &uhcip->uhci_td_pool_mem_handle)) {
 254 
 255                 return (USB_FAILURE);
 256         }
 257 
 258         /* Map the TD pool into the I/O address space */
 259         result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
 260             NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
 261             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
 262             NULL, &uhcip->uhci_td_pool_cookie, &ccount);
 263 
 264         bzero((void *)uhcip->uhci_td_pool_addr,
 265             uhci_td_pool_size * sizeof (uhci_td_t));
 266 
 267         /* Process the result */
 268         if (result == DDI_DMA_MAPPED) {
 269                 /* The cookie count should be 1 */
 270                 if (ccount != 1) {
 271                         USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 272                             "uhci_allocate_pools: More than 1 cookie");
 273 
 274                         return (USB_FAILURE);
 275                 }
 276         } else {
 277                 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 278                     "uhci_allocate_pools: Result = %d", result);
 279 
 280                 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
 281 
 282                 return (USB_FAILURE);
 283         }
 284 
 285         uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;
 286 
 287         /* Initialize the TD pool */
 288         for (i = 0; i < uhci_td_pool_size; i++) {
 289                 uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
 290         }
 291 
 292         /* Allocate the QH pool DMA handle */
 293         if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
 294             0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {
 295 
 296                 return (USB_FAILURE);
 297         }
 298 
 299         /* Allocate the memory for the QH pool */
 300         if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
 301             uhci_qh_pool_size * sizeof (queue_head_t),
 302             &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
 303             (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
 304             &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {
 305 
 306                 return (USB_FAILURE);
 307         }
 308 
 309         result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
 310             NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
 311             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 312             &uhcip->uhci_qh_pool_cookie, &ccount);
 313 
 314         /* Process the result */
 315         if (result == DDI_DMA_MAPPED) {
 316                 /* The cookie count should be 1 */
 317                 if (ccount != 1) {
 318                         USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 319                             "uhci_allocate_pools: More than 1 cookie");
 320 
 321                         return (USB_FAILURE);
 322                 }
 323         } else {
 324                 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
 325 
 326                 return (USB_FAILURE);
 327         }
 328 
 329         uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;
 330 
 331         bzero((void *)uhcip->uhci_qh_pool_addr,
 332             uhci_qh_pool_size * sizeof (queue_head_t));
 333 
 334         /* Initialize the QH pool */
 335         for (i = 0; i < uhci_qh_pool_size; i ++) {
 336                 uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
 337         }
 338 
 339         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 340             "uhci_allocate_pools: Completed");
 341 
 342         return (USB_SUCCESS);
 343 }
 344 
 345 
 346 /*
 347  * uhci_free_pools:
 348  *      Cleanup on attach failure or detach
 349  */
 350 void
 351 uhci_free_pools(uhci_state_t *uhcip)
 352 {
 353         int                     i, flag, rval;
 354         uhci_td_t               *td;
 355         uhci_trans_wrapper_t    *tw;
 356 
 357         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 358             "uhci_free_pools:");
 359 
 360         if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
 361                 for (i = 0; i < uhci_td_pool_size; i ++) {
 362                         td = &uhcip->uhci_td_pool_addr[i];
 363 
 364                         flag = uhcip->uhci_td_pool_addr[i].flag;
 365                         if ((flag != TD_FLAG_FREE) &&
 366                             (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
 367                                 tw = td->tw;
 368                                 uhci_free_tw(uhcip, tw);
 369                         }
 370 
 371                 }
 372 
 373                 if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
 374                         rval = ddi_dma_unbind_handle(
 375                             uhcip->uhci_td_pool_dma_handle);
 376                         ASSERT(rval == DDI_SUCCESS);
 377                 }
 378 
 379                 ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
 380         }
 381 
 382         /* Free the TD pool */
 383         if (uhcip->uhci_td_pool_dma_handle) {
 384                 ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
 385         }
 386 
 387         if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
 388                 if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
 389                         rval = ddi_dma_unbind_handle(
 390                             uhcip->uhci_qh_pool_dma_handle);
 391                         ASSERT(rval == DDI_SUCCESS);
 392                 }
 393                 ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
 394         }
 395 
 396         /* Free the QH pool */
 397         if (uhcip->uhci_qh_pool_dma_handle) {
 398                 ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
 399         }
 400 
 401         /* Free the Frame list Table area */
 402         if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
 403                 if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
 404                         rval = ddi_dma_unbind_handle(
 405                             uhcip->uhci_flt_dma_handle);
 406                         ASSERT(rval == DDI_SUCCESS);
 407                 }
 408                 ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
 409         }
 410 
 411         if (uhcip->uhci_flt_dma_handle) {
 412                 ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
 413         }
 414 }
 415 
 416 
 417 /*
 418  * uhci_decode_ddi_dma_addr_bind_handle_result:
 419  *      Process the return values of ddi_dma_addr_bind_handle()
 420  */
 421 void
 422 uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
 423 {
 424         char *msg;
 425 
 426         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
 427             "uhci_decode_ddi_dma_addr_bind_handle_result:");
 428 
 429         switch (result) {
 430         case DDI_DMA_PARTIAL_MAP:
 431                 msg = "Partial transfers not allowed";
 432                 break;
 433         case DDI_DMA_INUSE:
 434                 msg = "Handle is in use";
 435                 break;
 436         case DDI_DMA_NORESOURCES:
 437                 msg = "No resources";
 438                 break;
 439         case DDI_DMA_NOMAPPING:
 440                 msg = "No mapping";
 441                 break;
 442         case DDI_DMA_TOOBIG:
 443                 msg = "Object is too big";
 444                 break;
 445         default:
 446                 msg = "Unknown dma error";
 447         }
 448 
 449         USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
 450 }
 451 
 452 
 453 /*
 454  * uhci_init_ctlr:
 455  *      Initialize the Host Controller (HC).
 456  */
 457 int
 458 uhci_init_ctlr(uhci_state_t *uhcip)
 459 {
 460         dev_info_t *dip = uhcip->uhci_dip;
 461         uint_t  cmd_reg;
 462         uint_t  frame_base_addr;
 463 
 464         mutex_enter(&uhcip->uhci_int_mutex);
 465 
 466         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");
 467 
 468         /*
 469          * When USB legacy mode is enabled, the BIOS manages the USB keyboard
 470          * attached to the UHCI controller. It has been observed that
 471          * sometimes the BIOS does not clear the interrupts in the legacy
 472          * mode register in the PCI configuration space. So, disable the
 473          * SMI interrupts and route the interrupts to PIRQD here.
 474          */
 475         pci_config_put16(uhcip->uhci_config_handle,
 476             LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);
 477 
 478         /*
 479          * Disable all the interrupts.
 480          */
 481         Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);
 482 
 483         cmd_reg = Get_OpReg16(USBCMD);
 484         cmd_reg &= (~USBCMD_REG_HC_RUN);
 485 
 486         /* Stop the controller */
 487         Set_OpReg16(USBCMD, cmd_reg);
 488 
 489         /* Reset the host controller */
 490         Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);
 491 
 492         /* Wait 10ms for reset to complete */
 493         mutex_exit(&uhcip->uhci_int_mutex);
 494         delay(drv_usectohz(UHCI_RESET_DELAY));
 495         mutex_enter(&uhcip->uhci_int_mutex);
 496 
 497         Set_OpReg16(USBCMD, 0);
 498 
 499         /* Set the frame number to zero */
 500         Set_OpReg16(FRNUM, 0);
 501 
 502         if (uhcip->uhci_hc_soft_state == UHCI_CTLR_INIT_STATE) {
 503                 /* Initialize the Frame list base address area */
 504                 if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
 505                         mutex_exit(&uhcip->uhci_int_mutex);
 506 
 507                         return (USB_FAILURE);
 508                 }
 509         }
 510 
 511         /* Save the contents of the SOF modify (frame interval) register */
 512         uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);
 513 
 514         frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;
 515 
 516         /* Set the Frame list base address */
 517         Set_OpReg32(FRBASEADD, frame_base_addr);
 518 
 519         /*
 520          * Begin sending SOFs
 521          * Set the Host Controller Functional State to Operational
 522          */
 523         cmd_reg = Get_OpReg16(USBCMD);
 524         cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
 525             USBCMD_REG_CONFIG_FLAG);
 526 
 527         Set_OpReg16(USBCMD, cmd_reg);
 528 
 529         /*
 530          * Read back the command register as a sanity check
 531          * that the controller was actually initialized.
 532          */
 533         cmd_reg = Get_OpReg16(USBCMD);
 534 
 535         if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
 536             USBCMD_REG_CONFIG_FLAG))) {
 537                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 538                     "uhci_init_ctlr: Controller initialization failed");
 539                 mutex_exit(&uhcip->uhci_int_mutex);
 540 
 541                 return (USB_FAILURE);
 542         }
 543 
 544         /*
 545          * Set the ioc bit of the isoc intr td. This enables
 546          * the generation of an interrupt for every 1024 frames.
 547          */
 548         SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);
 549 
 550         /* Set host controller soft state to operational */
 551         uhcip->uhci_hc_soft_state = UHCI_CTLR_OPERATIONAL_STATE;
 552         mutex_exit(&uhcip->uhci_int_mutex);
 553 
 554         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 555             "uhci_init_ctlr: Completed");
 556 
 557         return (USB_SUCCESS);
 558 }
 559 
 560 
 561 /*
 562  * uhci_uninit_ctlr:
 563  *      uninitialize the Host Controller (HC).
 564  */
 565 void
 566 uhci_uninit_ctlr(uhci_state_t *uhcip)
 567 {
 568         if (uhcip->uhci_regs_handle) {
 569                 /* Disable all the interrupts. */
 570                 Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);
 571 
 572                 /* Complete the current transaction and then halt. */
 573                 Set_OpReg16(USBCMD, 0);
 574 
 575                 /* Wait for some time */
 576                 mutex_exit(&uhcip->uhci_int_mutex);
 577                 delay(drv_usectohz(UHCI_TIMEWAIT));
 578                 mutex_enter(&uhcip->uhci_int_mutex);
 579         }
 580 }
 581 
 582 
 583 /*
 584  * uhci_map_regs:
 585  *      The Host Controller (HC) contains a set of on-chip operational
 586  *      registers which should be mapped into a non-cacheable
 587  *      portion of the system addressable space.
 588  */
 589 int
 590 uhci_map_regs(uhci_state_t *uhcip)
 591 {
 592         dev_info_t              *dip = uhcip->uhci_dip;
 593         int                     index;
 594         uint32_t                regs_prop_len;
 595         int32_t                 *regs_list;
 596         uint16_t                command_reg;
 597         ddi_device_acc_attr_t   attr;
 598 
 599         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");
 600 
 601         /* The host controller will be little endian */
 602         attr.devacc_attr_version        = DDI_DEVICE_ATTR_V0;
 603         attr.devacc_attr_endian_flags   = DDI_STRUCTURE_LE_ACC;
 604         attr.devacc_attr_dataorder      = DDI_STRICTORDER_ACC;
 605 
 606         if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
 607             DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
 608             DDI_PROP_SUCCESS) {
 609 
 610                 return (USB_FAILURE);
 611         }
 612 
 613         for (index = 0; index * 5 < regs_prop_len; index++) {
 614                 if (regs_list[index * 5] & UHCI_PROP_MASK) {
 615                         break;
 616                 }
 617         }
 618 
 619         /*
 620          * Deallocate the memory allocated by the ddi_prop_lookup_int_array
 621          */
 622         ddi_prop_free(regs_list);
 623 
 624         if (index * 5 >= regs_prop_len) {
 625 
 626                 return (USB_FAILURE);
 627         }
 628 
 629         /* Map in operational registers */
 630         if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
 631             0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
 632             DDI_SUCCESS) {
 633                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 634                     "ddi_regs_map_setup: failed");
 635 
 636                 return (USB_FAILURE);
 637         }
 638 
 639         if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
 640                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 641                     "uhci_map_regs: Config error");
 642 
 643                 return (USB_FAILURE);
 644         }
 645 
 646         /* Make sure Memory Access Enable and Master Enable are set */
 647         command_reg = pci_config_get16(uhcip->uhci_config_handle,
 648             PCI_CONF_COMM);
 649         if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
 650                 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 651                     "uhci_map_regs: No MAE/ME");
 652         }
 653 
 654         command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
 655         pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);
 656 
 657         /*
 658          * Check whether I/O base address is configured and enabled.
 659          */
 660         if (!(command_reg & PCI_COMM_IO)) {
 661                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 662                     "I/O Base address access disabled");
 663 
 664                 return (USB_FAILURE);
 665         }
 666         /*
 667          * Get the IO base address of the controller
 668          */
 669         uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
 670             PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);
 671 
 672         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 673             "uhci_map_regs: Completed");
 674 
 675         return (USB_SUCCESS);
 676 }
 677 
 678 
 679 void
 680 uhci_unmap_regs(uhci_state_t *uhcip)
 681 {
 682         /* Unmap the UHCI registers */
 683         if (uhcip->uhci_regs_handle) {
 684                 /* Reset the host controller */
 685                 Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);
 686 
 687                 ddi_regs_map_free(&uhcip->uhci_regs_handle);
 688         }
 689 
 690         if (uhcip->uhci_config_handle) {
 691                 pci_config_teardown(&uhcip->uhci_config_handle);
 692         }
 693 }
 694 
 695 
 696 /*
 697  * uhci_set_dma_attributes:
 698  *      Set the limits in the DMA attributes structure. Most of the values
 699  *      used in the DMA limit structures are the default values specified in
 700  *      the Writing PCI device drivers document.
 701  */
 702 void
 703 uhci_set_dma_attributes(uhci_state_t *uhcip)
 704 {
 705         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 706             "uhci_set_dma_attributes:");
 707 
 708         /* Initialize the DMA attributes */
 709         uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
 710         uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
 711         uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;
 712 
 713         /* 32 bit addressing */
 714         uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;
 715 
 716         /*
 717          * Setting dma_attr_align to 512 sometimes causes the
 718          * handle binding to fail; the reason is not known. Setting
 719          * it to 16 is sufficient for our case (16 byte alignment is
 720          * required by the UHCI spec for TD descriptors).
 721          */
 722 
 723         /* 16 byte alignment */
 724         uhcip->uhci_dma_attr.dma_attr_align = 0x10;
 725 
 726         /*
 727          * Since the PCI specification requires only byte alignment,
 728          * the burstsize field should be set to 1 for PCI devices.
 729          */
 730         uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;
 731 
 732         uhcip->uhci_dma_attr.dma_attr_minxfer        = 0x1;
 733         uhcip->uhci_dma_attr.dma_attr_maxxfer        = 0xffffffull;
 734         uhcip->uhci_dma_attr.dma_attr_seg    = 0xffffffffull;
 735         uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
 736         uhcip->uhci_dma_attr.dma_attr_granular       = 1;
 737         uhcip->uhci_dma_attr.dma_attr_flags  = 0;
 738 }
 739 
 740 
 741 uint_t
 742 pow_2(uint_t x)
 743 {
 744         return ((x == 0) ? 1 : (1 << x));
 745 }
 746 
 747 
 748 uint_t
 749 log_2(uint_t x)
 750 {
 751         int ret_val = 0;
 752 
 753         while (x != 1) {
 754                 ret_val++;
 755                 x = x >> 1;
 756         }
 757 
 758         return (ret_val);
 759 }
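
     /*
      * Note: log_2() assumes its argument is a power of 2; for other values
      * it returns the floor of the base-2 logarithm (e.g. log_2(8) == 3 and
      * log_2(10) == 3), so pow_2(log_2(x)) rounds x down to a power of 2.
      */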
 760 
 761 
 762 /*
 763  * uhci_obtain_state:
 764  */
 765 uhci_state_t *
 766 uhci_obtain_state(dev_info_t *dip)
 767 {
 768         int instance = ddi_get_instance(dip);
 769         uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);
 770 
 771         ASSERT(state != NULL);
 772 
 773         return (state);
 774 }
 775 
 776 
 777 /*
 778  * uhci_alloc_hcdi_ops:
 779  *      The HCDI interfaces or entry points are the software interfaces used by
 780  *      the Universal Serial Bus Driver (USBA) to access the services of the
 781  *      Host Controller Driver (HCD). During HCD initialization, inform USBA
 782  *      about all available HCDI interfaces or entry points.
 783  */
 784 usba_hcdi_ops_t *
 785 uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
 786 {
 787         usba_hcdi_ops_t *hcdi_ops;
 788 
 789         USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
 790             "uhci_alloc_hcdi_ops:");
 791 
 792         hcdi_ops = usba_alloc_hcdi_ops();
 793 
 794         hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;
 795 
 796         hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
 797         hcdi_ops->usba_hcdi_pipe_close       = uhci_hcdi_pipe_close;
 798         hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;
 799         hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
 800             uhci_hcdi_pipe_reset_data_toggle;
 801 
 802         hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
 803         hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
 804         hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
 805         hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;
 806 
 807         hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
 808         hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
 809             uhci_hcdi_pipe_stop_intr_polling;
 810         hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
 811             uhci_hcdi_pipe_stop_isoc_polling;
 812 
 813         hcdi_ops->usba_hcdi_get_current_frame_number =
 814             uhci_hcdi_get_current_frame_number;
 815         hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;
 816 
 817         hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
 818         hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
 819         hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
 820         hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
 821         hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;
 822 
 823         hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
 824         hcdi_ops->usba_hcdi_console_output_enter =
 825             uhci_hcdi_polled_output_enter;
 826         hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
 827         hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
 828         hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;
 829 
 830         return (hcdi_ops);
 831 }
 832 
 833 
 834 /*
 835  * uhci_init_frame_lst_table:
 836  *      Allocate the system memory and initialize the Host Controller
 837  *      Frame List Table area. The start of the Frame List Table
 838  *      area must be 4096 byte aligned.
 839  */
 840 static int
 841 uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
 842 {
 843         int                     result;
 844         uint_t                  ccount;
 845         size_t                  real_length;
 846         ddi_device_acc_attr_t   dev_attr;
 847 
 848         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
 849 
 850         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 851             "uhci_init_frame_lst_table:");
 852 
 853         /* The host controller will be little endian */
 854         dev_attr.devacc_attr_version            = DDI_DEVICE_ATTR_V0;
 855         dev_attr.devacc_attr_endian_flags       = DDI_STRUCTURE_LE_ACC;
 856         dev_attr.devacc_attr_dataorder          = DDI_STRICTORDER_ACC;
 857 
 858         /* 4K alignment required */
 859         uhcip->uhci_dma_attr.dma_attr_align = 0x1000;
 860 
 861         /* Create space for the Frame List Table */
 862         if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
 863             0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {
 864 
 865                 return (USB_FAILURE);
 866         }
 867 
 868         /* Reset to default 16 bytes */
 869         uhcip->uhci_dma_attr.dma_attr_align = 0x10;
 870 
 871         if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
 872             SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
 873             DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
 874             &real_length, &uhcip->uhci_flt_mem_handle)) {
 875 
 876                 return (USB_FAILURE);
 877         }
 878 
 879         /* Map the whole Frame list base area into the I/O address space */
 880         result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
 881             NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
 882             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 883             &uhcip->uhci_flt_cookie, &ccount);
 884 
 885         if (result == DDI_DMA_MAPPED) {
 886                 /* The cookie count should be 1 */
 887                 if (ccount != 1) {
 888                         USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
 889                             "uhci_init_frame_list_table: More than 1 cookie");
 890 
 891                         return (USB_FAILURE);
 892                 }
 893         } else {
 894                 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
 895 
 896                 return (USB_FAILURE);
 897         }
 898 
 899         uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;
 900 
 901         bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);
 902 
 903         /* Initialize the interrupt lists */
 904         uhci_build_interrupt_lattice(uhcip);
 905 
 906         return (USB_SUCCESS);
 907 }
 908 
 909 
 910 /*
 911  * uhci_alloc_queue_head:
 912  *      Allocate a queue head
 913  */
 914 queue_head_t *
 915 uhci_alloc_queue_head(uhci_state_t *uhcip)
 916 {
 917         int             index;
 918         uhci_td_t       *dummy_td;
 919         queue_head_t    *queue_head;
 920 
 921         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
 922             "uhci_alloc_queue_head");
 923 
 924         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
 925 
 926         /* Allocate a dummy td first. */
 927         if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {
 928 
 929                 USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
 930                     "uhci_alloc_queue_head: allocate td from pool failed");
 931 
 932                 return (NULL);
 933         }
 934 
 935         /*
 936          * The first 63 queue heads in the Queue Head (QH)
 937          * buffer pool are reserved for building the interrupt lattice
 938          * tree. Search for a free Queue Head in the QH buffer pool.
 939          */
 940         for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
 941                 if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
 942                     QUEUE_HEAD_FLAG_FREE) {
 943                         break;
 944                 }
 945         }
 946 
 947         USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
 948             "uhci_alloc_queue_head: Allocated %d", index);
 949 
 950         if (index == uhci_qh_pool_size) {
 951                 USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
 952                     "uhci_alloc_queue_head: All QH exhausted");
 953 
 954                 /* Free the dummy td allocated for this qh. */
 955                 dummy_td->flag = TD_FLAG_FREE;
 956 
 957                 return (NULL);
 958         }
 959 
 960         queue_head = &uhcip->uhci_qh_pool_addr[index];
 961         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
 962             "uhci_alloc_queue_head: Allocated address 0x%p",
 963             (void *)queue_head);
 964 
 965         bzero((void *)queue_head, sizeof (queue_head_t));
 966         SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
 967         SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
 968         queue_head->prev_qh  = NULL;
 969         queue_head->qh_flag  = QUEUE_HEAD_FLAG_BUSY;
 970 
 971         bzero((char *)dummy_td, sizeof (uhci_td_t));
 972         queue_head->td_tailp = dummy_td;
 973         SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));
 974 
 975         return (queue_head);
 976 }
 977 
 978 
 979 /*
 980  * uhci_allocate_bandwidth:
 981  *      Figure out whether or not this interval can be supported. Return
 982  *      the index into the lattice if it can be supported; return
 983  *      allocation failure if it cannot.
 984  */
 985 int
 986 uhci_allocate_bandwidth(
 987         uhci_state_t            *uhcip,
 988         usba_pipe_handle_data_t *pipe_handle,
 989         uint_t                  *node)
 990 {
 991         int             bandwidth;      /* Requested bandwidth */
 992         uint_t          min, min_index;
 993         uint_t          i;
 994         uint_t          height;         /* Bandwidth's height in the tree */
 995         uint_t          leftmost;
 996         uint_t          length;
 997         uint32_t        paddr;
 998         queue_head_t    *tmp_qh;
 999         usb_ep_descr_t  *endpoint = &pipe_handle->p_ep;
1000 
1001         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1002 
1003         /*
1004          * Calculate the length in bytes of a transaction on this
1005          * periodic endpoint.
1006          */
1007         mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1008 
1009         length = uhci_compute_total_bandwidth(endpoint,
1010             pipe_handle->p_usba_device->usb_port_status);
1011         mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1012 
1013         /*
1014          * If the length in bytes plus the allocated bandwidth exceeds
1015          * the maximum, return bandwidth allocation failure.
1016          */
1017         if ((length + uhcip->uhci_bandwidth_intr_min +
1018             uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
1019                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1020                     "uhci_allocate_bandwidth: "
1021                     "Reached maximum bandwidth value and cannot allocate "
1022                     "bandwidth for a given Interrupt/Isoch endpoint");
1023 
1024                 return (USB_NO_BANDWIDTH);
1025         }
1026 
1027         /*
1028          * Isoc xfers only consume isochronous bandwidth, not a lattice node
1029          */
1030         if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
1031                 uhcip->uhci_bandwidth_isoch_sum += length;
1032 
1033                 return (USB_SUCCESS);
1034         }
1035 
1036         /*
1037          * This is an interrupt endpoint.
1038          * Adjust bandwidth to be a power of 2
1039          */
1040         mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1041         bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
1042             pipe_handle->p_usba_device->usb_port_status);
1043         mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1044 
1045         /*
1046          * If this bandwidth can't be supported,
1047          * return allocation failure.
1048          */
1049         if (bandwidth == USB_FAILURE) {
1050 
1051                 return (USB_FAILURE);
1052         }
1053 
1054         USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
1055             "The new bandwidth is %d", bandwidth);
1056 
1057         /* Find the leaf with the smallest allocated bandwidth */
1058         min_index = 0;
1059         min = uhcip->uhci_bandwidth[0];
1060 
1061         for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
1062                 if (uhcip->uhci_bandwidth[i] < min) {
1063                         min_index = i;
1064                         min = uhcip->uhci_bandwidth[i];
1065                 }
1066         }
1067 
1068         USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
1069             "The leaf with minimal bandwidth %d, "
1070             "The smallest bandwidth %d", min_index, min);
1071 
1072         /*
1073          * Find the index into the lattice given the
1074          * leaf with the smallest allocated bandwidth.
1075          */
1076         height = uhci_lattice_height(bandwidth);
1077         USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
1078             "The height is %d", height);
1079 
1080         *node = uhci_tree_bottom_nodes[min_index];
1081 
1082         /* Check if there are isoc TDs scheduled for this frame */
1083         if (uhcip->uhci_isoc_q_tailp[*node]) {
1084                 paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
1085                     FRAME_LST_PTR_MASK);
1086         } else {
1087                 paddr = (uhcip->uhci_frame_lst_tablep[*node] &
1088                     FRAME_LST_PTR_MASK);
1089         }
1090 
1091         tmp_qh = QH_VADDR(paddr);
1092         *node = tmp_qh->node;
1093         for (i = 0; i < height; i++) {
1094                 *node = uhci_lattice_parent(*node);
1095         }
1096 
1097         USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
1098             "The real node is %d", *node);
1099 
1100         /*
1101          * Find the leftmost leaf in the subtree specified by the node.
1102          */
1103         leftmost = uhci_leftmost_leaf(*node, height);
1104         USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
1105             "Leftmost %d", leftmost);
1106 
1107         for (i = leftmost; i < leftmost +
1108             (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
1109 
1110                 if ((length + uhcip->uhci_bandwidth_isoch_sum +
1111                     uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {
1112 
1113                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1114                             "uhci_allocate_bandwidth: "
1115                             "Reached maximum bandwidth value and cannot "
1116                             "allocate bandwidth for Interrupt endpoint");
1117 
1118                         return (USB_NO_BANDWIDTH);
1119                 }
1120         }
1121 
1122         /*
1123          * All the leaves for this node must be updated with the bandwidth.
1124          */
1125         for (i = leftmost; i < leftmost +
1126             (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
1127                 uhcip->uhci_bandwidth[i] += length;
1128         }
1129 
1130         /* Find the leaf with the smallest allocated bandwidth */
1131         min_index = 0;
1132         min = uhcip->uhci_bandwidth[0];
1133 
1134         for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
1135                 if (uhcip->uhci_bandwidth[i] < min) {
1136                         min_index = i;
1137                         min = uhcip->uhci_bandwidth[i];
1138                 }
1139         }
1140 
1141         /* Save the minimum for later use */
1142         uhcip->uhci_bandwidth_intr_min = min;
1143 
1144         return (USB_SUCCESS);
1145 }
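
     /*
      * Summary of the above: the endpoint's per-transaction byte cost is
      * charged to each of the NUM_FRAME_LST_ENTRIES/bandwidth frames covered
      * by the chosen lattice node, after verifying that none of those frames
      * would exceed MAX_PERIODIC_BANDWIDTH; the per-frame minimum is then
      * recomputed for use by subsequent allocations.
      */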
1146 
1147 
1148 /*
1149  * uhci_deallocate_bandwidth:
1150  *      Deallocate bandwidth for the given node in the lattice
1151  *      and the length of transfer.
1152  */
1153 void
1154 uhci_deallocate_bandwidth(uhci_state_t *uhcip,
1155     usba_pipe_handle_data_t *pipe_handle)
1156 {
1157         uint_t          bandwidth;
1158         uint_t          height;
1159         uint_t          leftmost;
1160         uint_t          i;
1161         uint_t          min;
1162         usb_ep_descr_t  *endpoint = &pipe_handle->p_ep;
1163         uint_t          node, length;
1164         uhci_pipe_private_t *pp =
1165             (uhci_pipe_private_t *)pipe_handle->p_hcd_private;
1166 
1167         /* This routine is protected by the uhci_int_mutex */
1168         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1169 
1170         /* Obtain the length */
1171         mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1172         length = uhci_compute_total_bandwidth(endpoint,
1173             pipe_handle->p_usba_device->usb_port_status);
1174         mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1175 
1176         /*
1177          * If this is an isochronous endpoint, just delete endpoint's
1178          * bandwidth from the total allocated isochronous bandwidth.
1179          */
1180         if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
1181                 uhcip->uhci_bandwidth_isoch_sum -= length;
1182 
1183                 return;
1184         }
1185 
1186         /* Obtain the node */
1187         node = pp->pp_node;
1188 
1189         /* Adjust bandwidth to be a power of 2 */
1190         mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1191         bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
1192             pipe_handle->p_usba_device->usb_port_status);
1193         mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1194 
1195         /* Find the height in the tree */
1196         height = uhci_lattice_height(bandwidth);
1197 
1198         /*
1199          * Find the leftmost leaf in the subtree specified by the node
1200          */
1201         leftmost = uhci_leftmost_leaf(node, height);
1202 
1203         /* Delete the bandwidth from the appropriate lists */
1204         for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
1205             i ++) {
1206                 uhcip->uhci_bandwidth[i] -= length;
1207         }
1208 
1209         min = uhcip->uhci_bandwidth[0];
1210 
1211         /* Recompute the minimum */
1212         for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
1213                 if (uhcip->uhci_bandwidth[i] < min) {
1214                         min = uhcip->uhci_bandwidth[i];
1215                 }
1216         }
1217 
1218         /* Save the minimum for later use */
1219         uhcip->uhci_bandwidth_intr_min = min;
1220 }
1221 
1222 
1223 /*
1224  * uhci_compute_total_bandwidth:
1225  *
1226  * Given a periodic endpoint (interrupt or isochronous) determine the total
1227  * bandwidth for one transaction. The UHCI host controller traverses the
1228  * endpoint descriptor lists on a first-come-first-serve basis. When the HC
1229  * services an endpoint, only a single transaction attempt is made. The  HC
1230  * moves to the next Endpoint Descriptor after the first transaction attempt
1231  * rather than finishing the entire Transfer Descriptor. Therefore, when  a
1232  * Transfer Descriptor is inserted into the lattice, we will only count the
1233  * number of bytes for one transaction.
1234  *
1235  * The following formulas are used for calculating bandwidth in terms of
1236  * bytes for a single USB full speed and low speed transaction
1237  * respectively. The protocol overhead differs for each type of USB
1238  * transfer; these formulas and protocol overheads are derived from
1239  * section 5.9.3 of the USB Specification and from the Bandwidth
1240  * Analysis white paper posted on the USB developer forum.
1241  *
1242  * Full-Speed:
1243  *        Protocol overhead  + ((MaxPacketSize * 7)/6 )  + Host_Delay
1244  *
1245  * Low-Speed:
1246  *              Protocol overhead  + Hub LS overhead +
1247  *                (Low-Speed clock * ((MaxPacketSize * 7)/6 )) + Host_Delay
1248  */
1249 static uint_t
1250 uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
1251                 usb_port_status_t port_status)
1252 {
1253         uint_t          bandwidth;
1254         ushort_t        MaxPacketSize = endpoint->wMaxPacketSize;
1255 
1256         /* Add Host Controller specific delay to required bandwidth */
1257         bandwidth = HOST_CONTROLLER_DELAY;
1258 
1259         /* Add bit-stuffing overhead */
1260         MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);
1261 
1262         /* Check for a low speed device */
1263         if (port_status == USBA_LOW_SPEED_DEV) {
1264                 /* Low Speed interrupt transaction */
1265                 bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
1266                     HUB_LOW_SPEED_PROTO_OVERHEAD +
1267                     (LOW_SPEED_CLOCK * MaxPacketSize));
1268         } else {
1269                 /* Full Speed transaction */
1270                 bandwidth += MaxPacketSize;
1271 
1272                 if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
1273                         /* Full Speed interrupt transaction */
1274                         bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
1275                 } else {
1276                         /* Isochronous input transaction */
1277                         if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
1278                                 bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
1279                         } else {
1280                                 /* Isochronous output transaction */
1281                                 bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
1282                         }
1283                 }
1284         }
1285 
1286         return (bandwidth);
1287 }
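
     /*
      * Illustrative example (the numeric values of the overhead macros are
      * not assumed here): for a full-speed interrupt endpoint with a
      * wMaxPacketSize of 8, the bit-stuffing adjustment above yields
      * (8 * 7) / 6 = 9 bytes, so the per-frame cost charged against
      * MAX_PERIODIC_BANDWIDTH is
      *
      *      HOST_CONTROLLER_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + 9
      */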
1288 
1289 
1290 /*
1291  * uhci_bandwidth_adjust:
1292  */
1293 static int
1294 uhci_bandwidth_adjust(
1295         uhci_state_t            *uhcip,
1296         usb_ep_descr_t          *endpoint,
1297         usb_port_status_t       port_status)
1298 {
1299         int     i = 0;
1300         uint_t  interval;
1301 
1302         /*
1303          * Get the polling interval from the endpoint descriptor
1304          */
1305         interval = endpoint->bInterval;
1306 
1307         /*
1308          * The bInterval value in the endpoint descriptor can range
1309          * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
1310          * and the host controller cycles through these nodes every
1311          * 32ms. The longest polling  interval that the  controller
1312          * supports is 32ms.
1313          */
1314 
1315         /*
1316          * Return an error if the polling interval is less than 1ms
1317          * or greater than 255ms.
1318          */
1319         if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
1320                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1321                     "uhci_bandwidth_adjust: Endpoint's poll interval must be "
1322                     "between %d and %d ms", MIN_POLL_INTERVAL,
1323                     MAX_POLL_INTERVAL);
1324 
1325                 return (USB_FAILURE);
1326         }
1327 
1328         /*
1329          * According to the USB Specification, a full-speed endpoint can
1330          * specify a desired polling interval of 1ms to 255ms, and low
1331          * speed endpoints are limited to specifying only 10ms to
1332          * 255ms. But some old keyboards & mice use a polling interval
1333          * of 8ms. For compatibility purposes, we allow a polling
1334          * interval between 8ms & 255ms for low speed endpoints.
1335          */
1336         if ((port_status == USBA_LOW_SPEED_DEV) &&
1337             (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
1338                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1339                     "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
1340                     "must be >= %d ms, adjusted",
1341                     MIN_LOW_SPEED_POLL_INTERVAL);
1342 
1343                 interval = MIN_LOW_SPEED_POLL_INTERVAL;
1344         }
1345 
1346         /*
1347          * If polling interval is greater than 32ms,
1348          * adjust polling interval equal to 32ms.
1349          */
1350         if (interval > 32) {
1351                 interval = 32;
1352         }
1353 
1354         /*
1355          * Find the largest power of 2 that is less than
1356          * or equal to the interval.
1357          */
1358         while ((pow_2(i)) <= interval) {
1359                 i++;
1360         }
1361 
1362         return (pow_2((i - 1)));
1363 }
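
     /*
      * Illustrative example: a full-speed endpoint with bInterval = 10 is
      * rounded down by the loop above to pow_2(3) = 8, so it is polled every
      * 8ms. A low-speed endpoint asking for less than the minimum (e.g.
      * bInterval = 4, assuming MIN_LOW_SPEED_POLL_INTERVAL is the 8ms
      * described above) is first bumped to 8ms and then likewise rounded
      * to 8.
      */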
1364 
1365 
1366 /*
1367  * uhci_lattice_height:
1368  *      Given the requested bandwidth, find the height in the tree at
1369  *      which the nodes for this bandwidth fall.  The height is measured
1370  *      as the number of nodes from the leaf to the level specified by
1371  *      bandwidth. The root of the tree is at height TREE_HEIGHT.
1372  */
1373 static uint_t
1374 uhci_lattice_height(uint_t bandwidth)
1375 {
1376         return (TREE_HEIGHT - (log_2(bandwidth)));
1377 }
1378 
1379 
1380 static uint_t
1381 uhci_lattice_parent(uint_t node)
1382 {
1383         return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
1384 }
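
     /*
      * For example, uhci_lattice_parent(1) = 1/2 = 0 and
      * uhci_lattice_parent(2) = (2/2) - 1 = 0, the inverse of the child
      * numbering (2*i + 1, 2*i + 2) used in uhci_build_interrupt_lattice().
      */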
1385 
1386 
1387 /*
1388  * uhci_leftmost_leaf:
1389  *      Find the leftmost leaf in the subtree specified by the node.
1390  *      Height refers to number of nodes from the bottom of the tree
1391  *      to the node,  including the node.
1392  */
1393 static uint_t
1394 uhci_leftmost_leaf(uint_t node, uint_t height)
1395 {
1396         node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
1397             NUM_FRAME_LST_ENTRIES;
1398         return (node);
1399 }
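
     /*
      * Sketch of the mapping, assuming the usual TREE_HEIGHT and
      * VIRTUAL_TREE_HEIGHT of 5 (i.e. a 1024-entry frame list): the root
      * (node 0) at height TREE_HEIGHT maps to leaf 0 and its bandwidth spans
      * all NUM_FRAME_LST_ENTRIES frames, while the leftmost lattice leaf
      * (node 31) at height 0 also maps to leaf 0 but spans only
      * NUM_FRAME_LST_ENTRIES/32 frames in uhci_allocate_bandwidth().
      */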
1400 
1401 
1402 /*
1403  * uhci_insert_qh:
1404  *      Add the Queue Head (QH) into the Host Controller's (HC)
1405  *      appropriate queue head list.
1406  */
1407 void
1408 uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
1409 {
1410         uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
1411 
1412         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1413             "uhci_insert_qh:");
1414 
1415         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1416 
1417         switch (UHCI_XFER_TYPE(&ph->p_ep)) {
1418         case USB_EP_ATTR_CONTROL:
1419                 uhci_insert_ctrl_qh(uhcip, pp);
1420                 break;
1421         case USB_EP_ATTR_BULK:
1422                 uhci_insert_bulk_qh(uhcip, pp);
1423                 break;
1424         case USB_EP_ATTR_INTR:
1425                 uhci_insert_intr_qh(uhcip, pp);
1426                 break;
1427         case USB_EP_ATTR_ISOCH:
1428                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
1429                     "uhci_insert_qh: Illegal request");
1430                 break;
1431         }
1432 }
1433 
1434 
1435 /*
1436  * uhci_insert_ctrl_qh:
1437  *      Insert a control QH into the Host Controller's (HC) control QH list.
1438  */
1439 static void
1440 uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
1441 {
1442         queue_head_t *qh = pp->pp_qh;
1443 
1444         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1445             "uhci_insert_ctrl_qh:");
1446 
1447         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1448 
1449         if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
1450                 uhcip->uhci_ctrl_xfers_q_head->prev_qh    = UHCI_INVALID_PTR;
1451         }
1452 
1453         SetQH32(uhcip, qh->link_ptr,
1454             GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
1455         qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
1456         SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
1457             QH_PADDR(qh) | HC_QUEUE_HEAD);
1458         uhcip->uhci_ctrl_xfers_q_tail = qh;
1460 }
1461 
1462 
1463 /*
1464  * uhci_insert_bulk_qh:
1465  *      Insert a bulk QH into the Host Controller's (HC) bulk QH list.
1466  */
1467 static void
1468 uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
1469 {
1470         queue_head_t *qh = pp->pp_qh;
1471 
1472         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1473             "uhci_insert_bulk_qh:");
1474 
1475         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1476 
1477         if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
1478                 uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
1479         } else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
1480             uhcip->uhci_bulk_xfers_q_tail->link_ptr) {
1481 
1482                 /* If there is already a loop, we should keep the loop. */
1483                 qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
1484         }
1485 
1486         qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
1487         SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
1488             QH_PADDR(qh) | HC_QUEUE_HEAD);
1489         uhcip->uhci_bulk_xfers_q_tail = qh;
1490 }
1491 
1492 
1493 /*
1494  * uhci_insert_intr_qh:
1495  *      Insert a periodic queue head (i.e. an interrupt queue head) into the
1496  *      Host Controller's (HC) interrupt lattice tree.
1497  */
1498 static void
1499 uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
1500 {
1501         uint_t          node = pp->pp_node;  /* The appropriate node was */
1502                                                 /* found during the opening */
1503                                                 /* of the pipe.  */
1504         queue_head_t    *qh = pp->pp_qh;
1505         queue_head_t    *next_lattice_qh, *lattice_qh;
1506 
1507         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1508             "uhci_insert_intr_qh:");
1509 
1510         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1511 
1512         /* Find the lattice queue head */
1513         lattice_qh = &uhcip->uhci_qh_pool_addr[node];
1514         next_lattice_qh =
1515             QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);
1516 
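             /*
              * Splice the new QH between the lattice QH and its current
              * successor so the HC picks it up on the next pass through
              * this lattice node:
              *
              *    before:  lattice_qh ------------------> next_lattice_qh
              *    after:   lattice_qh --> qh (new) ------> next_lattice_qh
              */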
1517         next_lattice_qh->prev_qh = qh;
1518         qh->link_ptr = lattice_qh->link_ptr;
1519         qh->prev_qh  = lattice_qh;
1520         SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
1521         pp->pp_data_toggle = 0;
1522 }
1523 
1524 
1525 /*
1526  * uhci_insert_intr_td:
1527  *      Create a TD and a data buffer for an interrupt endpoint.
1528  */
1529 int
1530 uhci_insert_intr_td(
1531         uhci_state_t            *uhcip,
1532         usba_pipe_handle_data_t *ph,
1533         usb_intr_req_t          *req,
1534         usb_flags_t             flags)
1535 {
1536         int                     error, pipe_dir;
1537         uint_t                  length, mps;
1538         uint32_t                buf_offs;
1539         uhci_td_t               *tmp_td;
1540         usb_intr_req_t          *intr_reqp;
1541         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
1542         uhci_trans_wrapper_t    *tw;
1543 
1544         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1545             "uhci_insert_intr_td: req: 0x%p", (void *)req);
1546 
1547         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1548 
1549         /* Get the interrupt pipe direction */
1550         pipe_dir = UHCI_XFER_DIR(&ph->p_ep);
1551 
1552         /* Get the current interrupt request pointer */
1553         if (req) {
1554                 length = req->intr_len;
1555         } else {
1556                 ASSERT(pipe_dir == USB_EP_DIR_IN);
1557                 length = (pp->pp_client_periodic_in_reqp) ?
1558                     (((usb_intr_req_t *)pp->
1559                     pp_client_periodic_in_reqp)->intr_len) :
1560                     ph->p_ep.wMaxPacketSize;
1561         }
1562 
1563         /* Check the size of interrupt request */
1564         if (length > UHCI_MAX_TD_XFER_SIZE) {
1565 
1566                 /* the length must not exceed UHCI_MAX_TD_XFER_SIZE */
1567                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1568                     "uhci_insert_intr_td: Intr request size 0x%x is "
1569                     "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);
1570 
1571                 return (USB_INVALID_REQUEST);
1572         }
1573 
1574         USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1575             "uhci_insert_intr_td: length: 0x%x", length);
1576 
1577         /* Allocate a transaction wrapper */
1578         if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
1579             NULL) {
1580 
1581                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1582                     "uhci_insert_intr_td: TW allocation failed");
1583 
1584                 return (USB_NO_RESOURCES);
1585         }
1586 
1587         /*
1588          * Initialize the callback and any callback
1589          * data for when the td completes.
1590          */
1591         tw->tw_handle_td = uhci_handle_intr_td;
1592         tw->tw_handle_callback_value = NULL;
1593         tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1594             PID_OUT : PID_IN;
1595         tw->tw_curr_xfer_reqp = (usb_opaque_t)req;
1596 
1597         /*
1598          * If this is an interrupt IN request and no interrupt request is
1599          * supplied, allocate a USB interrupt request structure for the
1600          * current interrupt polling request.
1601          */
1602         if (tw->tw_direction == PID_IN) {
1603                 if ((error = uhci_allocate_periodic_in_resource(uhcip,
1604                     pp, tw, flags)) != USB_SUCCESS) {
1605                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1606                             "uhci_insert_intr_td: Interrupt request structure "
1607                             "allocation failed");
1608 
1609                         /* free the transfer wrapper */
1610                         uhci_deallocate_tw(uhcip, pp, tw);
1611 
1612                         return (error);
1613                 }
1614         }
1615 
1616         intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
1617         ASSERT(tw->tw_curr_xfer_reqp != NULL);
1618 
1619         tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
1620             intr_reqp->intr_timeout : 0;
1621 
1622         /* DATA IN */
1623         if (tw->tw_direction == PID_IN) {
1624                 /* Insert the td onto the queue head */
1625                 error = uhci_insert_hc_td(uhcip, 0,
1626                     length, pp, tw, PID_IN, intr_reqp->intr_attributes);
1627 
1628                 if (error != USB_SUCCESS) {
1629 
1630                         uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
1631                         /* free the transfer wrapper */
1632                         uhci_deallocate_tw(uhcip, pp, tw);
1633 
1634                         return (USB_NO_RESOURCES);
1635                 }
1636                 tw->tw_bytes_xfered = 0;
1637 
1638                 return (USB_SUCCESS);
1639         }
1640 
1641         if (req->intr_len) {
1642                 /* DATA OUT */
1643                 ASSERT(req->intr_data != NULL);
1644 
1645                 /* Copy the data into the message */
1646                 ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
1647                     (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
1648         }
1649 
1650         /* set tw->tw_claim flag, so that nobody else works on this tw. */
1651         tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
1652 
1653         mps = ph->p_ep.wMaxPacketSize;
1654         buf_offs = 0;
1655 
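             /*
              * The request is split into wMaxPacketSize-sized TDs, with the
              * last TD carrying the remainder. For example (illustrative
              * figures), a 150-byte request with a 64-byte wMaxPacketSize
              * becomes three TDs of 64, 64 and 22 bytes at buffer offsets
              * 0, 64 and 128.
              */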
1656         /* Insert tds onto the queue head */
1657         while (length > 0) {
1658 
1659                 error = uhci_insert_hc_td(uhcip, buf_offs,
1660                     (length > mps) ? mps : length,
1661                     pp, tw, PID_OUT,
1662                     intr_reqp->intr_attributes);
1663 
1664                 if (error != USB_SUCCESS) {
1665                         /* no resource. */
1666                         break;
1667                 }
1668 
1669                 if (length <= mps) {
1670                         /* inserted all data. */
1671                         length = 0;
1672 
1673                 } else {
1674 
1675                         buf_offs += mps;
1676                         length -= mps;
1677                 }
1678         }
1679 
1680         if (error != USB_SUCCESS) {
1681 
1682                 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
1683                     "uhci_insert_intr_td: allocate td failed, free resource");
1684 
1685                 /* remove all the tds */
1686                 while (tw->tw_hctd_head != NULL) {
1687                         uhci_delete_td(uhcip, tw->tw_hctd_head);
1688                 }
1689 
1690                 tw->tw_claim = UHCI_NOT_CLAIMED;
1691                 uhci_deallocate_tw(uhcip, pp, tw);
1692 
1693                 return (error);
1694         }
1695 
1696         /* allow HC to xfer the tds of this tw */
1697         tmp_td = tw->tw_hctd_head;
1698         while (tmp_td != NULL) {
1699 
1700                 SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
1701                 tmp_td = tmp_td->tw_td_next;
1702         }
1703 
1704         tw->tw_bytes_xfered = 0;
1705         tw->tw_claim = UHCI_NOT_CLAIMED;
1706 
1707         return (error);
1708 }
1709 
1710 
1711 /*
1712  * uhci_create_transfer_wrapper:
1713  *      Create a Transaction Wrapper (TW) for non-isoc transfer types.
1714  *      This involves the allocating of DMA resources.
1715  *
1716  *      For non-isoc transfers, one DMA handle and one DMA buffer are
1717  *      allocated per transfer. The DMA buffer may contain multiple
1718  *      DMA cookies, and the cookies must meet certain alignment
1719  *      requirements so that they can be split across multiple TDs.
1720  *      The alignment needs to ensure that:
1721  *      1. the size of a cookie is larger than the max TD length (0x500)
1722  *      2. the size of a cookie is a multiple of the wMaxPacketSize of
1723  *      the ctrl/bulk pipes
1724  *
1725  *      wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
1726  *      So the alignment should be a multiple of 64. wMaxPacketSize for intr
1727  *      pipes is a little different: the spec only limits it to at most
1728  *      64 bytes, but as long as an intr transfer is limited to the max TD
1729  *      length, any alignment works if the cookie size exceeds that length.
1730  *
1731  *      Considering the above conditions, 2K alignment is used. 4K alignment
1732  *      should also be fine.
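      *      (Both 2048 and 4096 are multiples of 64, the least common
      *      multiple of the possible ctrl/bulk wMaxPacketSize values, and
      *      both are larger than the 0x500-byte max TD length.)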
1733  */
1734 static uhci_trans_wrapper_t *
1735 uhci_create_transfer_wrapper(
1736         uhci_state_t            *uhcip,
1737         uhci_pipe_private_t     *pp,
1738         size_t                  length,
1739         usb_flags_t             usb_flags)
1740 {
1741         size_t                  real_length;
1742         uhci_trans_wrapper_t    *tw;
1743         ddi_device_acc_attr_t   dev_attr;
1744         ddi_dma_attr_t          dma_attr;
1745         int                     kmem_flag;
1746         int                     (*dmamem_wait)(caddr_t);
1747         usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
1748 
1749         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1750             "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
1751             length, usb_flags);
1752 
1753         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1754 
1755         /* isochronous pipe should not call into this function */
1756         if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {
1757 
1758                 return (NULL);
1759         }
1760 
1761         /* SLEEP flag should not be used while holding mutex */
1762         kmem_flag = KM_NOSLEEP;
1763         dmamem_wait = DDI_DMA_DONTWAIT;
1764 
1765         /* Allocate space for the transfer wrapper */
1766         if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
1767             NULL) {
1768                 USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
1769                     "uhci_create_transfer_wrapper: kmem_alloc failed");
1770 
1771                 return (NULL);
1772         }
1773 
1774         /* zero-length packet doesn't need to allocate dma memory */
1775         if (length == 0) {
1776 
1777                 goto dmadone;
1778         }
1779 
1780         /* allow sg lists for transfer wrapper dma memory */
1781         bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
1782         dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
1783         dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;
1784 
1785         /* Store the transfer length */
1786         tw->tw_length = length;
1787 
1788         /* Allocate the DMA handle */
1789         if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
1790             0, &tw->tw_dmahandle) != DDI_SUCCESS) {
1791                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1792                     "uhci_create_transfer_wrapper: Alloc handle failed");
1793                 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
1794 
1795                 return (NULL);
1796         }
1797 
1798         dev_attr.devacc_attr_version            = DDI_DEVICE_ATTR_V0;
1799         dev_attr.devacc_attr_endian_flags       = DDI_STRUCTURE_LE_ACC;
1800         dev_attr.devacc_attr_dataorder          = DDI_STRICTORDER_ACC;
1801 
1802         /* Allocate the memory */
1803         if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
1804             DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
1805             &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
1806                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1807                     "uhci_create_transfer_wrapper: dma_mem_alloc fail");
1808                 ddi_dma_free_handle(&tw->tw_dmahandle);
1809                 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
1810 
1811                 return (NULL);
1812         }
1813 
1814         ASSERT(real_length >= length);
1815 
1816         /* Bind the handle */
1817         if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
1818             (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
1819             dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
1820             DDI_DMA_MAPPED) {
1821                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1822                     "uhci_create_transfer_wrapper: Bind handle failed");
1823                 ddi_dma_mem_free(&tw->tw_accesshandle);
1824                 ddi_dma_free_handle(&tw->tw_dmahandle);
1825                 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
1826 
1827                 return (NULL);
1828         }
1829 
1830         tw->tw_cookie_idx = 0;
1831         tw->tw_dma_offs = 0;
1832 
1833 dmadone:
1834         /*
1835          * Only allow one wrapper to be added at a time. Insert the
1836          * new transaction wrapper into the list for this pipe.
1837          */
1838         if (pp->pp_tw_head == NULL) {
1839                 pp->pp_tw_head = tw;
1840                 pp->pp_tw_tail = tw;
1841         } else {
1842                 pp->pp_tw_tail->tw_next = tw;
1843                 pp->pp_tw_tail = tw;
1844                 ASSERT(tw->tw_next == NULL);
1845         }
1846 
1847         /* Store a back pointer to the pipe private structure */
1848         tw->tw_pipe_private = pp;
1849 
1850         /* Store the transfer type - synchronous or asynchronous */
1851         tw->tw_flags = usb_flags;
1852 
1853         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1854             "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
1855             (void *)tw, tw->tw_ncookies);
1856 
1857         return (tw);
1858 }
1859 
1860 
1861 /*
1862  * uhci_insert_hc_td:
1863  *      Insert a Transfer Descriptor (TD) onto a QH.
1864  */
1865 int
1866 uhci_insert_hc_td(
1867         uhci_state_t            *uhcip,
1868         uint32_t                buffer_offset,
1869         size_t                  hcgtd_length,
1870         uhci_pipe_private_t     *pp,
1871         uhci_trans_wrapper_t    *tw,
1872         uchar_t                 PID,
1873         usb_req_attrs_t         attrs)
1874 {
1875         uhci_td_t       *td, *current_dummy;
1876         queue_head_t    *qh = pp->pp_qh;
1877 
1878         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1879 
1880         if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {
1881 
1882                 return (USB_NO_RESOURCES);
1883         }
1884 
1885         current_dummy = qh->td_tailp;
1886 
1887         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
1888             "uhci_insert_hc_td: td %p, attrs = 0x%x", (void *)td, attrs);
1889 
1890         /*
1891          * Fill in the current dummy td and
1892          * add the new dummy to the end.
1893          */
1894         uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset,
1895             hcgtd_length, pp, PID, attrs, tw);
1896 
1897         /*
1898          * Allow the HC hardware to transfer the TD, except for interrupt OUT TDs.
1899          */
1900         if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {
1901 
1902                 SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
1903         }
1904 
1905         /* Insert this td onto the tw */
1906 
1907         if (tw->tw_hctd_head == NULL) {
1908                 ASSERT(tw->tw_hctd_tail == NULL);
1909                 tw->tw_hctd_head = current_dummy;
1910                 tw->tw_hctd_tail = current_dummy;
1911         } else {
1912                 /* Add the td to the end of the list */
1913                 tw->tw_hctd_tail->tw_td_next = current_dummy;
1914                 tw->tw_hctd_tail = current_dummy;
1915         }
1916 
1917         /*
1918          * Insert the TD onto the QH. When this occurs,
1919          * the Host Controller will see the newly filled-in TD.
1920          */
1921         current_dummy->outst_td_next  = NULL;
1922         current_dummy->outst_td_prev  = uhcip->uhci_outst_tds_tail;
1923         if (uhcip->uhci_outst_tds_head == NULL) {
1924                 uhcip->uhci_outst_tds_head = current_dummy;
1925         } else {
1926                 uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
1927         }
1928         uhcip->uhci_outst_tds_tail = current_dummy;
1929         current_dummy->tw = tw;
1930 
1931         return (USB_SUCCESS);
1932 }
1933 
1934 
1935 /*
1936  * uhci_fill_in_td:
1937  *      Fill in the fields of a Transfer Descriptor (TD).
1938  */
1939 static void
1940 uhci_fill_in_td(
1941         uhci_state_t            *uhcip,
1942         uhci_td_t               *td,
1943         uhci_td_t               *current_dummy,
1944         uint32_t                buffer_offset,
1945         size_t                  length,
1946         uhci_pipe_private_t     *pp,
1947         uchar_t                 PID,
1948         usb_req_attrs_t         attrs,
1949         uhci_trans_wrapper_t    *tw)
1950 {
1951         usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
1952         uint32_t                buf_addr;
1953 
1954         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
1955             "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx "
1956             "attrs 0x%x", (void *)td, buffer_offset, length, attrs);
1957 
1958         /*
1959          * If this is an isochronous TD, just return
1960          */
1961         if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {
1962 
1963                 return;
1964         }
1965 
1966         /* The maximum transfer length of UHCI cannot exceed 0x500 bytes */
1967         ASSERT(length <= UHCI_MAX_TD_XFER_SIZE);
1968 
1969         bzero((char *)td, sizeof (uhci_td_t));  /* Clear the TD */
1970         SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td));
1971 
1972         if (attrs & USB_ATTRS_SHORT_XFER_OK) {
1973                 SetTD_spd(uhcip, current_dummy, 1);
1974         }
1975 
1976         mutex_enter(&ph->p_usba_device->usb_mutex);
1977         if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) {
1978                 SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE);
1979         }
1980 
1981         SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT);
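             /*
              * The UHCI MaxLen field encodes an n-byte transfer as n - 1
              * (for example, a 64-byte packet is encoded as 63), with a
              * special ZERO_LENGTH encoding for zero-byte packets.
              */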
1982         SetTD_mlen(uhcip, current_dummy,
1983             (length == 0) ? ZERO_LENGTH : (length - 1));
1984         SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle);
1985 
1986         /* Adjust the data toggle bit */
1987         ADJ_DATA_TOGGLE(pp);
1988 
1989         SetTD_devaddr(uhcip, current_dummy,  ph->p_usba_device->usb_addr);
1990         SetTD_endpt(uhcip, current_dummy,
1991             ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK);
1992         SetTD_PID(uhcip, current_dummy, PID);
1993         SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION);
1994 
1995         buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw);
1996         SetTD32(uhcip, current_dummy->buffer_address, buf_addr);
1997 
1998         td->qh_td_prev            = current_dummy;
1999         current_dummy->qh_td_prev = NULL;
2000         pp->pp_qh->td_tailp       = td;
2001         mutex_exit(&ph->p_usba_device->usb_mutex);
2002 }
2003 
2004 /*
2005  * uhci_get_tw_paddr_by_offs:
2006  *      Walk through the DMA cookies of a TW buffer to retrieve
2007  *      the device address used for a TD.
2008  *
2009  * buffer_offset - the starting offset into the TW buffer, where the
2010  *                 TD should transfer from. When a TW has more than
2011  *                 one TD, the TDs must be filled in increasing order.
2012  */
2013 static uint32_t
2014 uhci_get_tw_paddr_by_offs(
2015         uhci_state_t            *uhcip,
2016         uint32_t                buffer_offset,
2017         size_t                  length,
2018         uhci_trans_wrapper_t    *tw)
2019 {
2020         uint32_t                buf_addr;
2021         int                     rem_len;
2022 
2023         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2024             "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx",
2025             buffer_offset, length);
2026 
2027         /*
2028          * TDs must be filled in increasing DMA offset order.
2029          * tw_dma_offs is initialized to be 0 at TW creation and
2030          * is only increased in this function.
2031          */
2032         ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs);
2033 
2034         if (length == 0) {
2035                 buf_addr = 0;
2036 
2037                 return (buf_addr);
2038         }
2039 
2040         /*
2041          * Advance to the next DMA cookie until finding the cookie
2042          * that buffer_offset falls in.
2043          * It is very likely this loop will never repeat more than
2044          * once. It is here to accommodate the case where buffer_offset
2045          * advances past more than one cookie between two consecutive
2046          * calls into this function. In that case, the intervening DMA
2047          * cookies are simply skipped.
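              *
              * For example (illustrative figures): if the first cookie is
              * 4096 bytes long and a TD is requested at buffer_offset 4160,
              * the loop advances to the second cookie (tw_dma_offs becomes
              * 4096), and the TD's buffer address is that cookie's
              * dmac_address plus (4160 - 4096) = 64.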
2048          */
2049         while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2050             buffer_offset) {
2051                 /*
2052                  * tw_dma_offs always points to the starting offset
2053                  * of a cookie
2054                  */
2055                 tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2056                 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2057                 tw->tw_cookie_idx++;
2058                 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2059         }
2060 
2061         /*
2062          * Count the remaining buffer length that can be filled into
2063          * the TDs from the current DMA cookie.
2064          */
2065         rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2066             buffer_offset;
2067 
2068         /* Calculate the beginning address of the buffer */
2069         ASSERT(length <= rem_len);
2070         buf_addr = (buffer_offset - tw->tw_dma_offs) +
2071             tw->tw_cookie.dmac_address;
2072 
2073         USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2074             "uhci_get_tw_paddr_by_offs: dmac_addr 0x%x dmac_size "
2075             "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2076             tw->tw_cookie_idx);
2077 
2078         return (buf_addr);
2079 }
2080 
2081 
2082 /*
2083  * uhci_modify_td_active_bits:
2084  *      Mark the TDs of the pipe's QH inactive (or terminate their links)
2085  *      so that the HC stops processing the TDs related to the QH.
2086  */
2087 void
2088 uhci_modify_td_active_bits(
2089         uhci_state_t            *uhcip,
2090         uhci_pipe_private_t     *pp)
2091 {
2092         uhci_td_t               *td_head;
2093         usb_ep_descr_t          *ept = &pp->pp_pipe_handle->p_ep;
2094         uhci_trans_wrapper_t    *tw_head = pp->pp_tw_head;
2095 
2096         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2097             "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);
2098 
2099         while (tw_head != NULL) {
2100                 tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
2101                 td_head = tw_head->tw_hctd_head;
2102 
2103                 while (td_head) {
2104                         if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
2105                                 SetTD_status(uhcip, td_head,
2106                                     GetTD_status(uhcip, td_head) & TD_INACTIVE);
2107                         } else {
2108                                 SetTD32(uhcip, td_head->link_ptr,
2109                                     GetTD32(uhcip, td_head->link_ptr) |
2110                                     HC_END_OF_LIST);
2111                         }
2112 
2113                         td_head = td_head->tw_td_next;
2114                 }
2115                 tw_head = tw_head->tw_next;
2116         }
2117 }
2118 
2119 
2120 /*
2121  * uhci_insert_ctrl_td:
2122  *      Create a TD and a data buffer for a control Queue Head.
2123  */
2124 int
2125 uhci_insert_ctrl_td(
2126         uhci_state_t            *uhcip,
2127         usba_pipe_handle_data_t  *ph,
2128         usb_ctrl_req_t          *ctrl_reqp,
2129         usb_flags_t             flags)
2130 {
2131         uhci_pipe_private_t  *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2132         uhci_trans_wrapper_t *tw;
2133         size_t  ctrl_buf_size;
2134 
2135         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2136             "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);
2137 
2138         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2139 
2140         /*
2141          * If we have a control data phase, make the data buffer start
2142          * on the next 64-byte boundary so as to ensure the DMA cookie
2143          * can fit in the multiple TDs. The buffer in the range of
2144          * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding
2145          * and not to be transferred.
2146          */
2147         if (ctrl_reqp->ctrl_wLength) {
2148                 ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
2149                     ctrl_reqp->ctrl_wLength;
2150         } else {
2151                 ctrl_buf_size = SETUP_SIZE;
2152         }
2153 
2154         /* Allocate a transaction wrapper */
2155         if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
2156             ctrl_buf_size, flags)) == NULL) {
2157                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2158                     "uhci_insert_ctrl_td: TW allocation failed");
2159 
2160                 return (USB_NO_RESOURCES);
2161         }
2162 
2163         pp->pp_data_toggle = 0;
2164 
2165         tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
2166         tw->tw_bytes_xfered = 0;
2167         tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
2168         tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);
2169 
2170         /*
2171          * Initialize the callback and any callback
2172          * data for when the td completes.
2173          */
2174         tw->tw_handle_td = uhci_handle_ctrl_td;
2175         tw->tw_handle_callback_value = NULL;
2176 
2177         if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
2178                 tw->tw_ctrl_state = 0;
2179 
2180                 /* free the transfer wrapper */
2181                 uhci_deallocate_tw(uhcip, pp, tw);
2182 
2183                 return (USB_NO_RESOURCES);
2184         }
2185 
2186         tw->tw_ctrl_state = SETUP;
2187 
2188         return (USB_SUCCESS);
2189 }
2190 
2191 
2192 /*
2193  * uhci_create_setup_pkt:
2194  *      Create a setup packet to initiate a control transfer.
2195  *
2196  *      The OHCI driver has seen cases where devices fail if more than
2197  *      one control transfer is issued to a device within a frame.
2198  *      To be consistent with the OHCI driver, the UHCI driver therefore
2199  *      puts only one TD at a time on a device's control pipe.
2200  */
2201 static int
2202 uhci_create_setup_pkt(
2203         uhci_state_t            *uhcip,
2204         uhci_pipe_private_t     *pp,
2205         uhci_trans_wrapper_t    *tw)
2206 {
2207         int             sdata;
2208         usb_ctrl_req_t  *req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;
2209 
2210         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2211             "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
2212             req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
2213             req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);
2214 
2215         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2216         ASSERT(tw != NULL);
2217 
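             /*
              * Example (illustrative): for a standard GET_DESCRIPTOR(DEVICE)
              * request with bmRequestType 0x80, bRequest 0x06, wValue 0x0100,
              * wIndex 0 and wLength 18, the two 32-bit words written below
              * are 0x01000680 and 0x00120000, which the little-endian access
              * handle lays out as the 8-byte setup packet
              * 80 06 00 01 00 00 12 00.
              */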
2218         /* Create the first four bytes of the setup packet */
2219         sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
2220             (req->ctrl_wValue << 16));
2221         ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);
2222 
2223         /* Create the second four bytes */
2224         sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
2225         ddi_put32(tw->tw_accesshandle,
2226             (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);
2227 
2228         /*
2229          * The TDs are placed on the QH one at a time.
2230          * Once this TD is placed on the done list, the
2231          * data or status phase TD will be enqueued.
2232          */
2233         if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
2234             pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {
2235 
2236                 return (USB_NO_RESOURCES);
2237         }
2238 
2239         USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2240             "Create_setup: pp = 0x%p, attrs = 0x%x", (void *)pp,
2241             req->ctrl_attributes);
2242 
2243         /*
2244          * If this control transfer has a data phase, record the
2245          * direction. If the data phase is an OUT transaction,
2246          * copy the data into the buffer of the transfer wrapper.
2247          */
2248         if (req->ctrl_wLength != 0) {
2249                 /* There is a data stage.  Find the direction */
2250                 if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
2251                         tw->tw_direction = PID_IN;
2252                 } else {
2253                         tw->tw_direction = PID_OUT;
2254 
2255                         /* Copy the data into the buffer */
2256                         ddi_rep_put8(tw->tw_accesshandle,
2257                             req->ctrl_data->b_rptr,
2258                             (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE),
2259                             req->ctrl_wLength,
2260                             DDI_DEV_AUTOINCR);
2261                 }
2262         }
2263 
2264         return (USB_SUCCESS);
2265 }
2266 
2267 
2268 /*
2269  * uhci_create_stats:
2270  *      Allocate and initialize the uhci kstat structures
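      *
      *      The kstats created are named "<driver><instance>,intrs",
      *      "<driver><instance>,total", and one "<driver><instance>,<type>"
      *      per transfer type (ctrl/isoch/bulk/intr), e.g. "uhci0,bulk".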
2271  */
2272 void
2273 uhci_create_stats(uhci_state_t *uhcip)
2274 {
2275         int                     i;
2276         char                    kstatname[KSTAT_STRLEN];
2277         char                    *usbtypes[USB_N_COUNT_KSTATS] =
2278             {"ctrl", "isoch", "bulk", "intr"};
2279         uint_t                  instance = uhcip->uhci_instance;
2280         const char              *dname = ddi_driver_name(uhcip->uhci_dip);
2281         uhci_intrs_stats_t      *isp;
2282 
2283         if (UHCI_INTRS_STATS(uhcip) == NULL) {
2284                 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
2285                     dname, instance);
2286                 UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
2287                     kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
2288                     sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
2289                     KSTAT_FLAG_PERSISTENT);
2290 
2291                 if (UHCI_INTRS_STATS(uhcip) != NULL) {
2292                         isp = UHCI_INTRS_STATS_DATA(uhcip);
2293                         kstat_named_init(&isp->uhci_intrs_hc_halted,
2294                             "HC Halted", KSTAT_DATA_UINT64);
2295                         kstat_named_init(&isp->uhci_intrs_hc_process_err,
2296                             "HC Process Errors", KSTAT_DATA_UINT64);
2297                         kstat_named_init(&isp->uhci_intrs_host_sys_err,
2298                             "Host Sys Errors", KSTAT_DATA_UINT64);
2299                         kstat_named_init(&isp->uhci_intrs_resume_detected,
2300                             "Resume Detected", KSTAT_DATA_UINT64);
2301                         kstat_named_init(&isp->uhci_intrs_usb_err_intr,
2302                             "USB Error", KSTAT_DATA_UINT64);
2303                         kstat_named_init(&isp->uhci_intrs_usb_intr,
2304                             "USB Interrupts", KSTAT_DATA_UINT64);
2305                         kstat_named_init(&isp->uhci_intrs_total,
2306                             "Total Interrupts", KSTAT_DATA_UINT64);
2307                         kstat_named_init(&isp->uhci_intrs_not_claimed,
2308                             "Not Claimed", KSTAT_DATA_UINT64);
2309 
2310                         UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
2311                         UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
2312                         kstat_install(UHCI_INTRS_STATS(uhcip));
2313                 }
2314         }
2315 
2316         if (UHCI_TOTAL_STATS(uhcip) == NULL) {
2317                 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
2318                     dname, instance);
2319                 UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
2320                     kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
2321                     KSTAT_FLAG_PERSISTENT);
2322 
2323                 if (UHCI_TOTAL_STATS(uhcip) != NULL) {
2324                         kstat_install(UHCI_TOTAL_STATS(uhcip));
2325                 }
2326         }
2327 
2328         for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2329                 if (uhcip->uhci_count_stats[i] == NULL) {
2330                         (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
2331                             dname, instance, usbtypes[i]);
2332                         uhcip->uhci_count_stats[i] = kstat_create("usba",
2333                             instance, kstatname, "usb_byte_count",
2334                             KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
2335 
2336                         if (uhcip->uhci_count_stats[i] != NULL) {
2337                                 kstat_install(uhcip->uhci_count_stats[i]);
2338                         }
2339                 }
2340         }
2341 }
2342 
2343 
2344 /*
2345  * uhci_destroy_stats:
2346  *      Clean up uhci kstat structures
2347  */
2348 void
2349 uhci_destroy_stats(uhci_state_t *uhcip)
2350 {
2351         int i;
2352 
2353         if (UHCI_INTRS_STATS(uhcip)) {
2354                 kstat_delete(UHCI_INTRS_STATS(uhcip));
2355                 UHCI_INTRS_STATS(uhcip) = NULL;
2356         }
2357 
2358         if (UHCI_TOTAL_STATS(uhcip)) {
2359                 kstat_delete(UHCI_TOTAL_STATS(uhcip));
2360                 UHCI_TOTAL_STATS(uhcip) = NULL;
2361         }
2362 
2363         for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2364                 if (uhcip->uhci_count_stats[i]) {
2365                         kstat_delete(uhcip->uhci_count_stats[i]);
2366                         uhcip->uhci_count_stats[i] = NULL;
2367                 }
2368         }
2369 }
2370 
2371 
2372 void
2373 uhci_do_intrs_stats(uhci_state_t *uhcip, int val)
2374 {
2375         if (UHCI_INTRS_STATS(uhcip) == NULL) {
2376 
2377                 return;
2378         }
2379 
2380         UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++;
2381         switch (val) {
2382         case USBSTS_REG_HC_HALTED:
2383                 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++;
2384                 break;
2385         case USBSTS_REG_HC_PROCESS_ERR:
2386                 UHCI_INTRS_STATS_DATA(uhcip)->
2387                     uhci_intrs_hc_process_err.value.ui64++;
2388                 break;
2389         case USBSTS_REG_HOST_SYS_ERR:
2390                 UHCI_INTRS_STATS_DATA(uhcip)->
2391                     uhci_intrs_host_sys_err.value.ui64++;
2392                 break;
2393         case USBSTS_REG_RESUME_DETECT:
2394                 UHCI_INTRS_STATS_DATA(uhcip)->
2395                     uhci_intrs_resume_detected.value.ui64++;
2396                 break;
2397         case USBSTS_REG_USB_ERR_INTR:
2398                 UHCI_INTRS_STATS_DATA(uhcip)->
2399                     uhci_intrs_usb_err_intr.value.ui64++;
2400                 break;
2401         case USBSTS_REG_USB_INTR:
2402                 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++;
2403                 break;
2404         default:
2405                 UHCI_INTRS_STATS_DATA(uhcip)->
2406                     uhci_intrs_not_claimed.value.ui64++;
2407                 break;
2408         }
2409 }
2410 
2411 
2412 void
2413 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr)
2414 {
2415         uint8_t type = attr & USB_EP_ATTR_MASK;
2416         uint8_t dir = addr & USB_EP_DIR_MASK;
2417 
2418         switch (dir) {
2419         case USB_EP_DIR_IN:
2420                 UHCI_TOTAL_STATS_DATA(uhcip)->reads++;
2421                 UHCI_TOTAL_STATS_DATA(uhcip)->nread += len;
2422                 switch (type) {
2423                 case USB_EP_ATTR_CONTROL:
2424                         UHCI_CTRL_STATS(uhcip)->reads++;
2425                         UHCI_CTRL_STATS(uhcip)->nread += len;
2426                         break;
2427                 case USB_EP_ATTR_BULK:
2428                         UHCI_BULK_STATS(uhcip)->reads++;
2429                         UHCI_BULK_STATS(uhcip)->nread += len;
2430                         break;
2431                 case USB_EP_ATTR_INTR:
2432                         UHCI_INTR_STATS(uhcip)->reads++;
2433                         UHCI_INTR_STATS(uhcip)->nread += len;
2434                         break;
2435                 case USB_EP_ATTR_ISOCH:
2436                         UHCI_ISOC_STATS(uhcip)->reads++;
2437                         UHCI_ISOC_STATS(uhcip)->nread += len;
2438                         break;
2439                 }
2440                 break;
2441         case USB_EP_DIR_OUT:
2442                 UHCI_TOTAL_STATS_DATA(uhcip)->writes++;
2443                 UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len;
2444                 switch (type) {
2445                 case USB_EP_ATTR_CONTROL:
2446                         UHCI_CTRL_STATS(uhcip)->writes++;
2447                         UHCI_CTRL_STATS(uhcip)->nwritten += len;
2448                         break;
2449                 case USB_EP_ATTR_BULK:
2450                         UHCI_BULK_STATS(uhcip)->writes++;
2451                         UHCI_BULK_STATS(uhcip)->nwritten += len;
2452                         break;
2453                 case USB_EP_ATTR_INTR:
2454                         UHCI_INTR_STATS(uhcip)->writes++;
2455                         UHCI_INTR_STATS(uhcip)->nwritten += len;
2456                         break;
2457                 case USB_EP_ATTR_ISOCH:
2458                         UHCI_ISOC_STATS(uhcip)->writes++;
2459                         UHCI_ISOC_STATS(uhcip)->nwritten += len;
2460                         break;
2461                 }
2462                 break;
2463         }
2464 }
2465 
2466 
2467 /*
2468  * uhci_free_tw:
2469  *      Free the Transfer Wrapper (TW).
2470  */
2471 void
2472 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
2473 {
2474         int rval, i;
2475 
2476         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");
2477 
2478         ASSERT(tw != NULL);
2479 
2480         if (tw->tw_isoc_strtlen > 0) {
2481                 ASSERT(tw->tw_isoc_bufs != NULL);
2482                 for (i = 0; i < tw->tw_ncookies; i++) {
2483                         rval = ddi_dma_unbind_handle(
2484                             tw->tw_isoc_bufs[i].dma_handle);
2485                         ASSERT(rval == DDI_SUCCESS);
2486                         ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
2487                         ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
2488                 }
2489                 kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
2490         } else if (tw->tw_dmahandle != NULL) {
2491                 rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
2492                 ASSERT(rval == DDI_SUCCESS);
2493 
2494                 ddi_dma_mem_free(&tw->tw_accesshandle);
2495                 ddi_dma_free_handle(&tw->tw_dmahandle);
2496         }
2497 
2498         kmem_free(tw, sizeof (uhci_trans_wrapper_t));
2499 }
2500 
2501 
2502 /*
2503  * uhci_deallocate_tw:
2504  *      Deallocate a Transaction Wrapper (TW); this involves
2505  *      freeing its DMA resources.
2506  */
2507 void
2508 uhci_deallocate_tw(uhci_state_t *uhcip,
2509     uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
2510 {
2511         uhci_trans_wrapper_t    *head;
2512 
2513         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2514             "uhci_deallocate_tw:");
2515 
2516         /*
2517          * If the transfer wrapper has no Host Controller (HC)
2518          * Transfer Descriptors (TD) associated with it, then
2519          * remove the transfer wrapper. The transfers are done
2520          * in FIFO order, so this should be the first transfer
2521          * wrapper on the list.
2522          */
2523         if (tw->tw_hctd_head != NULL) {
2524                 ASSERT(tw->tw_hctd_tail != NULL);
2525 
2526                 return;
2527         }
2528 
2529         ASSERT(tw->tw_hctd_tail == NULL);
2530         ASSERT(pp->pp_tw_head != NULL);
2531 
2532         /*
2533          * If pp->pp_tw_head is NULL, set the tail also to NULL.
2534          */
2535         head = pp->pp_tw_head;
2536 
2537         if (head == tw) {
2538                 pp->pp_tw_head = head->tw_next;
2539                 if (pp->pp_tw_head == NULL) {
2540                         pp->pp_tw_tail = NULL;
2541                 }
2542         } else {
2543                 while (head->tw_next != tw)
2544                         head = head->tw_next;
2545                 head->tw_next = tw->tw_next;
2546                 if (tw->tw_next == NULL) {
2547                         pp->pp_tw_tail = head;
2548                 }
2549         }
2550         uhci_free_tw(uhcip, tw);
2551 }
2552 
2553 
2554 void
2555 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td)
2556 {
2557         uhci_td_t               *tmp_td;
2558         uhci_trans_wrapper_t    *tw = td->tw;
2559 
2560         if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) {
2561                 uhcip->uhci_outst_tds_head = NULL;
2562                 uhcip->uhci_outst_tds_tail = NULL;
2563         } else if (td->outst_td_next == NULL) {
2564                 td->outst_td_prev->outst_td_next = NULL;
2565                 uhcip->uhci_outst_tds_tail = td->outst_td_prev;
2566         } else if (td->outst_td_prev == NULL) {
2567                 td->outst_td_next->outst_td_prev = NULL;
2568                 uhcip->uhci_outst_tds_head = td->outst_td_next;
2569         } else {
2570                 td->outst_td_prev->outst_td_next = td->outst_td_next;
2571                 td->outst_td_next->outst_td_prev = td->outst_td_prev;
2572         }
2573 
2574         tmp_td = tw->tw_hctd_head;
2575 
2576         if (tmp_td != td) {
2577                 while (tmp_td->tw_td_next != td) {
2578                         tmp_td = tmp_td->tw_td_next;
2579                 }
2580                 ASSERT(tmp_td);
2581                 tmp_td->tw_td_next = td->tw_td_next;
2582                 if (td->tw_td_next == NULL) {
2583                         tw->tw_hctd_tail = tmp_td;
2584                 }
2585         } else {
2586                 tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next;
2587                 if (tw->tw_hctd_head == NULL) {
2588                         tw->tw_hctd_tail = NULL;
2589                 }
2590         }
2591 
2592         td->flag  = TD_FLAG_FREE;
2593 }
2594 
2595 
2596 void
2597 uhci_remove_tds_tws(
2598         uhci_state_t            *uhcip,
2599         usba_pipe_handle_data_t *ph)
2600 {
2601         usb_opaque_t            curr_reqp;
2602         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2603         usb_ep_descr_t          *ept = &pp->pp_pipe_handle->p_ep;
2604         uhci_trans_wrapper_t    *tw_tmp;
2605         uhci_trans_wrapper_t    *tw_head = pp->pp_tw_head;
2606 
2607         while (tw_head != NULL) {
2608                 tw_tmp = tw_head;
2609                 tw_head = tw_head->tw_next;
2610 
2611                 curr_reqp = tw_tmp->tw_curr_xfer_reqp;
2612                 if (curr_reqp) {
2613                         /* do this for control/bulk/intr */
2614                         if ((tw_tmp->tw_direction == PID_IN) &&
2615                             (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) {
2616                                 uhci_deallocate_periodic_in_resource(uhcip,
2617                                     pp, tw_tmp);
2618                         } else {
2619                                 uhci_hcdi_callback(uhcip, pp,
2620                                     pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED);
2621                         }
2622                 } /* end of curr_reqp */
2623 
2624                 if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) {
2625                         continue;
2626                 }
2627 
2628                 while (tw_tmp->tw_hctd_head != NULL) {
2629                         uhci_delete_td(uhcip, tw_tmp->tw_hctd_head);
2630                 }
2631 
2632                 uhci_deallocate_tw(uhcip, pp, tw_tmp);
2633         }
2634 }
2635 
2636 
2637 /*
2638  * uhci_remove_qh:
2639  *      Remove the Queue Head from the Host Controller's
2640  *      appropriate QH list.
2641  */
2642 void
2643 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2644 {
2645         uhci_td_t       *dummy_td;
2646 
2647         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2648 
2649         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2650             "uhci_remove_qh:");
2651 
2652         dummy_td = pp->pp_qh->td_tailp;
2653         dummy_td->flag = TD_FLAG_FREE;
2654 
2655         switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) {
2656         case USB_EP_ATTR_CONTROL:
2657                 uhci_remove_ctrl_qh(uhcip, pp);
2658                 break;
2659         case USB_EP_ATTR_BULK:
2660                 uhci_remove_bulk_qh(uhcip, pp);
2661                 break;
2662         case USB_EP_ATTR_INTR:
2663                 uhci_remove_intr_qh(uhcip, pp);
2664                 break;
2665         }
2666 }
2667 
2668 
2669 static void
2670 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2671 {
2672         queue_head_t   *qh = pp->pp_qh;
2673         queue_head_t   *next_lattice_qh =
2674             QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2675 
2676         qh->prev_qh->link_ptr = qh->link_ptr;
2677         next_lattice_qh->prev_qh = qh->prev_qh;
2678         qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2680 }
2681 
2682 /*
2683  * uhci_remove_bulk_qh:
2684  *      Remove a bulk QH from the Host Controller's QH list. There may be a
2685  *      loop for bulk QHs, we must care about this while removing a bulk QH.
2686  */
2687 static void
2688 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2689 {
2690         queue_head_t   *qh = pp->pp_qh;
2691         queue_head_t   *next_lattice_qh;
2692         uint32_t        paddr;
2693 
2694         paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2695         next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ?
2696             0 : QH_VADDR(paddr);
2697 
2698         if ((qh == uhcip->uhci_bulk_xfers_q_tail) &&
2699             (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) {
2700                 SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST);
2701         } else {
2702                 qh->prev_qh->link_ptr = qh->link_ptr;
2703         }
2704 
2705         if (next_lattice_qh == NULL) {
2706                 uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh;
2707         } else {
2708                 next_lattice_qh->prev_qh = qh->prev_qh;
2709         }
2710 
2711         qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2713 }
2714 
2715 
2716 static void
2717 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2718 {
2719         queue_head_t   *qh = pp->pp_qh;
2720         queue_head_t   *next_lattice_qh =
2721             QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2722 
2723         qh->prev_qh->link_ptr = qh->link_ptr;
2724         if (next_lattice_qh->prev_qh != NULL) {
2725                 next_lattice_qh->prev_qh = qh->prev_qh;
2726         } else {
2727                 uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh;
2728         }
2729 
2730         qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2731 }
2732 
2733 
2734 /*
2735  * uhci_allocate_td_from_pool:
2736  *      Allocate a Transfer Descriptor (TD) from the TD buffer pool.
2737  */
2738 static uhci_td_t *
2739 uhci_allocate_td_from_pool(uhci_state_t *uhcip)
2740 {
2741         int             index;
2742         uhci_td_t       *td;
2743 
2744         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2745 
2746         /*
2747          * Search for a blank Transfer Descriptor (TD)
2748          * in the TD buffer pool.
2749          */
2750         for (index = 0; index < uhci_td_pool_size; index ++) {
2751                 if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) {
2752                         break;
2753                 }
2754         }
2755 
2756         if (index == uhci_td_pool_size) {
2757                 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2758                     "uhci_allocate_td_from_pool: TD exhausted");
2759 
2760                 return (NULL);
2761         }
2762 
2763         USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2764             "uhci_allocate_td_from_pool: Allocated %d", index);
2765 
2766         /* Create a new dummy for the end of the TD list */
2767         td = &uhcip->uhci_td_pool_addr[index];
2768 
2769         /* Mark the newly allocated TD as a dummy */
2770         td->flag =  TD_FLAG_DUMMY;
2771         td->qh_td_prev       =  NULL;
2772 
2773         return (td);
2774 }
2775 
2776 
2777 /*
2778  * uhci_insert_bulk_td:
2779  */
2780 int
2781 uhci_insert_bulk_td(
2782         uhci_state_t            *uhcip,
2783         usba_pipe_handle_data_t *ph,
2784         usb_bulk_req_t          *req,
2785         usb_flags_t             flags)
2786 {
2787         size_t                  length;
2788         uint_t                  mps;    /* MaxPacketSize */
2789         uint_t                  num_bulk_tds, i, j;
2790         uint32_t                buf_offs;
2791         uhci_td_t               *bulk_td_ptr;
2792         uhci_td_t               *current_dummy, *tmp_td;
2793         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2794         uhci_trans_wrapper_t    *tw;
2795         uhci_bulk_isoc_xfer_t   *bulk_xfer_info;
2796         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
2797 
2798         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2799             "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", (void *)req, flags);
2800 
2801         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2802 
2803         /*
2804          * Create transfer wrapper
2805          */
2806         if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len,
2807             flags)) == NULL) {
2808                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2809                     "uhci_insert_bulk_td: TW allocation failed");
2810 
2811                 return (USB_NO_RESOURCES);
2812         }
2813 
2814         tw->tw_bytes_xfered          = 0;
2815         tw->tw_bytes_pending         = req->bulk_len;
2816         tw->tw_handle_td             = uhci_handle_bulk_td;
2817         tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data;
2818         tw->tw_timeout_cnt           = req->bulk_timeout;
2819         tw->tw_data                  = req->bulk_data;
2820         tw->tw_curr_xfer_reqp        = (usb_opaque_t)req;
2821 
2822         /* Get the bulk pipe direction */
2823         tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
2824             PID_OUT : PID_IN;
2825 
2826         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2827             "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction);
2828 
2829         /* If the DATA OUT, copy the data into transfer buffer. */
2830         if (tw->tw_direction == PID_OUT) {
2831                 if (req->bulk_len) {
2832                         ASSERT(req->bulk_data != NULL);
2833 
2834                         /* Copy the data into the message */
2835                         ddi_rep_put8(tw->tw_accesshandle,
2836                             req->bulk_data->b_rptr,
2837                             (uint8_t *)tw->tw_buf,
2838                             req->bulk_len, DDI_DEV_AUTOINCR);
2839                 }
2840         }
2841 
2842         /* Get the max packet size.  */
2843         length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
2844 
2845         /*
2846          * Calculate the number of TDs to insert in the current frame
2847          * interval. The max number of TDs allowed (driver implementation)
2848          * is 128 per frame interval. Once all of these TDs complete,
2849          * the remaining TDs will be inserted into the lattice
2850          * in uhci_handle_bulk_td().
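              *
              * For example (illustrative figures): a 1000-byte request with
              * a 64-byte wMaxPacketSize yields 15 full 64-byte TDs plus one
              * 40-byte TD, i.e. num_bulk_tds = 16 and length = 40.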
2851          */
2852         if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
2853                 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
2854         } else {
2855                 num_bulk_tds = (tw->tw_bytes_pending / mps);
2856 
2857                 if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) {
2858                         num_bulk_tds++;
2859                         length = (tw->tw_bytes_pending % mps);
2860                 }
2861         }
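        /*
         * Illustrative example (assumed numbers): with bulk_len = 4500
         * bytes and mps = 64, tw_bytes_pending / mps gives 70 full-size
         * TDs plus one short TD of 4500 % 64 = 20 bytes, so num_bulk_tds
         * ends up as 71 and length as 20 for the final TD.
         */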
2862 
2863         /*
2864          * Allocate memory for the bulk xfer information structure
2865          */
2866         if ((bulk_xfer_info = kmem_zalloc(
2867             sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
2868                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2869                     "uhci_insert_bulk_td: kmem_zalloc failed");
2870 
2871                 /* Free the transfer wrapper */
2872                 uhci_deallocate_tw(uhcip, pp, tw);
2873 
2874                 return (USB_FAILURE);
2875         }
2876 
2877         /* Allocate memory for the bulk TD's */
2878         if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
2879             USB_SUCCESS) {
2880                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2881                     "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");
2882 
2883                 kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));
2884 
2885                 /* Free the transfer wrapper */
2886                 uhci_deallocate_tw(uhcip, pp, tw);
2887 
2888                 return (USB_FAILURE);
2889         }
2890 
2891         td_pool_ptr = &bulk_xfer_info->td_pools[0];
2892         bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2893         bulk_td_ptr[0].qh_td_prev = NULL;
2894         current_dummy = pp->pp_qh->td_tailp;
2895         buf_offs = 0;
2896         pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
2897 
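        /*
         * Within each pool the TDs are chained together by physical
         * address. The last TD of each pool is chained to the first TD
         * of the next pool, and the very last TD is chained to the queue
         * head's dummy TD (current_dummy).
         */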
2898         /* Fill up all the bulk TD's */
2899         for (i = 0; i < bulk_xfer_info->num_pools; i++) {
2900                 for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
2901                         uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2902                             &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
2903                             &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
2904                         buf_offs += mps;
2905                 }
2906 
2907                 /* fill in the last TD */
2908                 if (i == (bulk_xfer_info->num_pools - 1)) {
2909                         uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2910                             current_dummy, TD_PADDR(current_dummy),
2911                             ph, buf_offs, length, tw);
2912                 } else {
2913                         /* fill in the TD at the tail of a pool */
2914                         tmp_td = &bulk_td_ptr[j];
2915                         td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
2916                         bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2917                         uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
2918                             &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
2919                             &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
2920                         buf_offs += mps;
2921                 }
2922         }
2923 
2924         bulk_xfer_info->num_tds      = (ushort_t)num_bulk_tds;
2925 
        /*
         * Point the end of the lattice tree to the start of the bulk xfers
         * queue head. This allows the HC to execute the same Queue Head/TD
         * in the same frame. Some bulk devices NAK after completing each
         * TD, which makes performance on such devices very poor. This loop
         * gives NAK'ed bulk TDs a chance to be executed again in the same
         * frame.
         */
2934         if (uhcip->uhci_pending_bulk_cmds++ == 0) {
2935                 uhcip->uhci_bulk_xfers_q_tail->link_ptr =
2936                     uhcip->uhci_bulk_xfers_q_head->link_ptr;
2937                 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
2938                     "uhci_insert_bulk_td: count = %d no tds  %d",
2939                     uhcip->uhci_pending_bulk_cmds, num_bulk_tds);
2940         }
2941 
2942         /* Insert on the bulk queue head for the execution by HC */
2943         SetQH32(uhcip, pp->pp_qh->element_ptr,
2944             bulk_xfer_info->td_pools[0].cookie.dmac_address);
2945 
2946         return (USB_SUCCESS);
2947 }
2948 
2949 
2950 /*
2951  * uhci_fill_in_bulk_isoc_td
2952  *     Fills the bulk/isoc TD
2953  *
2954  * offset - different meanings for bulk and isoc TDs:
2955  *          starting offset into the TW buffer for a bulk TD
2956  *          and the index into the isoc packet list for an isoc TD
2957  */
2958 void
2959 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td,
2960         uhci_td_t               *next_td,
2961         uint32_t                next_td_paddr,
2962         usba_pipe_handle_data_t *ph,
2963         uint_t                  offset,
2964         uint_t                  length,
2965         uhci_trans_wrapper_t    *tw)
2966 {
2967         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2968         usb_ep_descr_t          *ept = &pp->pp_pipe_handle->p_ep;
2969         uint32_t                buf_addr;
2970 
2971         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2972             "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x",
2973             (void *)tw, offset, length);
2974 
2975         bzero((char *)current_td, sizeof (uhci_td_t));
2976         SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST);
2977 
2978         switch (UHCI_XFER_TYPE(ept)) {
2979         case USB_EP_ATTR_ISOCH:
2980                 if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes
2981                     & USB_ATTRS_SHORT_XFER_OK) {
2982                         SetTD_spd(uhcip, current_td, 1);
2983                 }
2984                 break;
2985         case USB_EP_ATTR_BULK:
2986                 if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes
2987                     & USB_ATTRS_SHORT_XFER_OK) {
2988                         SetTD_spd(uhcip, current_td, 1);
2989                 }
2990                 break;
2991         }
2992 
2993         mutex_enter(&ph->p_usba_device->usb_mutex);
2994 
2995         SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT);
2996         SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE);
2997         SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION);
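        /*
         * The UHCI MaxLen field is encoded as (length - 1); ZERO_LENGTH
         * encodes a zero-byte packet.
         */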
2998         SetTD_mlen(uhcip, current_td,
2999             (length == 0) ? ZERO_LENGTH : (length - 1));
3000         SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle);
3001         SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr);
3002         SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress &
3003             END_POINT_ADDRESS_MASK);
3004         SetTD_PID(uhcip, current_td, tw->tw_direction);
3005 
3006         /* Get the right buffer address for the current TD */
3007         switch (UHCI_XFER_TYPE(ept)) {
3008         case USB_EP_ATTR_ISOCH:
3009                 buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address;
3010                 break;
3011         case USB_EP_ATTR_BULK:
3012                 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset,
3013                     length, tw);
3014                 break;
3015         }
3016         SetTD32(uhcip, current_td->buffer_address, buf_addr);
3017 
        /*
         * Adjust the data toggle.
         * The data toggle bit must always be 0 for isoc transfers,
         * and the "iso" bit must be set in the TD for isoc transfers.
         */
3023         if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
3024                 pp->pp_data_toggle = 0;
3025                 SetTD_iso(uhcip, current_td, 1);
3026         } else {
3027                 ADJ_DATA_TOGGLE(pp);
3028                 next_td->qh_td_prev = current_td;
3029                 pp->pp_qh->td_tailp = next_td;
3030         }
3031 
3032         current_td->outst_td_next = NULL;
3033         current_td->outst_td_prev = uhcip->uhci_outst_tds_tail;
3034         if (uhcip->uhci_outst_tds_head == NULL) {
3035                 uhcip->uhci_outst_tds_head = current_td;
3036         } else {
3037                 uhcip->uhci_outst_tds_tail->outst_td_next = current_td;
3038         }
3039         uhcip->uhci_outst_tds_tail = current_td;
3040         current_td->tw = tw;
3041 
3042         if (tw->tw_hctd_head == NULL) {
3043                 ASSERT(tw->tw_hctd_tail == NULL);
3044                 tw->tw_hctd_head = current_td;
3045                 tw->tw_hctd_tail = current_td;
3046         } else {
3047                 /* Add the td to the end of the list */
3048                 tw->tw_hctd_tail->tw_td_next = current_td;
3049                 tw->tw_hctd_tail = current_td;
3050         }
3051 
3052         mutex_exit(&ph->p_usba_device->usb_mutex);
3053 }
3054 
3055 
/*
 * uhci_alloc_bulk_isoc_tds:
 *      - Allocates the isoc/bulk TD pools. It first tries to allocate one
 *        whole pool to store all the TDs, if the system allows. Only when
 *        that first allocation fails does it try to allocate several small
 *        pools, with each pool limited to a physical page in size.
 */
3063 static int
3064 uhci_alloc_bulk_isoc_tds(
3065         uhci_state_t            *uhcip,
3066         uint_t                  num_tds,
3067         uhci_bulk_isoc_xfer_t   *info)
3068 {
3069         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3070             "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p",
3071             num_tds, (void *)info);
3072 
3073         info->num_pools = 1;
        /* First try to allocate all the TDs as one whole pool */
3075         if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3076             USB_SUCCESS) {
3077                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3078                     "alloc_memory_for_tds failed: num_tds %d num_pools %d",
3079                     num_tds, info->num_pools);
3080 
                /* Reduce the TD count per pool and retry the allocation */
3082                 info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL;
3083                 if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) {
3084                         info->num_pools++;
3085                 }
3086 
3087                 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3088                     USB_SUCCESS) {
3089                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3090                             "alloc_memory_for_tds failed: num_tds %d "
3091                             "num_pools %d", num_tds, info->num_pools);
3092 
3093                         return (USB_NO_RESOURCES);
3094                 }
3095         }
3096 
3097         return (USB_SUCCESS);
3098 }
3099 
3100 
3101 /*
3102  * uhci_alloc_memory_for_tds:
3103  *      - Allocates memory for the isoc/bulk td pools.
3104  */
3105 static int
3106 uhci_alloc_memory_for_tds(
3107         uhci_state_t            *uhcip,
3108         uint_t                  num_tds,
3109         uhci_bulk_isoc_xfer_t   *info)
3110 {
3111         int                     result, i, j, err;
3112         size_t                  real_length;
3113         uint_t                  ccount, num;
3114         ddi_device_acc_attr_t   dev_attr;
3115         uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2;
3116 
3117         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3118             "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p "
3119             "num_pools: %u", num_tds, (void *)info, info->num_pools);
3120 
3121         /* The host controller will be little endian */
3122         dev_attr.devacc_attr_version            = DDI_DEVICE_ATTR_V0;
3123         dev_attr.devacc_attr_endian_flags       = DDI_STRUCTURE_LE_ACC;
3124         dev_attr.devacc_attr_dataorder          = DDI_STRICTORDER_ACC;
3125 
3126         /* Allocate the TD pool structures */
3127         if ((info->td_pools = kmem_zalloc(
3128             (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools),
3129             KM_SLEEP)) == NULL) {
3130                 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3131                     "uhci_alloc_memory_for_tds: alloc td_pools failed");
3132 
3133                 return (USB_FAILURE);
3134         }
3135 
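        /*
         * Every pool except the last holds UHCI_MAX_TD_NUM_PER_POOL TDs;
         * the last pool holds the remainder (or all of the TDs when there
         * is only one pool).
         */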
3136         for (i = 0; i < info->num_pools; i++) {
3137                 if (info->num_pools == 1) {
3138                         num = num_tds;
3139                 } else if (i < (info->num_pools - 1)) {
3140                         num = UHCI_MAX_TD_NUM_PER_POOL;
3141                 } else {
3142                         num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL);
3143                 }
3144 
3145                 td_pool_ptr1 = &info->td_pools[i];
3146 
3147                 /* Allocate the bulk TD pool DMA handle */
3148                 if (ddi_dma_alloc_handle(uhcip->uhci_dip,
3149                     &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
3150                     &td_pool_ptr1->dma_handle) != DDI_SUCCESS) {
3151 
3152                         for (j = 0; j < i; j++) {
3153                                 td_pool_ptr2 = &info->td_pools[j];
3154                                 result = ddi_dma_unbind_handle(
3155                                     td_pool_ptr2->dma_handle);
3156                                 ASSERT(result == DDI_SUCCESS);
3157                                 ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3158                                 ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3159                         }
3160 
3161                         kmem_free(info->td_pools,
3162                             (sizeof (uhci_bulk_isoc_td_pool_t) *
3163                             info->num_pools));
3164 
3165                         return (USB_FAILURE);
3166                 }
3167 
3168                 /* Allocate the memory for the bulk TD pool */
3169                 if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle,
3170                     num * sizeof (uhci_td_t), &dev_attr,
3171                     DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
3172                     &td_pool_ptr1->pool_addr, &real_length,
3173                     &td_pool_ptr1->mem_handle) != DDI_SUCCESS) {
3174 
3175                         ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3176 
3177                         for (j = 0; j < i; j++) {
3178                                 td_pool_ptr2 = &info->td_pools[j];
3179                                 result = ddi_dma_unbind_handle(
3180                                     td_pool_ptr2->dma_handle);
3181                                 ASSERT(result == DDI_SUCCESS);
3182                                 ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3183                                 ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3184                         }
3185 
3186                         kmem_free(info->td_pools,
3187                             (sizeof (uhci_bulk_isoc_td_pool_t) *
3188                             info->num_pools));
3189 
3190                         return (USB_FAILURE);
3191                 }
3192 
3193                 /* Map the bulk TD pool into the I/O address space */
3194                 result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle,
3195                     NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length,
3196                     DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
3197                     &td_pool_ptr1->cookie, &ccount);
3198 
3199                 /* Process the result */
3200                 err = USB_SUCCESS;
3201 
3202                 if (result != DDI_DMA_MAPPED) {
3203                         USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3204                             "uhci_allocate_memory_for_tds: Result = %d",
3205                             result);
3206                         uhci_decode_ddi_dma_addr_bind_handle_result(uhcip,
3207                             result);
3208 
3209                         err = USB_FAILURE;
3210                 }
3211 
3212                 if ((result == DDI_DMA_MAPPED) && (ccount != 1)) {
3213                         /* The cookie count should be 1 */
3214                         USB_DPRINTF_L2(PRINT_MASK_ATTA,
3215                             uhcip->uhci_log_hdl,
3216                             "uhci_allocate_memory_for_tds: "
3217                             "More than 1 cookie");
3218 
3219                         result = ddi_dma_unbind_handle(
3220                             td_pool_ptr1->dma_handle);
3221                         ASSERT(result == DDI_SUCCESS);
3222 
3223                         err = USB_FAILURE;
3224                 }
3225 
3226                 if (err == USB_FAILURE) {
3227 
3228                         ddi_dma_mem_free(&td_pool_ptr1->mem_handle);
3229                         ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3230 
3231                         for (j = 0; j < i; j++) {
3232                                 td_pool_ptr2 = &info->td_pools[j];
3233                                 result = ddi_dma_unbind_handle(
3234                                     td_pool_ptr2->dma_handle);
3235                                 ASSERT(result == DDI_SUCCESS);
3236                                 ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3237                                 ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3238                         }
3239 
3240                         kmem_free(info->td_pools,
3241                             (sizeof (uhci_bulk_isoc_td_pool_t) *
3242                             info->num_pools));
3243 
3244                         return (USB_FAILURE);
3245                 }
3246 
3247                 bzero((void *)td_pool_ptr1->pool_addr,
3248                     num * sizeof (uhci_td_t));
3249                 td_pool_ptr1->num_tds = (ushort_t)num;
3250         }
3251 
3252         return (USB_SUCCESS);
3253 }
3254 
3255 
3256 /*
3257  * uhci_handle_bulk_td:
3258  *
3259  *      Handles the completed bulk transfer descriptors
3260  */
3261 void
3262 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td)
3263 {
3264         uint_t                  num_bulk_tds, index, td_count, j;
3265         usb_cr_t                error;
3266         uint_t                  length, bytes_xfered;
3267         ushort_t                MaxPacketSize;
3268         uint32_t                buf_offs, paddr;
3269         uhci_td_t               *bulk_td_ptr, *current_dummy, *td_head;
3270         uhci_td_t               *tmp_td;
3271         queue_head_t            *qh, *next_qh;
3272         uhci_trans_wrapper_t    *tw = td->tw;
3273         uhci_pipe_private_t     *pp = tw->tw_pipe_private;
3274         uhci_bulk_isoc_xfer_t   *bulk_xfer_info;
3275         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
3276         usba_pipe_handle_data_t *ph;
3277 
3278         USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3279             "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", (void *)td, (void *)tw);
3280 
        /* Initially assume that no bytes were transferred */
        bytes_xfered = ZERO_LENGTH;
3285 
        /*
         * Check whether any errors occurred in the xfer. If so, update
         * the data toggle for the queue head and return an error to the
         * upper layer.
         */
3291         if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
3292                 uhci_handle_bulk_td_errors(uhcip, td);
3293 
3294                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3295                     "uhci_handle_bulk_td: error; data toggle: 0x%x",
3296                     pp->pp_data_toggle);
3297 
3298                 return;
3299         }
3300 
3301         /*
3302          * Update the tw_bytes_pending, and tw_bytes_xfered
3303          */
3304         bytes_xfered = GetTD_alen(uhcip, td);
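        /*
         * The actual-length field is encoded as (n - 1), so add one to
         * get the number of bytes transferred; ZERO_LENGTH means that no
         * data was transferred.
         */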
3305         if (bytes_xfered != ZERO_LENGTH) {
3306                 tw->tw_bytes_pending -= (bytes_xfered + 1);
3307                 tw->tw_bytes_xfered  += (bytes_xfered + 1);
3308         }
3309 
3310         /*
3311          * Get Bulk pipe information and pipe handle
3312          */
3313         bulk_xfer_info  = pp->pp_qh->bulk_xfer_info;
3314         ph = tw->tw_pipe_private->pp_pipe_handle;
3315 
        /*
         * Check whether a data underrun occurred. If so, force this TD to
         * be treated as the last outstanding one so that the transfer is
         * completed below, and update the data toggle bit.
         */
3321         if (bytes_xfered != GetTD_mlen(uhcip, td)) {
3322                 bulk_xfer_info->num_tds = 1;
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_handle_bulk_td: data underrun occurred");
3325 
3326                 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0;
3327         }
3328 
        /*
         * If the TDs in the current frame have completed, check whether
         * there are any more bytes to xfer. If so, insert more TDs.
         * If no more bytes need to be transferred, do the callback to
         * the upper layer.
         * If the TDs in the current frame have not all completed, just
         * delete the TD from the linked lists.
         */
3337         USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3338             "uhci_handle_bulk_td: completed TD data toggle: 0x%x",
3339             GetTD_dtogg(uhcip, td));
3340 
3341         if (--bulk_xfer_info->num_tds == 0) {
3342                 uhci_delete_td(uhcip, td);
3343 
3344                 if ((tw->tw_bytes_pending) &&
3345                     (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) {
3346 
3347                         MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
3348                         length = MaxPacketSize;
3349 
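                        /*
                         * Unless this QH already links where the bulk
                         * queue head links (i.e. it is the last QH in
                         * the bulk list), unlink it from its current
                         * position and re-link it at the tail of the
                         * bulk xfers queue.
                         */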
3350                         qh = pp->pp_qh;
3351                         paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK;
3352                         if (GetQH32(uhcip, qh->link_ptr) !=
3353                             GetQH32(uhcip,
3354                             uhcip->uhci_bulk_xfers_q_head->link_ptr)) {
3355                                 next_qh = QH_VADDR(paddr);
3356                                 SetQH32(uhcip, qh->prev_qh->link_ptr,
3357                                     paddr|(0x2));
3358                                 next_qh->prev_qh = qh->prev_qh;
3359                                 SetQH32(uhcip, qh->link_ptr,
3360                                     GetQH32(uhcip,
3361                                     uhcip->uhci_bulk_xfers_q_head->link_ptr));
3362                                 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
3363                                 SetQH32(uhcip,
3364                                     uhcip->uhci_bulk_xfers_q_tail->link_ptr,
3365                                     QH_PADDR(qh) | 0x2);
3366                                 uhcip->uhci_bulk_xfers_q_tail = qh;
3367                         }
3368 
3369                         if ((tw->tw_bytes_pending / MaxPacketSize) >=
3370                             MAX_NUM_BULK_TDS_PER_XFER) {
3371                                 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
3372                         } else {
3373                                 num_bulk_tds =
3374                                     (tw->tw_bytes_pending / MaxPacketSize);
3375                                 if (tw->tw_bytes_pending % MaxPacketSize) {
3376                                         num_bulk_tds++;
3377                                         length = (tw->tw_bytes_pending %
3378                                             MaxPacketSize);
3379                                 }
3380                         }
3381 
3382                         current_dummy = pp->pp_qh->td_tailp;
3383                         td_pool_ptr = &bulk_xfer_info->td_pools[0];
3384                         bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
3385                         buf_offs = tw->tw_bytes_xfered;
3386                         td_count = num_bulk_tds;
3387                         index = 0;
3388 
3389                         /* reuse the TDs to transfer more data */
3390                         while (td_count > 0) {
3391                                 for (j = 0;
3392                                     (j < (td_pool_ptr->num_tds - 1)) &&
3393                                     (td_count > 1); j++, td_count--) {
3394                                         uhci_fill_in_bulk_isoc_td(uhcip,
3395                                             &bulk_td_ptr[j], &bulk_td_ptr[j+1],
3396                                             BULKTD_PADDR(td_pool_ptr,
3397                                             &bulk_td_ptr[j+1]), ph, buf_offs,
3398                                             MaxPacketSize, tw);
3399                                         buf_offs += MaxPacketSize;
3400                                 }
3401 
3402                                 if (td_count == 1) {
3403                                         uhci_fill_in_bulk_isoc_td(uhcip,
3404                                             &bulk_td_ptr[j], current_dummy,
3405                                             TD_PADDR(current_dummy), ph,
3406                                             buf_offs, length, tw);
3407 
3408                                         break;
3409                                 } else {
3410                                         tmp_td = &bulk_td_ptr[j];
3411                                         ASSERT(index <
3412                                             (bulk_xfer_info->num_pools - 1));
3413                                         td_pool_ptr = &bulk_xfer_info->
3414                                             td_pools[index + 1];
3415                                         bulk_td_ptr = (uhci_td_t *)
3416                                             td_pool_ptr->pool_addr;
3417                                         uhci_fill_in_bulk_isoc_td(uhcip,
3418                                             tmp_td, &bulk_td_ptr[0],
3419                                             BULKTD_PADDR(td_pool_ptr,
3420                                             &bulk_td_ptr[0]), ph, buf_offs,
3421                                             MaxPacketSize, tw);
3422                                         buf_offs += MaxPacketSize;
3423                                         td_count--;
3424                                         index++;
3425                                 }
3426                         }
3427 
3428                         pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
3429                         bulk_xfer_info->num_tds      = (ushort_t)num_bulk_tds;
3430                         SetQH32(uhcip, pp->pp_qh->element_ptr,
3431                             bulk_xfer_info->td_pools[0].cookie.dmac_address);
3432                 } else {
3433                         usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle;
3434 
3435                         pp->pp_qh->bulk_xfer_info = NULL;
3436 
3437                         if (tw->tw_bytes_pending) {
3438                                 /* Update the element pointer */
3439                                 SetQH32(uhcip, pp->pp_qh->element_ptr,
3440                                     TD_PADDR(pp->pp_qh->td_tailp));
3441 
3442                                 /* Remove all the tds */
3443                                 td_head = tw->tw_hctd_head;
3444                                 while (td_head != NULL) {
3445                                         uhci_delete_td(uhcip, td_head);
3446                                         td_head = tw->tw_hctd_head;
3447                                 }
3448                         }
3449 
3450                         if (tw->tw_direction == PID_IN) {
3451                                 usb_req_attrs_t attrs = ((usb_bulk_req_t *)
3452                                     tw->tw_curr_xfer_reqp)->bulk_attributes;
3453 
3454                                 error = USB_CR_OK;
3455 
                                /* Data underrun occurred */
3457                                 if (tw->tw_bytes_pending &&
3458                                     (!(attrs & USB_ATTRS_SHORT_XFER_OK))) {
3459                                         error = USB_CR_DATA_UNDERRUN;
3460                                 }
3461 
3462                                 uhci_sendup_td_message(uhcip, error, tw);
3463                         } else {
3464                                 uhci_do_byte_stats(uhcip, tw->tw_length,
3465                                     usb_pp->p_ep.bmAttributes,
3466                                     usb_pp->p_ep.bEndpointAddress);
3467 
3468                                 /* Data underrun occurred */
3469                                 if (tw->tw_bytes_pending) {
3470 
3471                                         tw->tw_data->b_rptr +=
3472                                             tw->tw_bytes_xfered;
3473 
3474                                         USB_DPRINTF_L2(PRINT_MASK_ATTA,
3475                                             uhcip->uhci_log_hdl,
3476                                             "uhci_handle_bulk_td: "
3477                                             "data underrun occurred");
3478 
3479                                         uhci_hcdi_callback(uhcip, pp,
3480                                             tw->tw_pipe_private->pp_pipe_handle,
3481                                             tw, USB_CR_DATA_UNDERRUN);
3482                                 } else {
3483                                         uhci_hcdi_callback(uhcip, pp,
3484                                             tw->tw_pipe_private->pp_pipe_handle,
3485                                             tw, USB_CR_OK);
3486                                 }
3487                         } /* direction */
3488 
3489                         /* Deallocate DMA memory */
3490                         uhci_deallocate_tw(uhcip, pp, tw);
3491                         for (j = 0; j < bulk_xfer_info->num_pools; j++) {
3492                                 td_pool_ptr = &bulk_xfer_info->td_pools[j];
3493                                 (void) ddi_dma_unbind_handle(
3494                                     td_pool_ptr->dma_handle);
3495                                 ddi_dma_mem_free(&td_pool_ptr->mem_handle);
3496                                 ddi_dma_free_handle(&td_pool_ptr->dma_handle);
3497                         }
3498                         kmem_free(bulk_xfer_info->td_pools,
3499                             (sizeof (uhci_bulk_isoc_td_pool_t) *
3500                             bulk_xfer_info->num_pools));
3501                         kmem_free(bulk_xfer_info,
3502                             sizeof (uhci_bulk_isoc_xfer_t));
3503 
                        /*
                         * When there are no pending bulk commands, point the
                         * end of the lattice tree to NULL. This makes sure
                         * that the HC does not loop anymore and that the
                         * PCI bus is not affected.
                         */
3510                         if (--uhcip->uhci_pending_bulk_cmds == 0) {
3511                                 uhcip->uhci_bulk_xfers_q_tail->link_ptr =
3512                                     HC_END_OF_LIST;
3513                                 USB_DPRINTF_L3(PRINT_MASK_ATTA,
3514                                     uhcip->uhci_log_hdl,
3515                                     "uhci_handle_bulk_td: count = %d",
3516                                     uhcip->uhci_pending_bulk_cmds);
3517                         }
3518                 }
3519         } else {
3520                 uhci_delete_td(uhcip, td);
3521         }
3522 }
3523 
3524 
3525 void
3526 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td)
3527 {
3528         usb_cr_t                usb_err;
3529         uint32_t                paddr_tail, element_ptr, paddr;
3530         uhci_td_t               *next_td;
3531         uhci_pipe_private_t     *pp;
3532         uhci_trans_wrapper_t    *tw = td->tw;
3533         usba_pipe_handle_data_t *ph;
3534         uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL;
3535 
3536         USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3537             "uhci_handle_bulk_td_errors: td = %p", (void *)td);
3538 
3539 #ifdef  DEBUG
3540         uhci_print_td(uhcip, td);
3541 #endif
3542 
3543         tw = td->tw;
3544         ph = tw->tw_pipe_private->pp_pipe_handle;
3545         pp = (uhci_pipe_private_t *)ph->p_hcd_private;
3546 
        /*
         * Find the type of error that occurred and return the error
         * to the upper layer, and adjust the data toggle.
         */
3551         element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) &
3552             QH_ELEMENT_PTR_MASK;
3553         paddr_tail = TD_PADDR(pp->pp_qh->td_tailp);
3554 
3555         /*
3556          * If a timeout occurs before a transfer has completed,
3557          * the timeout handler sets the CRC/Timeout bit and clears the Active
3558          * bit in the link_ptr for each td in the transfer.
3559          * It then waits (at least) 1 ms so that any tds the controller might
3560          * have been executing will have completed.
3561          * So at this point element_ptr will point to either:
3562          * 1) the next td for the transfer (which has not been executed,
3563          * and has the CRC/Timeout status bit set and Active bit cleared),
3564          * 2) the dummy td for this qh.
3565          * So if the element_ptr does not point to the dummy td, we know
3566          * it points to the next td that would have been executed.
3567          * That td has the data toggle we want to save.
3568          * All outstanding tds have been marked as CRC/Timeout,
3569          * so it doesn't matter which td we pass to uhci_parse_td_error
3570          * for the error status.
3571          */
3572         if (element_ptr != paddr_tail) {
3573                 paddr = (element_ptr & QH_ELEMENT_PTR_MASK);
3574                 uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info,
3575                     paddr, &td_pool_ptr);
3576                 next_td = BULKTD_VADDR(td_pool_ptr, paddr);
3577                 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3578                     "uhci_handle_bulk_td_errors: next td = %p",
3579                     (void *)next_td);
3580 
3581                 usb_err = uhci_parse_td_error(uhcip, pp, next_td);
3582         } else {
3583                 usb_err = uhci_parse_td_error(uhcip, pp, td);
3584         }
3585 
3586         /*
3587          * Update the link pointer.
3588          */
3589         SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp));
3590 
        /*
         * Send up the number of bytes transferred before the error
         * condition.
         */
3594         if ((tw->tw_direction == PID_OUT) && tw->tw_data) {
3595                 tw->tw_data->b_rptr += tw->tw_bytes_xfered;
3596         }
3597 
3598         uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR);
3599 
        /*
         * When there are no pending bulk commands, point the end of the
         * lattice tree to NULL. This makes sure that the HC does not
         * loop anymore and that the PCI bus is not affected.
         */
3605         if (--uhcip->uhci_pending_bulk_cmds == 0) {
3606                 uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
3607                 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3608                     "uhci_handle_bulk_td_errors: count = %d",
3609                     uhcip->uhci_pending_bulk_cmds);
3610         }
3611 
3612         uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
3613         uhci_deallocate_tw(uhcip, pp, tw);
3614 }
3615 
3616 
/*
 * uhci_get_bulk_td_by_paddr:
 *      Obtain the address of the TD pool in which the physical address
 *      falls.
 *
 * td_pool_pp - pointer to the address of the TD pool containing the paddr
 */
3623 /* ARGSUSED */
3624 static void
3625 uhci_get_bulk_td_by_paddr(
3626         uhci_state_t                    *uhcip,
3627         uhci_bulk_isoc_xfer_t           *info,
3628         uint32_t                        paddr,
3629         uhci_bulk_isoc_td_pool_t        **td_pool_pp)
3630 {
3631         uint_t                          i = 0;
3632 
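        /*
         * Walk the pools until we find the one whose DMA cookie range
         * [dmac_address, dmac_address + dmac_size) contains paddr.
         */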
3633         while (i < info->num_pools) {
3634                 *td_pool_pp = &info->td_pools[i];
3635                 if (((*td_pool_pp)->cookie.dmac_address <= paddr) &&
3636                     (((*td_pool_pp)->cookie.dmac_address +
3637                     (*td_pool_pp)->cookie.dmac_size) > paddr)) {
3638 
3639                         break;
3640                 }
3641                 i++;
3642         }
3643 
3644         ASSERT(i < info->num_pools);
3645 }
3646 
3647 
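/*
 * uhci_remove_bulk_tds_tws:
 *      Remove all the outstanding bulk TDs and transfer wrappers that
 *      belong to this pipe. "what" gives the context of the call
 *      (UHCI_IN_CLOSE, UHCI_IN_RESET or UHCI_IN_ERROR); on close or
 *      reset the client is called back with USB_CR_FLUSHED. The TD
 *      pools and the bulk xfer info structure are freed as well.
 */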
3648 void
3649 uhci_remove_bulk_tds_tws(
3650         uhci_state_t            *uhcip,
3651         uhci_pipe_private_t     *pp,
3652         int                     what)
3653 {
3654         uint_t                  rval, i;
3655         uhci_td_t               *head;
3656         uhci_td_t               *head_next;
3657         usb_opaque_t            curr_reqp;
3658         uhci_bulk_isoc_xfer_t   *info;
3659         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
3660 
3661         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
3662 
3663         if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {
3664 
3665                 return;
3666         }
3667 
3668         head = uhcip->uhci_outst_tds_head;
3669 
3670         while (head) {
3671                 uhci_trans_wrapper_t *tw_tmp = head->tw;
3672                 head_next = head->outst_td_next;
3673 
3674                 if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
3675                         curr_reqp = tw_tmp->tw_curr_xfer_reqp;
3676                         if (curr_reqp &&
3677                             ((what == UHCI_IN_CLOSE) ||
3678                             (what == UHCI_IN_RESET))) {
3679                                 uhci_hcdi_callback(uhcip, pp,
3680                                     pp->pp_pipe_handle,
3681                                     tw_tmp, USB_CR_FLUSHED);
3682                         } /* end of curr_reqp */
3683 
3684                         uhci_delete_td(uhcip, head);
3685 
3686                         if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
3687                                 ASSERT(info->num_tds > 0);
3688                                 if (--info->num_tds == 0) {
3689                                         uhci_deallocate_tw(uhcip, pp, tw_tmp);
3690 
3691                                         /*
3692                                          * This will make sure that the HC
3693                                          * does not loop anymore when there
3694                                          * are no pending bulk commands.
3695                                          */
3696                                         if (--uhcip->uhci_pending_bulk_cmds
3697                                             == 0) {
3698                                                 uhcip->uhci_bulk_xfers_q_tail->
3699                                                     link_ptr = HC_END_OF_LIST;
3700                                                 USB_DPRINTF_L3(PRINT_MASK_ATTA,
3701                                                     uhcip->uhci_log_hdl,
3702                                                     "uhci_remove_bulk_tds_tws:"
3703                                                     " count = %d",
3704                                                     uhcip->
3705                                                     uhci_pending_bulk_cmds);
3706                                         }
3707                                 }
3708                         }
3709                 }
3710 
3711                 head = head_next;
3712         }
3713 
3714         if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
3715                 ASSERT(info->num_tds == 0);
3716         }
3717 
3718         for (i = 0; i < info->num_pools; i++) {
3719                 td_pool_ptr = &info->td_pools[i];
3720                 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
3721                 ASSERT(rval == DDI_SUCCESS);
3722                 ddi_dma_mem_free(&td_pool_ptr->mem_handle);
3723                 ddi_dma_free_handle(&td_pool_ptr->dma_handle);
3724         }
3725         kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) *
3726             info->num_pools));
3727         kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
3728         pp->pp_qh->bulk_xfer_info = NULL;
3729 }
3730 
3731 
3732 /*
3733  * uhci_save_data_toggle ()
3734  *      Save the data toggle in the usba_device structure
3735  */
3736 void
3737 uhci_save_data_toggle(uhci_pipe_private_t *pp)
3738 {
3739         usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
3740 
        /* Save the data toggle in the usba_device structure. */
3742         mutex_enter(&ph->p_mutex);
3743         usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3744             pp->pp_data_toggle);
3745         mutex_exit(&ph->p_mutex);
3746 }
3747 
/*
 * uhci_create_isoc_transfer_wrapper:
 *      Create a Transaction Wrapper (TW) for an isoc transfer.
 *      This involves the allocation of DMA resources.
 *
 *      For isoc transfers, one isoc transfer includes multiple packets
 *      and each packet may have a different length. So each packet is
 *      transferred by one TD. We only know that an individual packet
 *      length won't exceed 1023 bytes, but we don't know the exact
 *      lengths. It is hard to make one physically discontiguous DMA
 *      buffer that fits all the TDs, as can be done for the ctrl/bulk/
 *      intr transfers. It is also undesirable to make one physically
 *      contiguous DMA buffer for all the packets, since this may easily
 *      fail when the system is low on memory. So an individual DMA
 *      buffer is allocated for each individual isoc packet and each DMA
 *      buffer is physically contiguous. An extra structure is allocated
 *      to save the multiple DMA handles.
 */
3766 static uhci_trans_wrapper_t *
3767 uhci_create_isoc_transfer_wrapper(
3768         uhci_state_t            *uhcip,
3769         uhci_pipe_private_t     *pp,
3770         usb_isoc_req_t          *req,
3771         size_t                  length,
3772         usb_flags_t             usb_flags)
3773 {
3774         int                     result;
3775         size_t                  real_length, strtlen, xfer_size;
3776         uhci_trans_wrapper_t    *tw;
3777         ddi_device_acc_attr_t   dev_attr;
3778         ddi_dma_attr_t          dma_attr;
3779         int                     kmem_flag;
3780         int                     (*dmamem_wait)(caddr_t);
3781         uint_t                  i, j, ccount;
3782         usb_isoc_req_t          *tmp_req = req;
3783 
3784         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
3785 
3786         if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {
3787 
3788                 return (NULL);
3789         }
3790 
3791         if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
3792             USB_EP_DIR_IN)) {
3793                 tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
3794         }
3795 
3796         if (tmp_req == NULL) {
3797 
3798                 return (NULL);
3799         }
3800 
3801 
3802         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3803             "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
3804             length, usb_flags);
3805 
3806         /* SLEEP flag should not be used in interrupt context */
3807         if (servicing_interrupt()) {
3808                 kmem_flag = KM_NOSLEEP;
3809                 dmamem_wait = DDI_DMA_DONTWAIT;
3810         } else {
3811                 kmem_flag = KM_SLEEP;
3812                 dmamem_wait = DDI_DMA_SLEEP;
3813         }
3814 
3815         /* Allocate space for the transfer wrapper */
3816         if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
3817             NULL) {
3818                 USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3819                     "uhci_create_isoc_transfer_wrapper: kmem_alloc failed");
3820 
3821                 return (NULL);
3822         }
3823 
3824         /* Allocate space for the isoc buffer handles */
3825         strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
3826         if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
3827                 USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3828                     "uhci_create_isoc_transfer_wrapper: kmem_alloc "
3829                     "isoc buffer failed");
3830                 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3831 
3832                 return (NULL);
3833         }
3834 
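        /*
         * Each isoc packet buffer must be physically contiguous, so
         * limit the scatter/gather list to a single cookie.
         */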
3835         bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3836         dma_attr.dma_attr_sgllen = 1;
3837 
3838         dev_attr.devacc_attr_version            = DDI_DEVICE_ATTR_V0;
3839         dev_attr.devacc_attr_endian_flags       = DDI_STRUCTURE_LE_ACC;
3840         dev_attr.devacc_attr_dataorder          = DDI_STRICTORDER_ACC;
3841 
3842         /* Store the transfer length */
3843         tw->tw_length = length;
3844 
3845         for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
3846                 tw->tw_isoc_bufs[i].index = (ushort_t)i;
3847 
3848                 /* Allocate the DMA handle */
3849                 if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
3850                     dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
3851                     DDI_SUCCESS) {
3852                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3853                             "uhci_create_isoc_transfer_wrapper: "
3854                             "Alloc handle %d failed", i);
3855 
3856                         for (j = 0; j < i; j++) {
3857                                 result = ddi_dma_unbind_handle(
3858                                     tw->tw_isoc_bufs[j].dma_handle);
3859                                 ASSERT(result == USB_SUCCESS);
3860                                 ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3861                                     mem_handle);
3862                                 ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3863                                     dma_handle);
3864                         }
3865                         kmem_free(tw->tw_isoc_bufs, strtlen);
3866                         kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3867 
3868                         return (NULL);
3869                 }
3870 
3871                 /* Allocate the memory */
3872                 xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
3873                 if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
3874                     xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
3875                     NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
3876                     &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
3877                     DDI_SUCCESS) {
3878                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3879                             "uhci_create_isoc_transfer_wrapper: "
3880                             "dma_mem_alloc %d fail", i);
3881                         ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3882 
3883                         for (j = 0; j < i; j++) {
3884                                 result = ddi_dma_unbind_handle(
3885                                     tw->tw_isoc_bufs[j].dma_handle);
3886                                 ASSERT(result == USB_SUCCESS);
3887                                 ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3888                                     mem_handle);
3889                                 ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3890                                     dma_handle);
3891                         }
3892                         kmem_free(tw->tw_isoc_bufs, strtlen);
3893                         kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3894 
3895                         return (NULL);
3896                 }
3897 
3898                 ASSERT(real_length >= xfer_size);
3899 
3900                 /* Bind the handle */
3901                 result = ddi_dma_addr_bind_handle(
3902                     tw->tw_isoc_bufs[i].dma_handle, NULL,
3903                     (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
3904                     DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3905                     &tw->tw_isoc_bufs[i].cookie, &ccount);
3906 
3907                 if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
3908                         tw->tw_isoc_bufs[i].length = xfer_size;
3909 
3910                         continue;
3911                 } else {
3912                         USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3913                             "uhci_create_isoc_transfer_wrapper: "
3914                             "Bind handle %d failed", i);
3915                         if (result == DDI_DMA_MAPPED) {
3916                                 result = ddi_dma_unbind_handle(
3917                                     tw->tw_isoc_bufs[i].dma_handle);
3918                                 ASSERT(result == USB_SUCCESS);
3919                         }
3920                         ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
3921                         ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3922 
3923                         for (j = 0; j < i; j++) {
3924                                 result = ddi_dma_unbind_handle(
3925                                     tw->tw_isoc_bufs[j].dma_handle);
3926                                 ASSERT(result == USB_SUCCESS);
3927                                 ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3928                                     mem_handle);
3929                                 ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3930                                     dma_handle);
3931                         }
3932                         kmem_free(tw->tw_isoc_bufs, strtlen);
3933                         kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3934 
3935                         return (NULL);
3936                 }
3937         }
3938 
3939         tw->tw_ncookies = tmp_req->isoc_pkts_count;
3940         tw->tw_isoc_strtlen = strtlen;
3941 
3942         /*
3943          * Only allow one wrapper to be added at a time. Insert the
3944          * new transaction wrapper into the list for this pipe.
3945          */
3946         if (pp->pp_tw_head == NULL) {
3947                 pp->pp_tw_head = tw;
3948                 pp->pp_tw_tail = tw;
3949         } else {
3950                 pp->pp_tw_tail->tw_next = tw;
3951                 pp->pp_tw_tail = tw;
3952                 ASSERT(tw->tw_next == NULL);
3953         }
3954 
3955         /* Store a back pointer to the pipe private structure */
3956         tw->tw_pipe_private = pp;
3957 
3958         /* Store the transfer type - synchronous or asynchronous */
3959         tw->tw_flags = usb_flags;
3960 
3961         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3962             "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
3963             (void *)tw, tw->tw_ncookies);
3964 
3965         return (tw);
3966 }
3967 
3968 /*
3969  * uhci_insert_isoc_td:
3970  *      - Create transfer wrapper
3971  *      - Allocate memory for the isoc td's
3972  *      - Fill up all the TD's and submit to the HC
3973  *      - Update all the linked lists
3974  */
3975 int
3976 uhci_insert_isoc_td(
3977         uhci_state_t            *uhcip,
3978         usba_pipe_handle_data_t *ph,
3979         usb_isoc_req_t          *isoc_req,
3980         size_t                  length,
3981         usb_flags_t             flags)
3982 {
3983         int                     rval = USB_SUCCESS;
3984         int                     error;
3985         uint_t                  ddic;
3986         uint32_t                i, j, index;
3987         uint32_t                bytes_to_xfer;
3988         uint32_t                expired_frames = 0;
3989         usb_frame_number_t      start_frame, end_frame, current_frame;
3990         uhci_td_t               *td_ptr;
3991         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
3992         uhci_trans_wrapper_t    *tw;
3993         uhci_bulk_isoc_xfer_t   *isoc_xfer_info;
3994         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
3995 
3996         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
3997             "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
3998             (void *)ph, (void *)isoc_req, length);
3999 
4000         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4001 
4002         /* Allocate a transfer wrapper */
4003         if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
4004             length, flags)) == NULL) {
4005                 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4006                     "uhci_insert_isoc_td: TW allocation failed");
4007 
4008                 return (USB_NO_RESOURCES);
4009         }
4010 
4011         /* Save current isochronous request pointer */
4012         tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;
4013 
4014         /*
4015          * Initialize the transfer wrapper. These values are useful
4016          * for sending back the reply.
4017          */
4018         tw->tw_handle_td             = uhci_handle_isoc_td;
4019         tw->tw_handle_callback_value = NULL;
4020         tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
4021             PID_OUT : PID_IN;
4022 
        /*
         * If the transfer is an isoc send (OUT), copy the data from the
         * request to the transfer wrapper.
         */
4027         if ((tw->tw_direction == PID_OUT) && length) {
4028                 uchar_t *p;
4029 
4030                 ASSERT(isoc_req->isoc_data != NULL);
4031                 p = isoc_req->isoc_data->b_rptr;
4032 
4033                 /* Copy the data into the message */
4034                 for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
4035                         ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
4036                             p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
4037                             isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
4038                             DDI_DEV_AUTOINCR);
4039                         p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
4040                 }
4041         }
4042 
4043         if (tw->tw_direction == PID_IN) {
4044                 if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
4045                     flags)) != USB_SUCCESS) {
4046                         USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4047                             "uhci_insert_isoc_td: isoc_req_t alloc failed");
4048                         uhci_deallocate_tw(uhcip, pp, tw);
4049 
4050                         return (rval);
4051                 }
4052 
4053                 isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
4054         }
4055 
4056         tw->tw_isoc_req      = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
4057 
4058         /* Get the pointer to the isoc_xfer_info structure */
4059         isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
4060         isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;
4061 
4062         /*
4063          * Allocate memory for isoc tds
4064          */
4065         if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
4066             isoc_xfer_info)) != USB_SUCCESS) {
4067                 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4068                     "uhci_alloc_bulk_isoc_td: Memory allocation failure");
4069 
4070                 if (tw->tw_direction == PID_IN) {
4071                         uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
4072                 }
4073                 uhci_deallocate_tw(uhcip, pp, tw);
4074 
4075                 return (rval);
4076         }
4077 
4078         /*
4079          * Get the address of the first isoc TD pool and of the
4080          * first TD within it.
4081          */
4082         td_pool_ptr = &isoc_xfer_info->td_pools[0];
4083         td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
4084         index = 0;
4085 
4086         /*
4087          * Fill up the isoc tds
4088          */
4089         USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4090             "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);
4091 
4092         for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4093                 for (j = 0; j < td_pool_ptr->num_tds; j++) {
4094                         bytes_to_xfer =
4095                             isoc_req->isoc_pkt_descr[index].isoc_pkt_length;
4096 
4097                         uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
4098                             (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
4099                             bytes_to_xfer, tw);
4100                         td_ptr[j].isoc_pkt_index = (ushort_t)index;
4101                         index++;
4102                 }
4103 
4104                 if (i < (isoc_xfer_info->num_pools - 1)) {
4105                         td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
4106                         td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
4107                 }
4108         }
4109 
4110         /*
4111          * Get the starting frame number.
4112          * The client driver sets the flag USB_ATTRS_ISOC_XFER_ASAP to ask
4113          * the HCD to take care of choosing the starting frame number.
4114          *
4115          * The following code is time critical, so execute it atomically.
4116          */
4117         ddic = ddi_enter_critical();
4118         current_frame = uhci_get_sw_frame_number(uhcip);
4119 
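             /*
              * USB_ATTRS_ISOC_START_FRAME: the client supplied isoc_frame_no,
              * which is validated against the current frame; expired leading
              * frames are skipped later.  USB_ATTRS_ISOC_XFER_ASAP: scheduling
              * continues from the pipe's last frame (pp_frame_num), or
              * restarts FRNUM_OFFSET frames ahead of the current frame if
              * that frame has already passed or is invalid.
              */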
4120         if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
4121                 start_frame = isoc_req->isoc_frame_no;
4122                 end_frame = start_frame + isoc_req->isoc_pkts_count;
4123 
4124                 /* Check available frames */
4125                 if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
4126                         if (current_frame > start_frame) {
4127                                 if ((current_frame + FRNUM_OFFSET) <
4128                                     end_frame) {
4129                                         expired_frames = current_frame +
4130                                             FRNUM_OFFSET - start_frame;
4131                                         start_frame = current_frame +
4132                                             FRNUM_OFFSET;
4133                                 } else {
4134                                         rval = USB_INVALID_START_FRAME;
4135                                 }
4136                         }
4137                 } else {
4138                         rval = USB_INVALID_START_FRAME;
4139                 }
4140 
4141         } else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
4142                 start_frame = pp->pp_frame_num;
4143 
4144                 if (start_frame == INVALID_FRNUM) {
4145                         start_frame = current_frame + FRNUM_OFFSET;
4146                 } else if (current_frame > start_frame) {
4147                         start_frame = current_frame + FRNUM_OFFSET;
4148                 }
4149 
4150                 end_frame = start_frame + isoc_req->isoc_pkts_count;
4151                 isoc_req->isoc_frame_no = start_frame;
4152 
4153         }
4154 
4155         if (rval != USB_SUCCESS) {
4156 
4157                 /* Exit the critical section */
4158                 ddi_exit_critical(ddic);
4159 
4160                 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4161                     "uhci_insert_isoc_td: Invalid starting frame number");
4162 
4163                 if (tw->tw_direction == PID_IN) {
4164                         uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
4165                 }
4166 
4167                 while (tw->tw_hctd_head) {
4168                         uhci_delete_td(uhcip, tw->tw_hctd_head);
4169                 }
4170 
4171                 for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4172                         td_pool_ptr = &isoc_xfer_info->td_pools[i];
4173                         error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
4174                         ASSERT(error == DDI_SUCCESS);
4175                         ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4176                         ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4177                 }
4178                 kmem_free(isoc_xfer_info->td_pools,
4179                     (sizeof (uhci_bulk_isoc_td_pool_t) *
4180                     isoc_xfer_info->num_pools));
4181 
4182                 uhci_deallocate_tw(uhcip, pp, tw);
4183 
4184                 return (rval);
4185         }
4186 
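             /*
              * Complete any leading packets whose frames have already expired:
              * they are marked USB_CR_NOT_ACCESSED and their TDs deleted, so
              * only the remaining packets are scheduled onto the frame list
              * below.
              */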
4187         for (i = 0; i < expired_frames; i++) {
4188                 isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
4189                     USB_CR_NOT_ACCESSED;
4190                 isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
4191                     isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
4192                 uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
4193                     &td_ptr, &td_pool_ptr);
4194                 uhci_delete_td(uhcip, td_ptr);
4195                 --isoc_xfer_info->num_tds;
4196         }
4197 
4198         /*
4199          * Add the TD's to the HC list
4200          */
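             /*
              * The frame list has NUM_FRAME_LST_ENTRIES (1024) slots, so only
              * the low 10 bits of the 64-bit frame number index it.
              */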
4201         start_frame = (start_frame & 0x3ff);
4202         for (; i < isoc_req->isoc_pkts_count; i++) {
4203                 uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
4204                     &td_ptr, &td_pool_ptr);
4205                 if (uhcip->uhci_isoc_q_tailp[start_frame]) {
4206                         td_ptr->isoc_prev =
4207                             uhcip->uhci_isoc_q_tailp[start_frame];
4208                         td_ptr->isoc_next = NULL;
4209                         td_ptr->link_ptr =
4210                             uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
4211                         uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
4212                             td_ptr;
4213                         SetTD32(uhcip,
4214                             uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
4215                             ISOCTD_PADDR(td_pool_ptr, td_ptr));
4216                         uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
4217                 } else {
4218                         uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
4219                         td_ptr->isoc_next = NULL;
4220                         td_ptr->isoc_prev = NULL;
4221                         SetTD32(uhcip, td_ptr->link_ptr,
4222                             GetFL32(uhcip,
4223                             uhcip->uhci_frame_lst_tablep[start_frame]));
4224                         SetFL32(uhcip,
4225                             uhcip->uhci_frame_lst_tablep[start_frame],
4226                             ISOCTD_PADDR(td_pool_ptr, td_ptr));
4227                 }
4228                 td_ptr->starting_frame = (uint_t)start_frame;
4229 
4230                 if (++start_frame == NUM_FRAME_LST_ENTRIES)
4231                         start_frame = 0;
4232         }
4233 
4234         ddi_exit_critical(ddic);
4235         pp->pp_frame_num = end_frame;
4236 
4237         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4238             "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
4239             " 0x%llx", (unsigned long long)current_frame,
4240             (unsigned long long)(pp->pp_frame_num));
4241 
4242         return (rval);
4243 }
4244 
4245 
4246 /*
4247  * uhci_get_isoc_td_by_index:
4248  *      Obtain the addresses of the TD pool and the TD at the index.
4249  *
4250  * tdpp - pointer to the address of the TD at the isoc packet index
4251  * td_pool_pp - pointer to the address of the TD pool containing
4252  *              the specified TD
4253  */
4254 /* ARGSUSED */
4255 static void
4256 uhci_get_isoc_td_by_index(
4257         uhci_state_t                    *uhcip,
4258         uhci_bulk_isoc_xfer_t           *info,
4259         uint_t                          index,
4260         uhci_td_t                       **tdpp,
4261         uhci_bulk_isoc_td_pool_t        **td_pool_pp)
4262 {
4263         uint_t                  i = 0, j = 0;
4264         uhci_td_t               *td_ptr;
4265 
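             /*
              * Walk the pools, accumulating TD counts until the pool
              * containing the requested index is found; `i' then becomes the
              * offset within that pool.  For example (illustrative sizes),
              * with two pools of 100 TDs each, index 150 maps to pool 1,
              * offset 50.
              */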
4266         while (j < info->num_pools) {
4267                 if ((i + info->td_pools[j].num_tds) <= index) {
4268                         i += info->td_pools[j].num_tds;
4269                         j++;
4270                 } else {
4271                         i = index - i;
4272 
4273                         break;
4274                 }
4275         }
4276 
4277         ASSERT(j < info->num_pools);
4278         *td_pool_pp = &info->td_pools[j];
4279         td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
4280         *tdpp = &td_ptr[i];
4281 }
4282 
4283 
4284 /*
4285  * uhci_handle_isoc_td:
4286  *      Handles the completed isoc tds
4287  */
4288 void
4289 uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4290 {
4291         uint_t                  rval, i;
4292         uint32_t                pkt_index = td->isoc_pkt_index;
4293         usb_cr_t                cr;
4294         uhci_trans_wrapper_t    *tw = td->tw;
4295         usb_isoc_req_t          *isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
4296         uhci_pipe_private_t     *pp = tw->tw_pipe_private;
4297         uhci_bulk_isoc_xfer_t   *isoc_xfer_info = &tw->tw_xfer_info;
4298         usba_pipe_handle_data_t *usb_pp;
4299         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
4300 
4301         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4302             "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
4303             "index = %x", (void *)td, (void *)pp, (void *)tw, (void *)isoc_req,
4304             pkt_index);
4305 
4306         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4307 
4308         usb_pp = pp->pp_pipe_handle;
4309 
4310         /*
4311          * Check whether any errors occurred. If so, update the error
4312          * count, but never return a non-zero completion reason to the
4313          * upper layer.
4314          */
4315         cr = USB_CR_OK;
4316         if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
4317                 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4318                     "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
4319                     GetTD_status(uhcip, td));
4320                 isoc_req->isoc_error_count++;
4321         }
4322 
4323         if (isoc_req != NULL) {
4324                 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
4325                 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
4326                     (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
4327                     GetTD_alen(uhcip, td) + 1;
4328         }
4329 
4330         uhci_delete_isoc_td(uhcip, td);
4331 
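             /*
              * The request completes only once all of its TDs have been
              * retired; until then each completed TD just records its packet
              * status above.
              */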
4332         if (--isoc_xfer_info->num_tds != 0) {
4333                 USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4334                     "uhci_handle_isoc_td: Number of TDs %d",
4335                     isoc_xfer_info->num_tds);
4336 
4337                 return;
4338         }
4339 
4340         tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
4341         if (tw->tw_direction == PID_IN) {
4342                 uhci_sendup_td_message(uhcip, cr, tw);
4343 
4344                 if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
4345                         USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4346                             "uhci_handle_isoc_td: Drop message");
4347                 }
4348 
4349         } else {
4350                 /* update kstats only for OUT. sendup_td_msg() does it for IN */
4351                 uhci_do_byte_stats(uhcip, tw->tw_length,
4352                     usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);
4353 
4354                 uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
4355         }
4356 
4357         for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4358                 td_pool_ptr = &isoc_xfer_info->td_pools[i];
4359                 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
4360                 ASSERT(rval == DDI_SUCCESS);
4361                 ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4362                 ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4363         }
4364         kmem_free(isoc_xfer_info->td_pools,
4365             (sizeof (uhci_bulk_isoc_td_pool_t) *
4366             isoc_xfer_info->num_pools));
4367         uhci_deallocate_tw(uhcip, pp, tw);
4368 }
4369 
4370 
4371 /*
4372  * uhci_handle_isoc_receive:
4373  *      - Restarts polling for the isoc IN pipe by
4374  *        inserting another isoc receive request
4375  */
4376 static int
4377 uhci_handle_isoc_receive(
4378         uhci_state_t            *uhcip,
4379         uhci_pipe_private_t     *pp,
4380         uhci_trans_wrapper_t    *tw)
4381 {
4382         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4383             "uhci_handle_isoc_receive: tw = 0x%p", (void *)tw);
4384 
4385         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4386 
4387         /*
4388          * TODO: check that the pipe state is polling before inserting a
4389          * new request, and determine when TD de-allocation is done (so
4390          * the same TD can be reused).
4391          */
4392         if (uhci_start_isoc_receive_polling(uhcip,
4393             pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
4394             0) != USB_SUCCESS) {
4395                 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4396                     "uhci_handle_isoc_receive: receive polling failed");
4397 
4398                 return (USB_FAILURE);
4399         }
4400 
4401         return (USB_SUCCESS);
4402 }
4403 
4404 
4405 /*
4406  * uhci_delete_isoc_td:
4407  *      - Delete from the outstanding command queue
4408  *      - Delete from the tw queue
4409  *      - Delete from the isoc queue
4410  *      - Delete from the HOST CONTROLLER list
4411  */
4412 static void
4413 uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4414 {
4415         uint32_t        starting_frame = td->starting_frame;
4416 
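             /*
              * Unlink the TD from the per-frame isoc list.  Four cases: the
              * TD is the only entry, the tail, the head, or in the middle of
              * the list.
              */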
4417         if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
4418                 SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4419                     GetTD32(uhcip, td->link_ptr));
4420                 uhcip->uhci_isoc_q_tailp[starting_frame] = NULL;
4421         } else if (td->isoc_next == NULL) {
4422                 td->isoc_prev->link_ptr = td->link_ptr;
4423                 td->isoc_prev->isoc_next = NULL;
4424                 uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
4425         } else if (td->isoc_prev == NULL) {
4426                 td->isoc_next->isoc_prev = NULL;
4427                 SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4428                     GetTD32(uhcip, td->link_ptr));
4429         } else {
4430                 td->isoc_prev->isoc_next = td->isoc_next;
4431                 td->isoc_next->isoc_prev = td->isoc_prev;
4432                 td->isoc_prev->link_ptr = td->link_ptr;
4433         }
4434 
4435         uhci_delete_td(uhcip, td);
4436 }
4437 
4438 
4439 /*
4440  * uhci_start_isoc_receive_polling:
4441  *      - Computes the total isoc transfer length
4442  *      - Validates the length against the maximum isoc transfer size
4443  *      - Inserts the isoc td's into the HC processing list.
4444  */
4445 int
4446 uhci_start_isoc_receive_polling(
4447         uhci_state_t            *uhcip,
4448         usba_pipe_handle_data_t *ph,
4449         usb_isoc_req_t          *isoc_req,
4450         usb_flags_t             usb_flags)
4451 {
4452         int                     ii, error;
4453         size_t                  max_isoc_xfer_size, length, isoc_pkts_length;
4454         ushort_t                isoc_pkt_count;
4455         uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
4456         usb_isoc_pkt_descr_t    *isoc_pkt_descr;
4457 
4458         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4459             "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);
4460 
4461         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4462 
4463         max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;
4464 
4465         if (isoc_req) {
4466                 isoc_pkt_descr = isoc_req->isoc_pkt_descr;
4467                 isoc_pkt_count = isoc_req->isoc_pkts_count;
4468                 isoc_pkts_length = isoc_req->isoc_pkts_length;
4469         } else {
4470                 isoc_pkt_descr = ((usb_isoc_req_t *)
4471                     pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
4472                 isoc_pkt_count = ((usb_isoc_req_t *)
4473                     pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
4474                 isoc_pkts_length = ((usb_isoc_req_t *)
4475                     pp->pp_client_periodic_in_reqp)->isoc_pkts_length;
4476         }
4477 
4478         for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
4479                 length += isoc_pkt_descr->isoc_pkt_length;
4480                 isoc_pkt_descr++;
4481         }
4482 
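             /*
              * A non-zero isoc_pkts_length must equal the sum of the
              * individual packet lengths; e.g. (illustrative) 4 packets of
              * 64 bytes each require isoc_pkts_length to be either 0 or 256.
              */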
4483         if ((isoc_pkts_length) && (isoc_pkts_length != length)) {
4484 
4485                 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
4486                     "uhci_start_isoc_receive_polling: isoc_pkts_length 0x%lx "
4487                     "is not equal to the sum of all pkt lengths 0x%lx in "
4488                     "an isoc request", isoc_pkts_length, length);
4489 
4490                 return (USB_FAILURE);
4491         }
4492 
4493         /* Check the size of isochronous request */
4494         if (length > max_isoc_xfer_size) {
4495                 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4496                     "uhci_start_isoc_receive_polling: "
4497                     "Max isoc request size = %lx, Given isoc req size = %lx",
4498                     max_isoc_xfer_size, length);
4499 
4500                 return (USB_FAILURE);
4501         }
4502 
4503         /* Add the TD into the Host Controller's isoc list */
4504         error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);
4505 
4506         return (error);
4507 }
4508 
4509 
4510 /*
4511  * uhci_remove_isoc_tds_tws
4512  *      This routine scans the pipe and removes all the td's
4513  *      and transfer wrappers and deallocates the memory
4514  *      associated with those td's and tw's.
4515  */
4516 void
4517 uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
4518 {
4519         uint_t                  rval, i;
4520         uhci_td_t               *tmp_td, *td_head;
4521         usb_isoc_req_t          *isoc_req;
4522         uhci_trans_wrapper_t    *tmp_tw, *tw_head;
4523         uhci_bulk_isoc_xfer_t   *isoc_xfer_info;
4524         uhci_bulk_isoc_td_pool_t *td_pool_ptr;
4525 
4526         USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4527             "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);
4528 
4529         tw_head = pp->pp_tw_head;
4530         while (tw_head) {
4531                 tmp_tw = tw_head;
4532                 tw_head = tw_head->tw_next;
4533                 td_head = tmp_tw->tw_hctd_head;
4534                 if (tmp_tw->tw_direction == PID_IN) {
4535                         uhci_deallocate_periodic_in_resource(uhcip, pp,
4536                             tmp_tw);
4537                 } else if (tmp_tw->tw_direction == PID_OUT) {
4538                         uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
4539                             tmp_tw, USB_CR_FLUSHED);
4540                 }
4541 
4542                 while (td_head) {
4543                         tmp_td = td_head;
4544                         td_head = td_head->tw_td_next;
4545                         uhci_delete_isoc_td(uhcip, tmp_td);
4546                 }
4547 
4548                 isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
4549                 if (isoc_req) {
4550                         usb_free_isoc_req(isoc_req);
4551                 }
4552 
4553                 ASSERT(tmp_tw->tw_hctd_head == NULL);
4554 
4555                 if (tmp_tw->tw_xfer_info.td_pools) {
4556                         isoc_xfer_info =
4557                             (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
4558                         for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4559                                 td_pool_ptr = &isoc_xfer_info->td_pools[i];
4560                                 rval = ddi_dma_unbind_handle(
4561                                     td_pool_ptr->dma_handle);
4562                                 ASSERT(rval == DDI_SUCCESS);
4563                                 ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4564                                 ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4565                         }
4566                         kmem_free(isoc_xfer_info->td_pools,
4567                             (sizeof (uhci_bulk_isoc_td_pool_t) *
4568                             isoc_xfer_info->num_pools));
4569                 }
4570 
4571                 uhci_deallocate_tw(uhcip, pp, tmp_tw);
4572         }
4573 }
4574 
4575 
4576 /*
4577  * uhci_isoc_update_sw_frame_number()
4578  *      To avoid code duplication, simply call uhci_get_sw_frame_number().
4579  */
4580 void
4581 uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
4582 {
4583         (void) uhci_get_sw_frame_number(uhcip);
4584 }
4585 
4586 
4587 /*
4588  * uhci_get_sw_frame_number:
4589  *      Hold the uhci_int_mutex before calling this routine.
4590  */
4591 uint64_t
4592 uhci_get_sw_frame_number(uhci_state_t *uhcip)
4593 {
4594         uint64_t sw_frnum, hw_frnum, current_frnum;
4595 
4596         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4597 
4598         sw_frnum = uhcip->uhci_sw_frnum;
4599         hw_frnum = Get_OpReg16(FRNUM);
4600 
4601         /*
4602          * Check bit 10 of the software counter and the hardware frame
4603          * counter. If both are the same, the hardware counter has not
4604          * wrapped (bit 10 of the hw frame counter toggles every 1024
4605          * frames). The lower 11 bits of the software counter hold the
4606          * hardware frame counter value; the upper bits are incremented
4607          * every 1024 frames, either here or in the interrupt handler.
4608          */
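             /*
              * Illustrative example: if the saved software counter is 0x3ff
              * (bit 10 clear) and the hardware now reads 0x400 (bit 10 set),
              * the software counter's upper bits (sw_frnum >> 10) are
              * incremented from 0 to 1 and recombined with the hardware
              * value, yielding frame 0x400 (1024).
              */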
4609         if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
4610                 /* The MSB of hw counter did not toggle */
4611                 current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
4612         } else {
4613                 /*
4614                  * The hw counter has wrapped around and the interrupt
4615                  * handler has not yet updated the sw frame counter, so
4616                  * update it here and return the correct frame number.
4617                  */
4618                 sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
4619                 current_frnum =
4620                     ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
4621         }
4622         uhcip->uhci_sw_frnum = current_frnum;
4623 
4624         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
4625             "uhci_get_sw_frame_number: sw=%lld hd=%lld",
4626             (unsigned long long)(uhcip->uhci_sw_frnum),
4627             (unsigned long long)hw_frnum);
4628 
4629         return (current_frnum);
4630 }
4631 
4632 
4633 /*
4634  * uhci_cmd_timeout_hdlr:
4635  *      This routine gets called every second. It checks for timed-out
4636  *      control/bulk commands and times out any command that exceeds the
4637  *      timeout period specified by the pipe policy.
4638  */
4639 void
4640 uhci_cmd_timeout_hdlr(void *arg)
4641 {
4642         uint_t                  flag = B_FALSE;
4643         uhci_td_t               *head, *tmp_td;
4644         uhci_state_t            *uhcip = (uhci_state_t *)arg;
4645         uhci_pipe_private_t     *pp;
4646 
4647         /*
4648          * Check whether any of the control/bulk xfers have timed out.
4649          * If so, complete those commands with timeout as the reason.
4650          */
4651         mutex_enter(&uhcip->uhci_int_mutex);
4652         head = uhcip->uhci_outst_tds_head;
4653 
4654         while (head) {
4655                 /*
4656                  * If the timeout is zero, do not time out the command.
4657                  */
4658                 if (head->tw->tw_timeout_cnt == 0)  {
4659                         head = head->outst_td_next;
4660                         continue;
4661                 }
4662 
4663                 if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
4664                         head->tw->tw_flags |= TW_TIMEOUT_FLAG;
4665                         --head->tw->tw_timeout_cnt;
4666                 }
4667 
4668                 /* only do it for bulk and control TDs */
4669                 if ((head->tw->tw_timeout_cnt == 0) &&
4670                     (head->tw->tw_handle_td != uhci_handle_isoc_td)) {
4671 
4672                         USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
4673                             "Command timed out: td = %p", (void *)head);
4674 
4675                         head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;
4676 
4677                         /*
4678                          * Finally, check whether the command is still active
4679                          */
4680                         if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
4681                                 SetTD32(uhcip, head->link_ptr,
4682                                     GetTD32(uhcip, head->link_ptr) |
4683                                     HC_END_OF_LIST);
4684                                 pp = head->tw->tw_pipe_private;
4685                                 SetQH32(uhcip, pp->pp_qh->element_ptr,
4686                                     GetQH32(uhcip, pp->pp_qh->element_ptr) |
4687                                     HC_END_OF_LIST);
4688                         }
4689 
4690                         flag = B_TRUE;
4691                 }
4692 
4693                 head = head->outst_td_next;
4694         }
4695 
4696         if (flag) {
4697                 (void) uhci_wait_for_sof(uhcip);
4698         }
4699 
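             /*
              * The SOF wait above ensures the controller is no longer
              * processing the TDs that were just terminated with
              * HC_END_OF_LIST, so their status can now be rewritten safely
              * below.
              */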
4700         head = uhcip->uhci_outst_tds_head;
4701         while (head) {
4702                 if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
4703                         head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
4704                 }
4705                 if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
4706                         head->tw->tw_claim = UHCI_NOT_CLAIMED;
4707                         tmp_td = head->tw->tw_hctd_head;
4708                         while (tmp_td) {
4709                                 SetTD_status(uhcip, tmp_td,
4710                                     UHCI_TD_CRC_TIMEOUT);
4711                                 tmp_td = tmp_td->tw_td_next;
4712                         }
4713                 }
4714                 head = head->outst_td_next;
4715         }
4716 
4717         /*
4718          * Process the td which was completed before shifting from normal
4719          * mode to polled mode
4720          */
4721         if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
4722                 uhci_process_submitted_td_queue(uhcip);
4723                 uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
4724         } else if (flag) {
4725                 /* Process the completed/timed out commands */
4726                 uhci_process_submitted_td_queue(uhcip);
4727         }
4728 
4729         /* Re-register the control/bulk/intr commands' timeout handler */
4730         if (uhcip->uhci_cmd_timeout_id) {
4731                 uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
4732                     (void *)uhcip, UHCI_ONE_SECOND);
4733         }
4734 
4735         mutex_exit(&uhcip->uhci_int_mutex);
4736 }
4737 
4738 
4739 /*
4740  * uhci_wait_for_sof:
4741  *      Wait for the start of the next frame (implying any changes made in the
4742  *      lattice have now taken effect).
4743  *      To be sure this is the case, we wait for the completion of the current
4744  *      frame (which might have already been pending), then another complete
4745  *      frame to ensure everything has taken effect.
4746  */
4747 int
4748 uhci_wait_for_sof(uhci_state_t *uhcip)
4749 {
4750         int                     n, error;
4751         ushort_t                cmd_reg;
4752         usb_frame_number_t      before_frame_number, after_frame_number;
4753         clock_t                 rval;
4754         USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
4755             "uhci_wait_for_sof: uhcip = %p", (void *)uhcip);
4756 
4757         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4758 
4759         error = uhci_state_is_operational(uhcip);
4760 
4761         if (error != USB_SUCCESS) {
4762 
4763                 return (error);
4764         }
4765 
4766         before_frame_number = uhci_get_sw_frame_number(uhcip);
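             /*
              * Setting the IOC bit on the driver's static SOF TD
              * (uhci_sof_td) causes an interrupt at each frame boundary; the
              * interrupt handler then signals uhci_cv_SOF, which is waited
              * on below.
              */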
4767         for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
4768                 SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
4769                 uhcip->uhci_cv_signal = B_TRUE;
4770 
4771                 rval = cv_reltimedwait(&uhcip->uhci_cv_SOF,
4772                     &uhcip->uhci_int_mutex, UHCI_ONE_SECOND, TR_CLOCK_TICK);
4773 
4774                 after_frame_number = uhci_get_sw_frame_number(uhcip);
4775                 if ((rval == -1) &&
4776                     (after_frame_number <= before_frame_number)) {
4777                         cmd_reg = Get_OpReg16(USBCMD);
4778                         Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
4779                         Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
4780                         after_frame_number = uhci_get_sw_frame_number(uhcip);
4781                 }
4782                 before_frame_number = after_frame_number;
4783         }
4784 
4785         SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);
4786 
4787         return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);
4788 }
4789 
4790 
4791 /*
4792  * uhci_allocate_periodic_in_resource:
4793  *      Allocate interrupt/isochronous request structure for the
4794  *      interrupt/isochronous IN transfer.
4795  */
4796 int
4797 uhci_allocate_periodic_in_resource(
4798         uhci_state_t            *uhcip,
4799         uhci_pipe_private_t     *pp,
4800         uhci_trans_wrapper_t    *tw,
4801         usb_flags_t             flags)
4802 {
4803         size_t                  length = 0;
4804         usb_opaque_t            client_periodic_in_reqp;
4805         usb_intr_req_t          *cur_intr_req;
4806         usb_isoc_req_t          *curr_isoc_reqp;
4807         usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4808 
4809         USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4810             "uhci_allocate_periodic_in_resource:\n\t"
4811             "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x",
4812             (void *)ph, (void *)pp, (void *)tw, flags);
4813 
4814         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4815 
4816         /* Check the current periodic in request pointer */
4817         if (tw->tw_curr_xfer_reqp) {
4818                 USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4819                     "uhci_allocate_periodic_in_resource: Interrupt "
4820                     "request structure already exists: "
4821                     "allocation failed");
4822 
4823                 return (USB_SUCCESS);
4824         }
4825 
4826         /* Get the client periodic in request pointer */
4827         client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
4828 
4829         /*
4830          * If it is a periodic IN request and the periodic request is
4831          * NULL, allocate a corresponding usb periodic IN request for the
4832          * current periodic polling request and copy the information
4833          * from the saved periodic request structure.
4834          */
4835         if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
4836                 /* Get the interrupt transfer length */
4837                 length = ((usb_intr_req_t *)client_periodic_in_reqp)->
4838                     intr_len;
4839 
4840                 cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
4841                     (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
4842                 if (cur_intr_req == NULL) {
4843                         USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4844                             "uhci_allocate_periodic_in_resource: Interrupt "
4845                             "request structure allocation failed");
4846 
4847                         return (USB_NO_RESOURCES);
4848                 }
4849 
4850                 /* Check and save the timeout value */
4851                 tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
4852                     USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout: 0;
4853                 tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
4854                 tw->tw_length = cur_intr_req->intr_len;
4855         } else {
4856                 ASSERT(client_periodic_in_reqp != NULL);
4857 
4858                 if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
4859                     (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
4860                     NULL) {
4861                         USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4862                             "uhci_allocate_periodic_in_resource: Isochronous "
4863                             "request structure allocation failed");
4864 
4865                         return (USB_NO_RESOURCES);
4866                 }
4867 
4868                 /*
4869                  * Save the client's isochronous request pointer and
4870                  * length of isochronous transfer in transfer wrapper.
4871                  * The dup'ed request is saved in pp_client_periodic_in_reqp
4872                  */
4873                 tw->tw_curr_xfer_reqp =
4874                     (usb_opaque_t)pp->pp_client_periodic_in_reqp;
4875                 pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
4876         }
4877 
4878         mutex_enter(&ph->p_mutex);
4879         ph->p_req_count++;
4880         mutex_exit(&ph->p_mutex);
4881 
4882         return (USB_SUCCESS);
4883 }
4884 
4885 
4886 /*
4887  * uhci_deallocate_periodic_in_resource:
4888  *      Deallocate interrupt/isochronous request structure for the
4889  *      interrupt/isochronous IN transfer.
4890  */
4891 void
4892 uhci_deallocate_periodic_in_resource(
4893         uhci_state_t            *uhcip,
4894         uhci_pipe_private_t     *pp,
4895         uhci_trans_wrapper_t    *tw)
4896 {
4897         usb_opaque_t            curr_xfer_reqp;
4898         usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4899 
4900         USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4901             "uhci_deallocate_periodic_in_resource: "
4902             "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4903 
4904         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4905 
4906         curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4907         if (curr_xfer_reqp) {
4908                 /*
4909                  * Reset the periodic IN request and isoc
4910                  * request pointers to NULL.
4911                  */
4912                 tw->tw_curr_xfer_reqp = NULL;
4913                 tw->tw_isoc_req = NULL;
4914 
4915                 mutex_enter(&ph->p_mutex);
4916                 ph->p_req_count--;
4917                 mutex_exit(&ph->p_mutex);
4918 
4919                 /*
4920                  * Free pre-allocated interrupt or isochronous requests.
4921                  */
4922                 switch (UHCI_XFER_TYPE(&ph->p_ep)) {
4923                 case USB_EP_ATTR_INTR:
4924                         usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4925                         break;
4926                 case USB_EP_ATTR_ISOCH:
4927                         usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
4928                         break;
4929                 }
4930         }
4931 }
4932 
4933 
4934 /*
4935  * uhci_hcdi_callback()
4936  *      convenience wrapper around usba_hcdi_callback()
4937  */
4938 void
4939 uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
4940     usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
4941 {
4942         usb_opaque_t    curr_xfer_reqp;
4943 
4944         USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4945             "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4946             (void *)ph, (void *)tw, cr);
4947 
4948         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4949 
4950         if (tw && tw->tw_curr_xfer_reqp) {
4951                 curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4952                 tw->tw_curr_xfer_reqp = NULL;
4953                 tw->tw_isoc_req = NULL;
4954         } else {
4955                 ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4956 
4957                 curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4958                 pp->pp_client_periodic_in_reqp = NULL;
4959         }
4960 
4961         ASSERT(curr_xfer_reqp != NULL);
4962 
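             /*
              * Drop uhci_int_mutex across the USBA callback so that a client
              * driver submitting a new request from its callback context
              * does not deadlock re-entering the HCD.
              */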
4963         mutex_exit(&uhcip->uhci_int_mutex);
4964         usba_hcdi_cb(ph, curr_xfer_reqp, cr);
4965         mutex_enter(&uhcip->uhci_int_mutex);
4966 }
4967 
4968 
4969 /*
4970  * uhci_state_is_operational:
4971  *
4972  * Check the Host controller state and return proper values.
4973  */
4974 int
4975 uhci_state_is_operational(uhci_state_t  *uhcip)
4976 {
4977         int     val;
4978 
4979         ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4980 
4981         switch (uhcip->uhci_hc_soft_state) {
4982         case UHCI_CTLR_INIT_STATE:
4983         case UHCI_CTLR_SUSPEND_STATE:
4984                 val = USB_FAILURE;
4985                 break;
4986         case UHCI_CTLR_OPERATIONAL_STATE:
4987                 val = USB_SUCCESS;
4988                 break;
4989         case UHCI_CTLR_ERROR_STATE:
4990                 val = USB_HC_HARDWARE_ERROR;
4991                 break;
4992         default:
4993                 val = USB_FAILURE;
4994                 break;
4995         }
4996 
4997         return (val);
4998 }
4999 
5000 
5001 #ifdef DEBUG
5002 static void
5003 uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
5004 {
5005         uint_t  *ptr = (uint_t *)td;
5006 
5007 #ifndef lint
5008         _NOTE(NO_COMPETING_THREADS_NOW);
5009 #endif
5010         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5011             "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
5012         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5013             "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
5014         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5015             "\tBytes xfered    = %d", td->tw->tw_bytes_xfered);
5016         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5017             "\tBytes Pending   = %d", td->tw->tw_bytes_pending);
5018         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5019             "Queue Head Details:");
5020         uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);
5021 
5022 #ifndef lint
5023         _NOTE(COMPETING_THREADS_NOW);
5024 #endif
5025 }
5026 
5027 
5028 static void
5029 uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
5030 {
5031         uint_t  *ptr = (uint_t *)qh;
5032 
5033         USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5034             "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
5035 }
5036 #endif