/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_txdma.h>
#include <sys/llc1.h>

uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t hxge_tx_minfree = 64;
uint32_t hxge_tx_intr_thres = 0;
uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t hxge_tx_tiny_pack = 1;
uint32_t hxge_tx_use_bcopy = 1;

extern uint32_t hxge_tx_ring_size;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern dma_method_t hxge_force_dma;
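
/*
 * A sketch of how the copy/bind thresholds above are used (inferred from
 * the variable names; the authoritative logic lives in the send path):
 * frames shorter than hxge_bcopy_thresh are bcopy'ed into pre-mapped
 * transmit buffers, while larger frames are bound for DMA, with
 * hxge_dvma_thresh and hxge_dma_stream_thresh selecting among the DVMA,
 * consistent-DMA, and streaming-DMA methods.
 */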

/* Device register access attributes for PIO.  */
extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;

/* Device descriptor access attributes for DMA.  */
extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;

/* Device buffer access attributes for DMA.  */
extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t hxge_desc_dma_attr;
extern ddi_dma_attr_t hxge_tx_dma_attr;

static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
static void hxge_unmap_txdma(p_hxge_t hxgep);
static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
static void hxge_txdma_hw_stop(p_hxge_t hxgep);

static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
    p_tx_mbox_t *tx_mbox_p);
static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p);
static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, tdc_stat_t cs);
static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_ring_p);
static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);

hxge_status_t
hxge_init_txdma_channels(p_hxge_t hxgep)
{
        hxge_status_t   status = HXGE_OK;
        block_reset_t   reset_reg;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));

        /*
         * Reset the TDC block from the PEU to clean up any unknown
         * configuration that may be left over from a previous reboot.
         */
        reset_reg.value = 0;
        reset_reg.bits.tdc_rst = 1;
        HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);

        HXGE_DELAY(1000);

        status = hxge_map_txdma(hxgep);
        if (status != HXGE_OK) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "<== hxge_init_txdma_channels: status 0x%x", status));
                return (status);
        }

        status = hxge_txdma_hw_start(hxgep);
        if (status != HXGE_OK) {
                hxge_unmap_txdma(hxgep);
                return (status);
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_init_txdma_channels: status 0x%x", status));

        return (HXGE_OK);
}

void
hxge_uninit_txdma_channels(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));

        hxge_txdma_hw_stop(hxgep);
        hxge_unmap_txdma(hxgep);

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels"));
}

void
hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
        size_t tsize;

        *dest_p = *src_p;
        tsize = size * entries;
        dest_p->alength = tsize;
        dest_p->nblocks = entries;
        dest_p->block_size = size;
        dest_p->offset += tsize;

        src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
        src_p->alength -= tsize;
        src_p->dma_cookie.dmac_laddress += tsize;
        src_p->dma_cookie.dmac_size -= tsize;
}
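
/*
 * Illustrative arithmetic for the carve-out above (assumed sizes, not taken
 * from the source): with entries = 1024 and size = 8, tsize is 8192, so
 * dest_p describes the first 8 KB of the parent area while src_p's kaddrp
 * and dmac_laddress advance by 8 KB and its alength and dmac_size shrink
 * by the same amount, leaving src_p ready for the next carve-out.
 */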

hxge_status_t
hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
{
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;
        hpi_handle_t    handle;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reset_txdma_channel"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
                rs = hpi_txdma_channel_reset(handle, channel);
        } else {
                rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
        }

        if (rs != HPI_SUCCESS) {
                status = HXGE_ERROR | rs;
        }

        /*
         * Reset the tail (kick) register to 0. Hardware will not reset it,
         * and a Tx overflow fatal error results if the tail is not 0 after
         * a reset.
         */
        TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reset_txdma_channel"));

        return (status);
}

hxge_status_t
hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
    tdc_int_mask_t *mask_p)
{
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "==> hxge_init_txdma_channel_event_mask"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);

        /*
         * Mask off tx_rng_oflow since it is a false alarm: the driver
         * ensures that it does not overflow the hardware and checks the
         * hardware status itself.
         */
        mask_p->bits.tx_rng_oflow = 1;
        rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
        if (rs != HPI_SUCCESS) {
                status = HXGE_ERROR | rs;
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_init_txdma_channel_event_mask"));
        return (status);
}

hxge_status_t
hxge_enable_txdma_channel(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /*
         * Use configuration data composed at init time. Write the transmit
         * ring configuration to hardware.
         */
        rs = hpi_txdma_ring_config(handle, OP_SET, channel,
            (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }

        /* Write the mailbox configuration to hardware. */
        rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
            (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }

        /* Start the DMA engine. */
        rs = hpi_txdma_channel_init_enable(handle, channel);
        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
        return (status);
}

void
hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
    int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
{
        p_tx_pkt_header_t       hdrp;
        p_mblk_t                nmp;
        uint64_t                tmp;
        size_t                  mblk_len;
        size_t                  iph_len;
        size_t                  hdrs_size;
        uint8_t                 *ip_buf;
        uint16_t                eth_type;
        uint8_t                 ipproto;
        boolean_t               is_vlan = B_FALSE;
        size_t                  eth_hdr_size;
        uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)];

        HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));

        /*
         * Caller should zero out the headers first.
         */
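        /*
         * Minimal usage sketch (illustrative, not the verbatim caller):
         *
         *      bzero(pkthdrp, sizeof (*pkthdrp));
         *      hxge_fill_tx_hdr(mp, fill_len, l4_cksum, pkt_len, npads,
         *          pkthdrp);
         *
         * where pkthdrp points at the TX_PKT_HEADER_SIZE bytes reserved
         * in front of the frame (see hxge_tx_pkt_header_reserve() below).
         */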
        hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

        if (fill_len) {
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
                    pkt_len, npads));
                tmp = (uint64_t)pkt_len;
                hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

                goto fill_tx_header_done;
        }
        tmp = (uint64_t)npads;
        hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

        /*
         * mp is the original data packet (it does not include the hardware
         * transmit header).
         */
        nmp = mp;
        mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
            mp, nmp->b_rptr, mblk_len));
        ip_buf = NULL;
        bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
        eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
            hdrp->value, eth_type));

        if (eth_type < ETHERMTU) {
                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value));
                if (*(hdrs_buf + sizeof (struct ether_header)) ==
                    LLC_SNAP_SAP) {
                        eth_type = ntohs(*((uint16_t *)(hdrs_buf +
                            sizeof (struct ether_header) + 6)));
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
                            eth_type));
                } else {
                        goto fill_tx_header_done;
                }
        } else if (eth_type == VLAN_ETHERTYPE) {
                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

                eth_type = ntohs(((struct ether_vlan_header *)
                    hdrs_buf)->ether_type);
                is_vlan = B_TRUE;
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
                    hdrp->value));
        }
        if (!is_vlan) {
                eth_hdr_size = sizeof (struct ether_header);
        } else {
                eth_hdr_size = sizeof (struct ether_vlan_header);
        }

        switch (eth_type) {
        case ETHERTYPE_IP:
                if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
                        ip_buf = nmp->b_rptr + eth_hdr_size;
                        mblk_len -= eth_hdr_size;
                        iph_len = ((*ip_buf) & 0x0f);
                        if (mblk_len > (iph_len + sizeof (uint32_t))) {
                                ip_buf = nmp->b_rptr;
                                ip_buf += eth_hdr_size;
                        } else {
                                ip_buf = NULL;
                        }
                }
                if (ip_buf == NULL) {
                        hdrs_size = 0;
                        ((p_ether_header_t)hdrs_buf)->ether_type = 0;
                        while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
                                mblk_len = (size_t)nmp->b_wptr -
                                    (size_t)nmp->b_rptr;
                                if (mblk_len >=
                                    (sizeof (hdrs_buf) - hdrs_size))
                                        mblk_len = sizeof (hdrs_buf) -
                                            hdrs_size;
                                bcopy(nmp->b_rptr,
                                    &hdrs_buf[hdrs_size], mblk_len);
                                hdrs_size += mblk_len;
                                nmp = nmp->b_cont;
                        }
                        ip_buf = hdrs_buf;
                        ip_buf += eth_hdr_size;
                        iph_len = ((*ip_buf) & 0x0f);
                }
                ipproto = ip_buf[9];

                tmp = (uint64_t)iph_len;
                hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
                tmp = (uint64_t)(eth_hdr_size >> 1);
                hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
                    "iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
                    "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
                    ipproto, tmp));
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value));
                break;

        case ETHERTYPE_IPV6:
                hdrs_size = 0;
                ((p_ether_header_t)hdrs_buf)->ether_type = 0;
                while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
                        mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
                        if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
                                mblk_len = sizeof (hdrs_buf) - hdrs_size;
                        bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
                        hdrs_size += mblk_len;
                        nmp = nmp->b_cont;
                }
                ip_buf = hdrs_buf;
                ip_buf += eth_hdr_size;

                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

                tmp = (eth_hdr_size >> 1);
                hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

                /* byte 6 is the next header protocol */
                ipproto = ip_buf[6];

                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
                    "iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
                    iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
                    "value 0x%llx", hdrp->value));
                break;

        default:
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
                goto fill_tx_header_done;
        }

        switch (ipproto) {
        case IPPROTO_TCP:
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
                if (l4_cksum) {
                        tmp = 1ull;
                        hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_hdr_init: TCP CKSUM "
                            "value 0x%llx", hdrp->value));
                }
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value));
                break;

        case IPPROTO_UDP:
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
                if (l4_cksum) {
                        tmp = 0x2ull;
                        hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
                }
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
                    hdrp->value));
                break;

        default:
                goto fill_tx_header_done;
        }

fill_tx_header_done:
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
            pkt_len, npads, hdrp->value));
        HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
        p_mblk_t newmp = NULL;

        if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "<== hxge_tx_pkt_header_reserve: allocb failed"));
                return (NULL);
        }
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_header_reserve: get new mp"));
        DB_TYPE(newmp) = M_DATA;
        newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
        linkb(newmp, mp);
        newmp->b_rptr -= TX_PKT_HEADER_SIZE;

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
            newmp->b_rptr, newmp->b_wptr));
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== hxge_tx_pkt_header_reserve: use new mp"));
        return (newmp);
}

int
hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
        uint_t          nmblks;
        ssize_t         len;
        uint_t          pkt_len;
        p_mblk_t        nmp, bmp, tmp;
        uint8_t         *b_wptr;

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
            mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

        nmp = mp;
        bmp = mp;
        nmblks = 0;
        pkt_len = 0;
        *tot_xfer_len_p = 0;

        while (nmp) {
                len = MBLKL(nmp);
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
                    len, pkt_len, nmblks, *tot_xfer_len_p));

                if (len <= 0) {
                        bmp = nmp;
                        nmp = nmp->b_cont;
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks:"
                            " len (0) pkt_len %d nmblks %d", pkt_len, nmblks));
                        continue;
                }
                *tot_xfer_len_p += len;
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
                    len, pkt_len, nmblks, *tot_xfer_len_p));

                if (len < hxge_bcopy_thresh) {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: "
                            "len %d (< thresh) pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        if (pkt_len == 0)
                                nmblks++;
                        pkt_len += len;
                        if (pkt_len >= hxge_bcopy_thresh) {
                                pkt_len = 0;
                                len = 0;
                                nmp = bmp;
                        }
                } else {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: "
                            "len %d (> thresh) pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        pkt_len = 0;
                        nmblks++;
                        /*
                         * Hardware limits the transfer length to 4K. If len
                         * is more than 4K, break it up into at most two
                         * additional blocks.
                         */
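                        /*
                         * Illustrative arithmetic (assuming
                         * TX_MAX_TRANSFER_LENGTH is 4096): a 10000-byte
                         * mblk is split below into 4096 + 4096 + 1808,
                         * i.e. the original block plus two dup'ed
                         * continuation blocks.
                         */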
                        if (len > TX_MAX_TRANSFER_LENGTH) {
                                uint32_t nsegs;

                                nsegs = 1;
                                if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
                                        ++nsegs;
                                }
                                HXGE_DEBUG_MSG((NULL, TX_CTL,
                                    "==> hxge_tx_pkt_nmblocks: "
                                    "len %d pkt_len %d nmblks %d nsegs %d",
                                    len, pkt_len, nmblks, nsegs));
                                do {
                                        b_wptr = nmp->b_rptr +
                                            TX_MAX_TRANSFER_LENGTH;
                                        if ((tmp = dupb(nmp)) == NULL) {
                                                return (0);
                                        }
                                        /*
                                         * Carve the first 4K off of nmp
                                         * and let the dup'ed block cover
                                         * the remainder.
                                         */
                                        tmp->b_rptr = b_wptr;
                                        tmp->b_cont = nmp->b_cont;
                                        nmp->b_cont = tmp;
                                        nmp->b_wptr = b_wptr;
                                        nmblks++;
                                        if (--nsegs) {
                                                nmp = tmp;
                                        }
                                } while (nsegs);
                                nmp = tmp;
                        }
                }

                /*
                 * Hardware limits the transmit gather pointers to 15.
                 */
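                /*
                 * Sketch of the check below (TX_GATHER_POINTERS_THRESHOLD
                 * is a reserve held back from the 15-pointer limit; its
                 * value is not shown here): once the running nmblks
                 * estimate plus that reserve would exceed
                 * TX_MAX_GATHER_POINTERS, the rest of the chain is
                 * coalesced into a single pulled-up block.
                 */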
                if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
                    TX_MAX_GATHER_POINTERS) {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: pull msg - "
                            "len %d pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        /* Pull all message blocks from b_cont */
                        if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
                                return (0);
                        }
                        freemsg(nmp->b_cont);
                        nmp->b_cont = tmp;
                        pkt_len = 0;
                }
                bmp = nmp;
                nmp = nmp->b_cont;
        }

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
            "nmblks %d len %d tot_xfer_len %d",
            mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
        return (nmblks);
}

boolean_t
hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
        boolean_t               status = B_TRUE;
        p_hxge_dma_common_t     tx_desc_dma_p;
        hxge_dma_common_t       desc_area;
        p_tx_desc_t             tx_desc_ring_vp;
        p_tx_desc_t             tx_desc_p;
        p_tx_desc_t             tx_desc_pp;
        tx_desc_t               r_tx_desc;
        p_tx_msg_t              tx_msg_ring;
        p_tx_msg_t              tx_msg_p;
        hpi_handle_t            handle;
        tdc_tdr_head_t          tx_head;
        uint32_t                pkt_len;
        uint_t                  tx_rd_index;
        uint16_t                head_index, tail_index;
        uint8_t                 tdc;
        boolean_t               head_wrap, tail_wrap;
        p_hxge_tx_ring_stats_t  tdc_stats;
        tdc_byte_cnt_t          byte_cnt;
        tdc_tdr_qlen_t          qlen;
        int                     rc;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));

        status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
            (nmblks != 0));
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
            tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));

        if (!status) {
                tx_desc_dma_p = &tx_ring_p->tdc_desc;
                desc_area = tx_ring_p->tdc_desc;
                tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
                tx_rd_index = tx_ring_p->rd_index;
                tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
                tx_msg_ring = tx_ring_p->tx_msg_ring;
                tx_msg_p = &tx_msg_ring[tx_rd_index];
                tdc = tx_ring_p->tdc;
                tdc_stats = tx_ring_p->tdc_stats;
                if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
                        tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
                }
                tail_index = tx_ring_p->wr_index;
                tail_wrap = tx_ring_p->wr_index_wrap;

                /*
                 * The tdc_byte_cnt register reports bytes transmitted,
                 * including any padding added to runt packets.
                 */
                handle = HXGE_DEV_HPI_HANDLE(hxgep);
                TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
                tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;

                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
                    "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
                    tdc, tx_rd_index, tail_index, tail_wrap,
                    tx_desc_p, (*(uint64_t *)tx_desc_p)));

                /*
                 * Read the hardware-maintained transmit head index and
                 * wrap bit.
                 */
                TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
                head_index = tx_head.bits.head;
                head_wrap = tx_head.bits.wrap;
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: "
                    "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
                    tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));

                /*
                 * For debug only. This can be used to verify the qlen and
                 * make sure the hardware is wrapping the Tdr correctly.
                 */
                TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
                    qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));

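                /*
                 * Ring-state semantics assumed by the tests below (a sketch
                 * of the TXDMA_RING_EMPTY/TXDMA_RING_FULL macros, inferred
                 * from their use rather than quoted from their definitions):
                 * with head == tail, matching wrap bits mean the ring is
                 * empty and differing wrap bits mean it is full.
                 */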
                if (head_index == tail_index) {
                        if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index,
                            tail_wrap) && (head_index == tx_rd_index)) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: EMPTY"));
                                return (B_TRUE);
                        }
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: Checking if ring full"));
                        if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
                            tail_wrap)) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: full"));
                                return (B_FALSE);
                        }
                }
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tx_rd_index and head_index"));

                /* XXXX: limit the # of reclaims */
                tx_desc_pp = &r_tx_desc;
                while ((tx_rd_index != head_index) &&
                    (tx_ring_p->descs_pending != 0)) {
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: Checking if pending"));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: descs_pending %d ",
                            tx_ring_p->descs_pending));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: "
                            "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
                            tx_rd_index, head_index, tx_desc_p));

                        tx_desc_pp->value = tx_desc_p->value;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: "
                            "(tx_rd_index %d head_index %d "
                            "tx_desc_p $%p (desc value 0x%llx) ",
                            tx_rd_index, head_index,
                            tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: dump desc:"));

                        /*
                         * The descriptor's tr_len field gives the bytes
                         * transferred for this descriptor.
                         */
                        pkt_len = tx_desc_pp->bits.tr_len;
                        tdc_stats->obytes += pkt_len;
                        tdc_stats->opackets += tx_desc_pp->bits.sop;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: pkt_len %d "
                            "tdc channel %d opackets %d",
                            pkt_len, tdc, tdc_stats->opackets));

                        if (tx_msg_p->flags.dma_type == USE_DVMA) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "tx_desc_p = $%p tx_desc_pp = $%p "
                                    "index = %d",
                                    tx_desc_p, tx_desc_pp,
                                    tx_ring_p->rd_index));
                                (void) dvma_unload(tx_msg_p->dvma_handle,
                                    0, -1);
                                tx_msg_p->dvma_handle = NULL;
                                if (tx_ring_p->dvma_wr_index ==
                                    tx_ring_p->dvma_wrap_mask) {
                                        tx_ring_p->dvma_wr_index = 0;
                                } else {
                                        tx_ring_p->dvma_wr_index++;
                                }
                                tx_ring_p->dvma_pending--;
                        } else if (tx_msg_p->flags.dma_type == USE_DMA) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: USE DMA"));
                                rc = ddi_dma_unbind_handle(
                                    tx_msg_p->dma_handle);
                                if (rc != 0) {
                                        cmn_err(CE_WARN, "hxge_reclaim: "
                                            "ddi_dma_unbind_handle "
                                            "failed. status %d", rc);
                                }
                        }

                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: count packets"));

                        /*
                         * Count a chained packet only once.
                         */
                        if (tx_msg_p->tx_message != NULL) {
                                freemsg(tx_msg_p->tx_message);
                                tx_msg_p->tx_message = NULL;
                        }
                        tx_msg_p->flags.dma_type = USE_NONE;
                        tx_rd_index = tx_ring_p->rd_index;
                        tx_rd_index = (tx_rd_index + 1) &
                            tx_ring_p->tx_wrap_mask;
                        tx_ring_p->rd_index = tx_rd_index;
                        tx_ring_p->descs_pending--;
                        tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
                        tx_msg_p = &tx_msg_ring[tx_rd_index];
                }

                status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
                    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
                if (status) {
                        (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
                }
        } else {
                status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
                    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "<== hxge_txdma_reclaim status = 0x%08x", status));
        return (status);
}

uint_t
hxge_tx_intr(caddr_t arg1, caddr_t arg2)
{
        p_hxge_ldv_t    ldvp = (p_hxge_ldv_t)arg1;
        p_hxge_t        hxgep = (p_hxge_t)arg2;
        p_hxge_ldg_t    ldgp;
        uint8_t         channel;
        uint32_t        vindex;
        hpi_handle_t    handle;
        tdc_stat_t      cs;
        p_tx_ring_t     *tx_rings;
        p_tx_ring_t     tx_ring_p;
        hpi_status_t    rs = HPI_SUCCESS;
        uint_t          serviced = DDI_INTR_UNCLAIMED;
        hxge_status_t   status = HXGE_OK;

        if (ldvp == NULL) {
                HXGE_DEBUG_MSG((NULL, INT_CTL,
                    "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
                return (DDI_INTR_UNCLAIMED);
        }

        if (arg2 == NULL || (void *)ldvp->hxgep != arg2) {
                hxgep = ldvp->hxgep;
        }

        /*
         * If the interface is not started, just swallow the interrupt
         * and don't rearm the logical device.
         */
        if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
                return (DDI_INTR_CLAIMED);

        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp));

        /*
         * This interrupt handler is for a specific transmit dma channel.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);

        /* Get the control and status for this channel. */
        channel = ldvp->channel;
        ldgp = ldvp->ldgp;
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
            hxgep, ldvp, channel));

        rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
        vindex = ldvp->vdma_index;
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
            channel, vindex, rs));

        if (!rs && cs.bits.marked) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_tx_intr:channel %d ring index %d "
                    "status 0x%08x (marked bit set)", channel, vindex, rs));
                tx_rings = hxgep->tx_rings->rings;
                tx_ring_p = tx_rings[vindex];
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_tx_intr:channel %d ring index %d "
                    "status 0x%08x (marked bit set, calling reclaim)",
                    channel, vindex, rs));

                MUTEX_ENTER(&tx_ring_p->lock);
                (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
                MUTEX_EXIT(&tx_ring_p->lock);
                mac_tx_update(hxgep->mach);
        }

        /*
         * Process other transmit control and status. Check the ldv state.
         */
        status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);

        /* Clear the error bits */
        TXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);

        /*
         * Rearm this logical group if this is a single device group.
         */
        if (ldgp->nldvs == 1) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
                if (status == HXGE_OK) {
                        (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
                            B_TRUE, ldgp->ldg_timer);
                }
        }
        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
        serviced = DDI_INTR_CLAIMED;
        return (serviced);
}

void
hxge_txdma_stop(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));

        (void) hxge_tx_vmac_disable(hxgep);
        (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
}

hxge_status_t
hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
{
        int             i, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;
        p_tx_ring_t     *tx_desc_rings;
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "==> hxge_txdma_hw_mode: enable mode %d", enable));

        if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: not initialized"));
                return (HXGE_ERROR);
        }
        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: NULL global ring pointer"));
                return (HXGE_ERROR);
        }
        tx_desc_rings = tx_rings->rings;
        if (tx_desc_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: NULL rings pointer"));
                return (HXGE_ERROR);
        }
        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "<== hxge_txdma_hw_mode: no dma channel allocated"));
                return (HXGE_ERROR);
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_desc_rings, ndmas));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        for (i = 0; i < ndmas; i++) {
                if (tx_desc_rings[i] == NULL) {
                        continue;
                }
                channel = tx_desc_rings[i]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_txdma_hw_mode: channel %d", channel));
                if (enable) {
                        rs = hpi_txdma_channel_enable(handle, channel);
                        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                            "==> hxge_txdma_hw_mode: channel %d (enable) "
                            "rs 0x%x", channel, rs));
                } else {
                        /*
                         * Stop the dma channel and wait for the stop-done
                         * bit. If the stop-done bit is not set, force an
                         * error so the TXC will stop. All channels bound to
                         * this port need to be stopped and reset after an
                         * interrupt error is injected.
                         */
                        rs = hpi_txdma_channel_disable(handle, channel);
                        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                            "==> hxge_txdma_hw_mode: channel %d (disable) "
                            "rs 0x%x", channel, rs));
                }
        }

        status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_txdma_hw_mode: status 0x%x", status));

        return (status);
}

void
hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
{
        hpi_handle_t handle;

        HXGE_DEBUG_MSG((hxgep, DMA_CTL,
            "==> hxge_txdma_enable_channel: channel %d", channel));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /* enable the transmit dma channels */
        (void) hpi_txdma_channel_enable(handle, channel);

        HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
}

void
hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
{
        hpi_handle_t handle;

        HXGE_DEBUG_MSG((hxgep, DMA_CTL,
            "==> hxge_txdma_disable_channel: channel %d", channel));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /* stop the transmit dma channels */
        (void) hpi_txdma_channel_disable(handle, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
}

int
hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
{
        hpi_handle_t    handle;
        int             status;
        hpi_status_t    rs = HPI_SUCCESS;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));

        /*
         * Stop the dma channel and wait for the stop-done bit. If the
         * stop-done bit is not set, inject an error.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        rs = hpi_txdma_channel_disable(handle, channel);
        status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
        if (status == HXGE_OK) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_stop_inj_err (channel %d): "
                    "stopped OK", channel));
                return (status);
        }

        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
            "(injected error but still not stopped)", channel, rs));

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));

        return (status);
}

/*ARGSUSED*/
void
hxge_fixup_txdma_rings(p_hxge_t hxgep)
{
        int             index, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));

        /*
         * For each transmit channel, reclaim each descriptor and free
         * buffers.
         */
        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: NULL ring pointer"));
                return;
        }

        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: no channel allocated"));
                return;
        }

        if (tx_rings->rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: NULL rings pointer"));
                return;
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_rings->rings, ndmas));

        for (index = 0; index < ndmas; index++) {
                channel = tx_rings->rings[index]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_fixup_txdma_rings: channel %d", channel));
                hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
                    channel);
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
{
        p_tx_ring_t ring_p;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));

        ring_p = hxge_txdma_get_ring(hxgep, channel);
        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
                return;
        }

        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fix_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }

        hxge_txdma_fixup_channel(hxgep, ring_p, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));

        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fixup_channel: NULL ring pointer"));
                return;
        }
        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fixup_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }
        MUTEX_ENTER(&ring_p->lock);
        (void) hxge_txdma_reclaim(hxgep, ring_p, 0);

        ring_p->rd_index = 0;
        ring_p->wr_index = 0;
        ring_p->ring_head.value = 0;
        ring_p->ring_kick_tail.value = 0;
        ring_p->descs_pending = 0;
        MUTEX_EXIT(&ring_p->lock);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick(p_hxge_t hxgep)
{
        int             index, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));

        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: NULL ring pointer"));
                return;
        }
        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: no channel allocated"));
                return;
        }
        if (tx_rings->rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: NULL rings pointer"));
                return;
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_rings->rings, ndmas));

        for (index = 0; index < ndmas; index++) {
                channel = tx_rings->rings[index]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_txdma_hw_kick: channel %d", channel));
                hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
                    channel);
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
{
        p_tx_ring_t ring_p;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));

        ring_p = hxge_txdma_get_ring(hxgep, channel);
        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
                return;
        }

        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_kick_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }

        hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));

        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
                return;
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
}

/*ARGSUSED*/
void
hxge_check_tx_hang(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));

        /*
         * Needs inputs from hardware for these checks: whether the head
         * index has moved since the last timeout, and the packets-not-
         * transmitted or stuffed-packet registers.
         */
1243         if (hxge_txdma_hung(hxgep)) {
1244                 hxge_fixup_hung_txdma_rings(hxgep);
1245         }
1246 
1247         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
1248 }
1249 
1250 int
1251 hxge_txdma_hung(p_hxge_t hxgep)
1252 {
1253         int             index, ndmas;
1254         uint16_t        channel;
1255         p_tx_rings_t    tx_rings;
1256         p_tx_ring_t     tx_ring_p;
1257 
1258         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));
1259 
1260         tx_rings = hxgep->tx_rings;
1261         if (tx_rings == NULL) {
1262                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1263                     "<== hxge_txdma_hung: NULL ring pointer"));
1264                 return (B_FALSE);
1265         }
1266 
1267         ndmas = tx_rings->ndmas;
1268         if (!ndmas) {
1269                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1270                     "<== hxge_txdma_hung: no channel allocated"));
1271                 return (B_FALSE);
1272         }
1273 
1274         if (tx_rings->rings == NULL) {
1275                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1276                     "<== hxge_txdma_hung: NULL rings pointer"));
1277                 return (B_FALSE);
1278         }
1279 
1280         for (index = 0; index < ndmas; index++) {
1281                 channel = tx_rings->rings[index]->tdc;
1282                 tx_ring_p = tx_rings->rings[index];
1283                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1284                     "==> hxge_txdma_hung: channel %d", channel));
1285                 if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
1286                         return (B_TRUE);
1287                 }
1288         }
1289 
1290         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));
1291 
1292         return (B_FALSE);
1293 }
1294 
1295 int
1296 hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1297 {
1298         uint16_t        head_index, tail_index;
1299         boolean_t       head_wrap, tail_wrap;
1300         hpi_handle_t    handle;
1301         tdc_tdr_head_t  tx_head;
1302         uint_t          tx_rd_index;
1303 
1304         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));
1305 
1306         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1307         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1308             "==> hxge_txdma_channel_hung: channel %d", channel));
1309         MUTEX_ENTER(&tx_ring_p->lock);
1310         (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
1311 
1312         tail_index = tx_ring_p->wr_index;
1313         tail_wrap = tx_ring_p->wr_index_wrap;
1314         tx_rd_index = tx_ring_p->rd_index;
1315         MUTEX_EXIT(&tx_ring_p->lock);
1316 
1317         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1318             "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1319             "tail_index %d tail_wrap %d ",
1320             channel, tx_rd_index, tail_index, tail_wrap));
1321         /*
1322          * Read the hardware maintained transmit head and wrap around bit.
1323          */
1324         (void) hpi_txdma_ring_head_get(handle, channel, &tx_head);
1325         head_index = tx_head.bits.head;
1326         head_wrap = tx_head.bits.wrap;
1327         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: "
1328             "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
1329             tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
1330 
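             /*
              * Illustrative note, assuming the conventional wrap-bit scheme
              * behind TXDMA_RING_EMPTY/TXDMA_RING_FULL: the ring is empty
              * when the head and tail indexes match and their wrap bits
              * agree, and full when the indexes match but the wrap bits
              * differ.  E.g. on a 32-entry ring, head 5/wrap 0 vs tail
              * 5/wrap 0 means nothing is pending, while head 5/wrap 0 vs
              * tail 5/wrap 1 means the producer has lapped the consumer.
              */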
1331         if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) &&
1332             (head_index == tx_rd_index)) {
1333                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1334                     "==> hxge_txdma_channel_hung: EMPTY"));
1335                 return (B_FALSE);
1336         }
1337         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1338             "==> hxge_txdma_channel_hung: Checking if ring full"));
1339         if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) {
1340                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1341                     "==> hxge_txdma_channel_hung: full"));
1342                 return (B_TRUE);
1343         }
1344 
1345         /* Not full: no further hardware check is done; assume not hung */
1346         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung"));
1347 
1348         return (B_FALSE);
1349 }
1350 
1351 /*ARGSUSED*/
1352 void
1353 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)
1354 {
1355         int             index, ndmas;
1356         uint16_t        channel;
1357         p_tx_rings_t    tx_rings;
1358 
1359         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings"));
1360         tx_rings = hxgep->tx_rings;
1361         if (tx_rings == NULL) {
1362                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1363                     "<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
1364                 return;
1365         }
1366         ndmas = tx_rings->ndmas;
1367         if (!ndmas) {
1368                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1369                     "<== hxge_fixup_hung_txdma_rings: no channel allocated"));
1370                 return;
1371         }
1372         if (tx_rings->rings == NULL) {
1373                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1374                     "<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
1375                 return;
1376         }
1377         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: "
1378             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1379             tx_rings, tx_rings->rings, ndmas));
1380 
1381         for (index = 0; index < ndmas; index++) {
1382                 channel = tx_rings->rings[index]->tdc;
1383                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1384                     "==> hxge_fixup_hung_txdma_rings: channel %d", channel));
1385                 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index],
1386                     channel);
1387         }
1388 
1389         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings"));
1390 }
1391 
1392 /*ARGSUSED*/
1393 void
1394 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel)
1395 {
1396         p_tx_ring_t ring_p;
1397 
1398         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel"));
1399         ring_p = hxge_txdma_get_ring(hxgep, channel);
1400         if (ring_p == NULL) {
1401                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1402                     "<== hxge_txdma_fix_hung_channel"));
1403                 return;
1404         }
1405         if (ring_p->tdc != channel) {
1406                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1407                     "<== hxge_txdma_fix_hung_channel: channel not matched "
1408                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1409                 return;
1410         }
1411         hxge_txdma_fixup_channel(hxgep, ring_p, channel);
1412 
1413         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_hung_channel"));
1414 }
1415 
1416 /*ARGSUSED*/
1417 void
1418 hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
1419     uint16_t channel)
1420 {
1421         hpi_handle_t    handle;
1422         int             status = HXGE_OK;
1423 
1424         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));
1425 
1426         if (ring_p == NULL) {
1427                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1428                     "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
1429                 return;
1430         }
1431         if (ring_p->tdc != channel) {
1432                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1433                     "<== hxge_txdma_fixup_hung_channel: channel "
1434                     "not matched ring tdc %d passed channel %d",
1435                     ring_p->tdc, channel));
1436                 return;
1437         }
1438         /* Reclaim descriptors */
1439         MUTEX_ENTER(&ring_p->lock);
1440         (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
1441         MUTEX_EXIT(&ring_p->lock);
1442 
1443         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1444         /*
1445          * Stop the DMA channel and wait for the stop-done bit. If the stop
1446          * done bit is not set, then force an error via error injection.
1447          */
1448         status = hpi_txdma_channel_disable(handle, channel);
1449         if (!(status & HPI_TXDMA_STOP_FAILED)) {
1450                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1451                     "<== hxge_txdma_fixup_hung_channel: stopped OK "
1452                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1453                 return;
1454         }
1455         /* Stop done bit will be set as a result of error injection */
1456         status = hpi_txdma_channel_disable(handle, channel);
1457         if (!(status & HPI_TXDMA_STOP_FAILED)) {
1458                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1459                     "<== hxge_txdma_fixup_hung_channel: stopped again "
1460                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1461                 return;
1462         }
1463 
1464         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1465             "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
1466             "ring tdc %d passed channel %d", ring_p->tdc, channel));
1467         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
1468 }
1469 
1470 /*ARGSUSED*/
1471 void
1472 hxge_reclaim_rings(p_hxge_t hxgep)
1473 {
1474         int             index, ndmas;
1475         uint16_t        channel;
1476         p_tx_rings_t    tx_rings;
1477         p_tx_ring_t     tx_ring_p;
1478 
1479         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings"));
1480         tx_rings = hxgep->tx_rings;
1481         if (tx_rings == NULL) {
1482                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1483                     "<== hxge_reclaim_rings: NULL ring pointer"));
1484                 return;
1485         }
1486         ndmas = tx_rings->ndmas;
1487         if (!ndmas) {
1488                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1489                     "<== hxge_reclaim_rings: no channel allocated"));
1490                 return;
1491         }
1492         if (tx_rings->rings == NULL) {
1493                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1494                     "<== hxge_reclaim_rings: NULL rings pointer"));
1495                 return;
1496         }
1497         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: "
1498             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1499             tx_rings, tx_rings->rings, ndmas));
1500 
1501         for (index = 0; index < ndmas; index++) {
1502                 channel = tx_rings->rings[index]->tdc;
1503                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1504                     "==> hxge_reclaim_rings: channel %d", channel));
1505                 tx_ring_p = tx_rings->rings[index];
1506                 MUTEX_ENTER(&tx_ring_p->lock);
1507                 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel);
1508                 MUTEX_EXIT(&tx_ring_p->lock);
1509         }
1510 
1511         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
1512 }
1513 
1514 /*
1515  * Static functions start here.
1516  */
1517 static hxge_status_t
1518 hxge_map_txdma(p_hxge_t hxgep)
1519 {
1520         int                     i, ndmas;
1521         uint16_t                channel;
1522         p_tx_rings_t            tx_rings;
1523         p_tx_ring_t             *tx_desc_rings;
1524         p_tx_mbox_areas_t       tx_mbox_areas_p;
1525         p_tx_mbox_t             *tx_mbox_p;
1526         p_hxge_dma_pool_t       dma_buf_poolp;
1527         p_hxge_dma_pool_t       dma_cntl_poolp;
1528         p_hxge_dma_common_t     *dma_buf_p;
1529         p_hxge_dma_common_t     *dma_cntl_p;
1530         hxge_status_t           status = HXGE_OK;
1531 
1532         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));
1533 
1534         dma_buf_poolp = hxgep->tx_buf_pool_p;
1535         dma_cntl_poolp = hxgep->tx_cntl_pool_p;
1536 
1537         if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1538                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1539                     "==> hxge_map_txdma: buf not allocated"));
1540                 return (HXGE_ERROR);
1541         }
1542         ndmas = dma_buf_poolp->ndmas;
1543         if (!ndmas) {
1544                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1545                     "<== hxge_map_txdma: no dma allocated"));
1546                 return (HXGE_ERROR);
1547         }
1548         dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
1549         dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1550 
1551         tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
1552         tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
1553             sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
1554 
1555         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1556             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
1557 
1558         tx_mbox_areas_p = (p_tx_mbox_areas_t)
1559             KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
1560         tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
1561             sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
1562 
1563         /*
1564          * Map descriptors from the buffer pools for each dma channel.
1565          */
1566         for (i = 0; i < ndmas; i++) {
1567                 /*
1568                  * Set up and prepare buffer blocks, descriptors and mailbox.
1569                  */
1570                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1571                 status = hxge_map_txdma_channel(hxgep, channel,
1572                     (p_hxge_dma_common_t *)&dma_buf_p[i],
1573                     (p_tx_ring_t *)&tx_desc_rings[i],
1574                     dma_buf_poolp->num_chunks[i],
1575                     (p_hxge_dma_common_t *)&dma_cntl_p[i],
1576                     (p_tx_mbox_t *)&tx_mbox_p[i]);
1577                 if (status != HXGE_OK) {
1578                         goto hxge_map_txdma_fail1;
1579                 }
1580                 tx_desc_rings[i]->index = (uint16_t)i;
1581                 tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
1582         }
1583 
1584         tx_rings->ndmas = ndmas;
1585         tx_rings->rings = tx_desc_rings;
1586         hxgep->tx_rings = tx_rings;
1587         tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
1588         hxgep->tx_mbox_areas_p = tx_mbox_areas_p;
1589 
1590         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1591             "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings));
1592         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1593             "tx_rings $%p tx_desc_rings $%p",
1594             hxgep->tx_rings, tx_desc_rings));
1595 
1596         goto hxge_map_txdma_exit;
1597 
1598 hxge_map_txdma_fail1:
1599         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1600             "==> hxge_map_txdma: uninit tx desc $%p "
1601             "(status 0x%x channel %d i %d)", hxgep, status, channel, i));
1602         i--;
1603         for (; i >= 0; i--) {
1604                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1605                 hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
1606                     tx_mbox_p[i]);
1607         }
1608 
1609         KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1610         KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1611         KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1612         KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1613 
1614 hxge_map_txdma_exit:
1615         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1616             "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel));
1617 
1618         return (status);
1619 }
1620 
1621 static void
1622 hxge_unmap_txdma(p_hxge_t hxgep)
1623 {
1624         int                     i, ndmas;
1625         uint8_t                 channel;
1626         p_tx_rings_t            tx_rings;
1627         p_tx_ring_t             *tx_desc_rings;
1628         p_tx_mbox_areas_t       tx_mbox_areas_p;
1629         p_tx_mbox_t             *tx_mbox_p;
1630         p_hxge_dma_pool_t       dma_buf_poolp;
1631 
1632         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma"));
1633 
1634         dma_buf_poolp = hxgep->tx_buf_pool_p;
1635         if (!dma_buf_poolp->buf_allocated) {
1636                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1637                     "==> hxge_unmap_txdma: buf not allocated"));
1638                 return;
1639         }
1640         ndmas = dma_buf_poolp->ndmas;
1641         if (!ndmas) {
1642                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1643                     "<== hxge_unmap_txdma: no dma allocated"));
1644                 return;
1645         }
1646         tx_rings = hxgep->tx_rings;
1648         if (tx_rings == NULL) {
1649                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1650                     "<== hxge_unmap_txdma: NULL ring pointer"));
1651                 return;
1652         }
1653         tx_desc_rings = tx_rings->rings;
1654         if (tx_desc_rings == NULL) {
1655                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1656                     "<== hxge_unmap_txdma: NULL ring pointers"));
1657                 return;
1658         }
1659         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: "
1660             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1661             tx_rings, tx_desc_rings, ndmas));
1662 
1663         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
1664         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
1665 
1666         for (i = 0; i < ndmas; i++) {
1667                 channel = tx_desc_rings[i]->tdc;
1668                 (void) hxge_unmap_txdma_channel(hxgep, channel,
1669                     (p_tx_ring_t)tx_desc_rings[i],
1670                     (p_tx_mbox_t)tx_mbox_p[i]);
1671         }
1672 
1673         KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1674         KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1675         KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1676         KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1677 
1678         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma"));
1679 }
1680 
1681 static hxge_status_t
1682 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1683     p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
1684     uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
1685     p_tx_mbox_t *tx_mbox_p)
1686 {
1687         int status = HXGE_OK;
1688 
1689         /*
1690          * Set up and prepare buffer blocks, descriptors and mailbox.
1691          */
1692         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1693             "==> hxge_map_txdma_channel (channel %d)", channel));
1694 
1695         /*
1696          * Transmit buffer blocks
1697          */
1698         status = hxge_map_txdma_channel_buf_ring(hxgep, channel,
1699             dma_buf_p, tx_desc_p, num_chunks);
1700         if (status != HXGE_OK) {
1701                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1702                     "==> hxge_map_txdma_channel (channel %d): "
1703                     "map buffer failed 0x%x", channel, status));
1704                 goto hxge_map_txdma_channel_exit;
1705         }
1706         /*
1707          * Transmit block ring, and mailbox.
1708          */
1709         hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p,
1710             tx_mbox_p);
1711 
1712         goto hxge_map_txdma_channel_exit;
1713 
1714 hxge_map_txdma_channel_fail1:
1715         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1716             "==> hxge_map_txdma_channel: unmap buf"
1717             "(status 0x%x channel %d)", status, channel));
1718         hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);
1719 
1720 hxge_map_txdma_channel_exit:
1721         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1722             "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
1723             status, channel));
1724 
1725         return (status);
1726 }
1727 
1728 /*ARGSUSED*/
1729 static void
1730 hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1731     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1732 {
1733         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1734             "==> hxge_unmap_txdma_channel (channel %d)", channel));
1735 
1736         /* unmap tx block ring, and mailbox.  */
1737         (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);
1738 
1739         /* unmap buffer blocks */
1740         (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);
1741 
1742         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
1743 }
1744 
1745 /*ARGSUSED*/
1746 static void
1747 hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
1748     p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
1749     p_tx_mbox_t *tx_mbox_p)
1750 {
1751         p_tx_mbox_t             mboxp;
1752         p_hxge_dma_common_t     cntl_dmap;
1753         p_hxge_dma_common_t     dmap;
1754         tdc_tdr_cfg_t           *tx_ring_cfig_p;
1755         tdc_tdr_kick_t          *tx_ring_kick_p;
1756         tdc_tdr_cfg_t           *tx_cs_p;
1757         tdc_int_mask_t          *tx_evmask_p;
1758         tdc_mbh_t               *mboxh_p;
1759         tdc_mbl_t               *mboxl_p;
1760         uint64_t                tx_desc_len;
1761 
1762         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1763             "==> hxge_map_txdma_channel_cfg_ring"));
1764 
1765         cntl_dmap = *dma_cntl_p;
1766 
1767         dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
1768         hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
1769             sizeof (tx_desc_t));
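
             /*
              * The descriptor area is carved out of the channel's control
              * DMA area: tx_ring_size entries of sizeof (tx_desc_t) each.
              * After this call, dmap's kernel address and DMA cookie
              * describe the descriptor memory zeroed and programmed below.
              */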
1770 
1771         /*
1772          * Zero out transmit ring descriptors.
1773          */
1774         bzero((caddr_t)dmap->kaddrp, dmap->alength);
1775         tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
1776         tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
1777         tx_cs_p = &(tx_ring_p->tx_cs);
1778         tx_evmask_p = &(tx_ring_p->tx_evmask);
1779         tx_ring_cfig_p->value = 0;
1780         tx_ring_kick_p->value = 0;
1781         tx_cs_p->value = 0;
1782         tx_evmask_p->value = 0;
1783 
1784         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1785             "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
1786             dma_channel, dmap->dma_cookie.dmac_laddress));
1787 
1788         tx_ring_cfig_p->value = 0;
1789 
1790         /* Hydra len is 11 bits and the lower 5 bits are 0s */
1791         tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
1792         tx_ring_cfig_p->value =
1793             (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
1794             (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);
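
             /*
              * Worked example: the length field holds the descriptor count
              * divided by 32, so a 2048-entry ring is programmed as
              * 2048 >> 5 = 64.  tx_ring_size must therefore be a multiple
              * of 32 for this encoding to be exact.
              */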
1795 
1796         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1797             "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
1798             dma_channel, tx_ring_cfig_p->value));
1799 
1800         tx_cs_p->bits.reset = 1;
1801 
1802         /* Map in mailbox */
1803         mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
1804         dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
1805         hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
1806         mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
1807         mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
1808         mboxh_p->value = mboxl_p->value = 0;
1809 
1810         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1811             "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1812             dmap->dma_cookie.dmac_laddress));
1813 
1814         mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
1815             TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
1816         mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
1817             TDC_MBL_MASK) >> TDC_MBL_SHIFT);
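
             /*
              * Note (assuming the usual high/low register split): the
              * 64-bit mailbox DMA address is programmed in two pieces.
              * The bits above TDC_MBH_ADDR_SHIFT go into the mailbox-high
              * register; the remaining low-order bits, masked and aligned
              * per TDC_MBL_MASK/TDC_MBL_SHIFT, go into the mailbox-low
              * register.
              */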
1818 
1819         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1820             "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1821             dmap->dma_cookie.dmac_laddress));
1822         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1823             "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
1824             mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));
1825 
1826         /*
1827          * Set page valid and no mask
1828          */
1829         tx_ring_p->page_hdl.value = 0;
1830 
1831         *tx_mbox_p = mboxp;
1832 
1833         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1834             "<== hxge_map_txdma_channel_cfg_ring"));
1835 }
1836 
1837 /*ARGSUSED*/
1838 static void
1839 hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
1840     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1841 {
1842         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1843             "==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
1844             tx_ring_p->tdc));
1845 
1846         KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
1847 
1848         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1849             "<== hxge_unmap_txdma_channel_cfg_ring"));
1850 }
1851 
1852 static hxge_status_t
1853 hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
1854     p_hxge_dma_common_t *dma_buf_p,
1855     p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
1856 {
1857         p_hxge_dma_common_t     dma_bufp, tmp_bufp;
1858         p_hxge_dma_common_t     dmap;
1859         hxge_os_dma_handle_t    tx_buf_dma_handle;
1860         p_tx_ring_t             tx_ring_p;
1861         p_tx_msg_t              tx_msg_ring;
1862         hxge_status_t           status = HXGE_OK;
1863         int                     ddi_status = DDI_SUCCESS;
1864         int                     i, j, index;
1865         uint32_t                size, bsize;
1866         uint32_t                nblocks, nmsgs;
1867         char                    qname[TASKQ_NAMELEN];
1868 
1869         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1870             "==> hxge_map_txdma_channel_buf_ring"));
1871 
1872         dma_bufp = tmp_bufp = *dma_buf_p;
1873         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1874             " hxge_map_txdma_channel_buf_ring: channel %d to map %d "
1875             "chunks bufp $%p", channel, num_chunks, dma_bufp));
1876 
1877         nmsgs = 0;
1878         for (i = 0; i < num_chunks; i++, tmp_bufp++) {
1879                 nmsgs += tmp_bufp->nblocks;
1880                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1881                     "==> hxge_map_txdma_channel_buf_ring: channel %d "
1882                     "bufp $%p nblocks %d nmsgs %d",
1883                     channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
1884         }
1885         if (!nmsgs) {
1886                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1887                     "<== hxge_map_txdma_channel_buf_ring: channel %d "
1888                     "no msg blocks", channel));
1889                 status = HXGE_ERROR;
1890 
1891                 goto hxge_map_txdma_channel_buf_ring_exit;
1892         }
1893 
1894         tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
1895         tx_ring_p->hxgep = hxgep;
1896         (void) snprintf(qname, TASKQ_NAMELEN, "hxge_%d_%d",
1897             hxgep->instance, channel);
1898         tx_ring_p->taskq = ddi_taskq_create(hxgep->dip, qname, 1,
1899             TASKQ_DEFAULTPRI, 0);
1900         if (tx_ring_p->taskq == NULL) {
                     /*
                      * Fail directly here: the fail1 path below assumes the
                      * ring lock and tx_msg_ring have already been set up.
                      */
                     KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
                     status = HXGE_ERROR;
                     goto hxge_map_txdma_channel_buf_ring_exit;
1902         }
1903 
1904         MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
1905             (void *) hxgep->interrupt_cookie);
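
             /*
              * The interrupt cookie is passed so the ring lock is created
              * at the driver's interrupt priority and may be acquired from
              * interrupt context (standard Solaris DDI usage).
              */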
1906         /*
1907          * Allocate the transmit message ring and DMA handles for packets
1908          * that will not be copied into premapped buffers.
1909          */
1910         size = nmsgs * sizeof (tx_msg_t);
1911         tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
1912         for (i = 0; i < nmsgs; i++) {
1913                 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1914                     DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
1915                 if (ddi_status != DDI_SUCCESS) {
1916                         status |= HXGE_DDI_FAILED;
1917                         break;
1918                 }
1919         }
1920 
1921         if (i < nmsgs) {
1922                 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
1923                     "Allocate handles failed."));
1924 
1925                 goto hxge_map_txdma_channel_buf_ring_fail1;
1926         }
1927         tx_ring_p->tdc = channel;
1928         tx_ring_p->tx_msg_ring = tx_msg_ring;
1929         tx_ring_p->tx_ring_size = nmsgs;
1930         tx_ring_p->num_chunks = num_chunks;
1931         if (!hxge_tx_intr_thres) {
1932                 hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
1933         }
1934         tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
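
             /*
              * The (tx_ring_size - 1) wrap mask above assumes the ring size
              * is a power of two, so that (index & tx_wrap_mask) is
              * equivalent to (index % tx_ring_size).
              */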
1935         tx_ring_p->rd_index = 0;
1936         tx_ring_p->wr_index = 0;
1937         tx_ring_p->ring_head.value = 0;
1938         tx_ring_p->ring_kick_tail.value = 0;
1939         tx_ring_p->descs_pending = 0;
1940 
1941         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1942             "==> hxge_map_txdma_channel_buf_ring: channel %d "
1943             "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
1944             channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));
1945 
1946         /*
1947          * Map in buffers from the buffer pool.
1948          */
1949         index = 0;
1950         bsize = dma_bufp->block_size;
1951 
1952         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
1953             "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
1954             dma_bufp, tx_ring_p, tx_msg_ring, bsize));
1955 
1956         for (i = 0; i < num_chunks; i++, dma_bufp++) {
1957                 bsize = dma_bufp->block_size;
1958                 nblocks = dma_bufp->nblocks;
1959                 tx_buf_dma_handle = dma_bufp->dma_handle;
1960                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1961                     "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
1962                     "size %d dma_bufp $%p",
1963                     i, sizeof (hxge_dma_common_t), dma_bufp));
1964 
1965                 for (j = 0; j < nblocks; j++) {
1966                         tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
1967                         tx_msg_ring[index].offset_index = j;
1968                         dmap = &tx_msg_ring[index++].buf_dma;
1969                         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1970                             "==> hxge_map_txdma_channel_buf_ring: j %d "
1971                             "dmap $%p", j, dmap));
1972                         hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
1973                 }
1974         }
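
             /*
              * Net effect of the loop above: the chunked buffer pool is
              * flattened into one linear tx_msg_ring; each of the nmsgs
              * slots points at a bsize-byte block within its chunk's DMA
              * area.
              */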
1975 
1976         if (i < num_chunks) {
1977                 status = HXGE_ERROR;
1978 
1979                 goto hxge_map_txdma_channel_buf_ring_fail1;
1980         }
1981 
1982         *tx_desc_p = tx_ring_p;
1983 
1984         goto hxge_map_txdma_channel_buf_ring_exit;
1985 
1986 hxge_map_txdma_channel_buf_ring_fail1:
1987         if (tx_ring_p->taskq) {
1988                 ddi_taskq_destroy(tx_ring_p->taskq);
1989                 tx_ring_p->taskq = NULL;
1990         }
1991 
1992         /* tx_msg_ring is zero-filled, so unallocated handles are NULL */
1993         for (index = 0; index < nmsgs; index++) {
1994                 if (tx_msg_ring[index].dma_handle != NULL) {
1995                         ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
1996                 }
1997         }
1998         MUTEX_DESTROY(&tx_ring_p->lock);
1999         KMEM_FREE(tx_msg_ring, size);
2000         KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2001 
2002         status = HXGE_ERROR;
2003 
2004 hxge_map_txdma_channel_buf_ring_exit:
2005         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2006             "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
2007 
2008         return (status);
2009 }
2010 
2011 /*ARGSUSED*/
2012 static void
2013 hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
2014 {
2015         p_tx_msg_t      tx_msg_ring;
2016         p_tx_msg_t      tx_msg_p;
2017         int             i;
2018 
2019         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2020             "==> hxge_unmap_txdma_channel_buf_ring"));
2021         if (tx_ring_p == NULL) {
2022                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2023                     "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2024                 return;
2025         }
2026         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2027             "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
2028             tx_ring_p->tdc));
2029 
2030         MUTEX_ENTER(&tx_ring_p->lock);
2031         tx_msg_ring = tx_ring_p->tx_msg_ring;
2032         for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2033                 tx_msg_p = &tx_msg_ring[i];
2034                 if (tx_msg_p->flags.dma_type == USE_DVMA) {
2035                         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
2036                         (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
2037                         tx_msg_p->dvma_handle = NULL;
2038                         if (tx_ring_p->dvma_wr_index ==
2039                             tx_ring_p->dvma_wrap_mask) {
2040                                 tx_ring_p->dvma_wr_index = 0;
2041                         } else {
2042                                 tx_ring_p->dvma_wr_index++;
2043                         }
2044                         tx_ring_p->dvma_pending--;
2045                 } else if (tx_msg_p->flags.dma_type == USE_DMA) {
2046                         if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
2047                                 cmn_err(CE_WARN, "hxge_unmap_tx_buf_ring: "
2048                                     "ddi_dma_unbind_handle failed.");
2049                         }
2050                 }
2051                 if (tx_msg_p->tx_message != NULL) {
2052                         freemsg(tx_msg_p->tx_message);
2053                         tx_msg_p->tx_message = NULL;
2054                 }
2055         }
2056 
2057         for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2058                 if (tx_msg_ring[i].dma_handle != NULL) {
2059                         ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2060                 }
2061         }
2062         MUTEX_EXIT(&tx_ring_p->lock);
2063 
2064         if (tx_ring_p->taskq) {
2065                 ddi_taskq_destroy(tx_ring_p->taskq);
2066                 tx_ring_p->taskq = NULL;
2067         }
2068 
2069         MUTEX_DESTROY(&tx_ring_p->lock);
2070         KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2071         KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2072 
2073         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2074             "<== hxge_unmap_txdma_channel_buf_ring"));
2075 }
2076 
2077 static hxge_status_t
2078 hxge_txdma_hw_start(p_hxge_t hxgep)
2079 {
2080         int                     i, ndmas;
2081         uint16_t                channel;
2082         p_tx_rings_t            tx_rings;
2083         p_tx_ring_t             *tx_desc_rings;
2084         p_tx_mbox_areas_t       tx_mbox_areas_p;
2085         p_tx_mbox_t             *tx_mbox_p;
2086         hxge_status_t           status = HXGE_OK;
2087         uint64_t                tmp;
2088 
2089         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));
2090 
2091         /*
2092          * Initialize the REORD table: 1. disable the VMAC, 2. reset the
2093          * FIFO error status, 3. scrub the memory and check for errors.
2094          */
2095         (void) hxge_tx_vmac_disable(hxgep);
2096 
2097         /*
2098          * Clear the error status
2099          */
2100         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2101 
2102         /*
2103          * Scrub the rtab memory for the TDC and reset the TDC.
2104          */
2105         HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
2106         HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);
2107 
2108         for (i = 0; i < 256; i++) {
2109                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2110                     (uint64_t)i);
2111 
2112                 /*
2113                  * Write the command register with an indirect read instruction
2114                  */
2115                 tmp = (0x1ULL << 30) | i;
2116                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2117 
2118                 /*
2119                  * Wait for status done
2120                  */
2121                 tmp = 0;
2122                 do {
2123                         HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2124                             &tmp);
2125                 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2126         }
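
             /*
              * Scrub protocol as used above, inferred from the register
              * usage: with the data registers zeroed, writing the bare
              * entry number to TDC_REORD_TBL_CMD performs an indirect write
              * that clears that entry; setting bit 30 requests an indirect
              * read instead, and bit 31 is the completion flag polled here.
              */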
2127 
2128         for (i = 0; i < 256; i++) {
2129                 /*
2130                  * Write the command register with an indirect read instruction
2131                  */
2132                 tmp = (0x1ULL << 30) | i;
2133                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2134 
2135                 /*
2136                  * Wait for status done
2137                  */
2138                 tmp = 0;
2139                 do {
2140                         HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2141                             &tmp);
2142                 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2143 
2144                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
2145                 if (0x1ff00ULL != (0x1ffffULL & tmp)) {
2146                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2147                             "unexpected data (hi), entry: %x, value: 0x%0llx\n",
2148                             i, (unsigned long long)tmp));
2149                         status = HXGE_ERROR;
2150                 }
2151 
2152                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
2153                 if (tmp != 0) {
2154                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2155                             "unexpected data (lo), entry: %x\n", i));
2156                         status = HXGE_ERROR;
2157                 }
2158 
2159                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
2160                 if (tmp != 0) {
2161                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2162                             "parity error, entry: %x, val 0x%llx\n",
2163                             i, (unsigned long long)tmp));
2164                         status = HXGE_ERROR;
2165                 }
2173         }
2174 
2175         if (status != HXGE_OK)
2176                 goto hxge_txdma_hw_start_exit;
2177 
2178         /*
2179          * Reset FIFO Error Status for the TDC and enable FIFO error events.
2180          */
2181         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2182         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);
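
             /*
              * TDC_FIFO_ERR_STAT is write-one-to-clear: writing 0x7 clears
              * all three FIFO error bits (reorder table parity, reorder
              * buffer DED and SEC), and writing 0x0 to the mask then
              * unmasks those events.
              */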
2183 
2184         /*
2185          * Initialize the Transmit DMAs.
2186          */
2187         tx_rings = hxgep->tx_rings;
2188         if (tx_rings == NULL) {
2189                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2190                     "<== hxge_txdma_hw_start: NULL ring pointer"));
2191                 return (HXGE_ERROR);
2192         }
2193 
2194         tx_desc_rings = tx_rings->rings;
2195         if (tx_desc_rings == NULL) {
2196                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2197                     "<== hxge_txdma_hw_start: NULL ring pointers"));
2198                 return (HXGE_ERROR);
2199         }
2200         ndmas = tx_rings->ndmas;
2201         if (!ndmas) {
2202                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2203                     "<== hxge_txdma_hw_start: no dma channel allocated"));
2204                 return (HXGE_ERROR);
2205         }
2206         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
2207             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2208             tx_rings, tx_desc_rings, ndmas));
2209 
2210         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2211         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2212 
2213         /*
2214          * Init the DMAs.
2215          */
2216         for (i = 0; i < ndmas; i++) {
2217                 channel = tx_desc_rings[i]->tdc;
2218                 status = hxge_txdma_start_channel(hxgep, channel,
2219                     (p_tx_ring_t)tx_desc_rings[i],
2220                     (p_tx_mbox_t)tx_mbox_p[i]);
2221                 if (status != HXGE_OK) {
2222                         goto hxge_txdma_hw_start_fail1;
2223                 }
2224         }
2225 
2226         (void) hxge_tx_vmac_enable(hxgep);
2227 
2228         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2229             "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
2230             hxgep->tx_rings, hxgep->tx_rings->rings));
2231         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2232             "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
2233             hxgep->tx_rings, tx_desc_rings));
2234 
2235         goto hxge_txdma_hw_start_exit;
2236 
2237 hxge_txdma_hw_start_fail1:
2238         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2239             "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
2240             status, channel, i));
2241 
2242         for (; i >= 0; i--) {
2243                 channel = tx_desc_rings[i]->tdc;
2244                 (void) hxge_txdma_stop_channel(hxgep, channel,
2245                     (p_tx_ring_t)tx_desc_rings[i],
2246                     (p_tx_mbox_t)tx_mbox_p[i]);
2247         }
2248 
2249 hxge_txdma_hw_start_exit:
2250         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2251             "==> hxge_txdma_hw_start: (status 0x%x)", status));
2252 
2253         return (status);
2254 }
2255 
2256 static void
2257 hxge_txdma_hw_stop(p_hxge_t hxgep)
2258 {
2259         int                     i, ndmas;
2260         uint16_t                channel;
2261         p_tx_rings_t            tx_rings;
2262         p_tx_ring_t             *tx_desc_rings;
2263         p_tx_mbox_areas_t       tx_mbox_areas_p;
2264         p_tx_mbox_t             *tx_mbox_p;
2265 
2266         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));
2267 
2268         tx_rings = hxgep->tx_rings;
2269         if (tx_rings == NULL) {
2270                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2271                     "<== hxge_txdma_hw_stop: NULL ring pointer"));
2272                 return;
2273         }
2274 
2275         tx_desc_rings = tx_rings->rings;
2276         if (tx_desc_rings == NULL) {
2277                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2278                     "<== hxge_txdma_hw_stop: NULL ring pointers"));
2279                 return;
2280         }
2281 
2282         ndmas = tx_rings->ndmas;
2283         if (!ndmas) {
2284                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2285                     "<== hxge_txdma_hw_stop: no dma channel allocated"));
2286                 return;
2287         }
2288 
2289         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2290             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2291 
2292         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2293         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2294 
2295         for (i = 0; i < ndmas; i++) {
2296                 channel = tx_desc_rings[i]->tdc;
2297                 (void) hxge_txdma_stop_channel(hxgep, channel,
2298                     (p_tx_ring_t)tx_desc_rings[i],
2299                     (p_tx_mbox_t)tx_mbox_p[i]);
2300         }
2301 
2302         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2303             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2304         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
2305 }
2306 
2307 static hxge_status_t
2308 hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
2309     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2310 {
2311         hxge_status_t status = HXGE_OK;
2312 
2313         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2314             "==> hxge_txdma_start_channel (channel %d)", channel));
2315         /*
2316          * TXDMA/TXC must be in stopped state.
2317          */
2318         (void) hxge_txdma_stop_inj_err(hxgep, channel);
2319 
2320         /*
2321          * Reset TXDMA channel
2322          */
2323         tx_ring_p->tx_cs.value = 0;
2324         tx_ring_p->tx_cs.bits.reset = 1;
2325         status = hxge_reset_txdma_channel(hxgep, channel,
2326             tx_ring_p->tx_cs.value);
2327         if (status != HXGE_OK) {
2328                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2329                     "==> hxge_txdma_start_channel (channel %d)"
2330                     " reset channel failed 0x%x", channel, status));
2331 
2332                 goto hxge_txdma_start_channel_exit;
2333         }
2334 
2335         /*
2336          * Initialize the TXDMA channel specific FZC control configurations.
2337          * These FZC registers pertain to each TX channel (i.e. logical
2338          * pages).
2339          */
2340         status = hxge_init_fzc_txdma_channel(hxgep, channel,
2341             tx_ring_p, tx_mbox_p);
2342         if (status != HXGE_OK) {
2343                 goto hxge_txdma_start_channel_exit;
2344         }
2345 
2346         /*
2347          * Initialize the event masks.
2348          */
2349         tx_ring_p->tx_evmask.value = 0;
2350         status = hxge_init_txdma_channel_event_mask(hxgep,
2351             channel, &tx_ring_p->tx_evmask);
2352         if (status != HXGE_OK) {
2353                 goto hxge_txdma_start_channel_exit;
2354         }
2355 
2356         /*
2357          * Load the TXDMA descriptors, buffers, and mailbox, then
2358          * initialize and enable each DMA channel.
2359          */
2360         status = hxge_enable_txdma_channel(hxgep, channel,
2361             tx_ring_p, tx_mbox_p);
2362         if (status != HXGE_OK) {
2363                 goto hxge_txdma_start_channel_exit;
2364         }
2365 
2366 hxge_txdma_start_channel_exit:
2367         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));
2368 
2369         return (status);
2370 }
2371 
2372 /*ARGSUSED*/
2373 static hxge_status_t
2374 hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
2375     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2376 {
2377         int status = HXGE_OK;
2378 
2379         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2380             "==> hxge_txdma_stop_channel: channel %d", channel));
2381 
2382         /*
2383          * Stop (disable) the TXDMA and TXC. If the stop bit is set but
2384          * STOP_N_GO is not, resetting the TXDMA will not set its reset state.
2385          */
2386         (void) hxge_txdma_stop_inj_err(hxgep, channel);
2387 
2388         /*
2389          * Reset TXDMA channel
2390          */
2391         tx_ring_p->tx_cs.value = 0;
2392         tx_ring_p->tx_cs.bits.reset = 1;
2393         status = hxge_reset_txdma_channel(hxgep, channel,
2394             tx_ring_p->tx_cs.value);
2395         if (status != HXGE_OK) {
2396                 goto hxge_txdma_stop_channel_exit;
2397         }
2398 
2399 hxge_txdma_stop_channel_exit:
2400         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));
2401 
2402         return (status);
2403 }
2404 
2405 static p_tx_ring_t
2406 hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
2407 {
2408         int             index, ndmas;
2409         uint16_t        tdc;
2410         p_tx_rings_t    tx_rings;
2411 
2412         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));
2413 
2414         tx_rings = hxgep->tx_rings;
2415         if (tx_rings == NULL) {
2416                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2417                     "<== hxge_txdma_get_ring: NULL ring pointer"));
2418                 return (NULL);
2419         }
2420         ndmas = tx_rings->ndmas;
2421         if (!ndmas) {
2422                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2423                     "<== hxge_txdma_get_ring: no channel allocated"));
2424                 return (NULL);
2425         }
2426         if (tx_rings->rings == NULL) {
2427                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2428                     "<== hxge_txdma_get_ring: NULL rings pointer"));
2429                 return (NULL);
2430         }
2431         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
2432             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2433             tx_rings, tx_rings->rings, ndmas));
2434 
2435         for (index = 0; index < ndmas; index++) {
2436                 tdc = tx_rings->rings[index]->tdc;
2437                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2438                     "==> hxge_txdma_get_ring: channel %d", tdc));
2439                 if (channel == tdc) {
2440                         HXGE_DEBUG_MSG((hxgep, TX_CTL,
2441                             "<== hxge_txdma_get_ring: tdc %d ring $%p",
2442                             tdc, tx_rings->rings[index]));
2443                         return (p_tx_ring_t)(tx_rings->rings[index]);
2444                 }
2445         }
2446 
2447         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));
2448 
2449         return (NULL);
2450 }
2451 
2452 static p_tx_mbox_t
2453 hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
2454 {
2455         int                     index, tdc, ndmas;
2456         p_tx_rings_t            tx_rings;
2457         p_tx_mbox_areas_t       tx_mbox_areas_p;
2458         p_tx_mbox_t             *tx_mbox_p;
2459 
2460         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));
2461 
2462         tx_rings = hxgep->tx_rings;
2463         if (tx_rings == NULL) {
2464                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2465                     "<== hxge_txdma_get_mbox: NULL ring pointer"));
2466                 return (NULL);
2467         }
2468         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2469         if (tx_mbox_areas_p == NULL) {
2470                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2471                     "<== hxge_txdma_get_mbox: NULL mbox pointer"));
2472                 return (NULL);
2473         }
2474         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2475 
2476         ndmas = tx_rings->ndmas;
2477         if (!ndmas) {
2478                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2479                     "<== hxge_txdma_get_mbox: no channel allocated"));
2480                 return (NULL);
2481         }
2482         if (tx_rings->rings == NULL) {
2483                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2484                     "<== hxge_txdma_get_mbox: NULL rings pointer"));
2485                 return (NULL);
2486         }
2487         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
2488             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2489             tx_rings, tx_rings->rings, ndmas));
2490 
2491         for (index = 0; index < ndmas; index++) {
2492                 tdc = tx_rings->rings[index]->tdc;
2493                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2494                     "==> hxge_txdma_get_mbox: channel %d", tdc));
2495                 if (channel == tdc) {
2496                         HXGE_DEBUG_MSG((hxgep, TX_CTL,
2497                             "<== hxge_txdma_get_mbox: tdc %d ring $%p",
2498                             tdc, tx_rings->rings[index]));
2499                         return (p_tx_mbox_t)(tx_mbox_p[index]);
2500                 }
2501         }
2502 
2503         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));
2504 
2505         return (NULL);
2506 }
2507 
2508 /*ARGSUSED*/
2509 static hxge_status_t
2510 hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
2511     tdc_stat_t cs)
2512 {
2513         hpi_handle_t            handle;
2514         uint8_t                 channel;
2515         p_tx_ring_t             *tx_rings;
2516         p_tx_ring_t             tx_ring_p;
2517         p_hxge_tx_ring_stats_t  tdc_stats;
2518         boolean_t               txchan_fatal = B_FALSE;
2519         hxge_status_t           status = HXGE_OK;
2520         tdc_drop_cnt_t          drop_cnt;
2521 
2522         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
2523         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2524         channel = ldvp->channel;
2525 
2526         tx_rings = hxgep->tx_rings->rings;
2527         tx_ring_p = tx_rings[index];
2528         tdc_stats = tx_ring_p->tdc_stats;
2529 
2530         /* Get the error counts if any */
2531         TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
2532         tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
2533         tdc_stats->count_runt += drop_cnt.bits.runt_count;
2534         tdc_stats->count_abort += drop_cnt.bits.abort_count;
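
             /*
              * The hardware drop counters are folded into the cumulative
              * soft statistics above.  The assumption (not confirmed here)
              * is that TDC_DROP_CNT clears on read, so each service adds
              * only the drops recorded since the previous read.
              */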
2535 
2536         if (cs.bits.peu_resp_err) {
2537                 tdc_stats->peu_resp_err++;
2538                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2539                     HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
2540                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2541                     "==> hxge_tx_err_evnts(channel %d): "
2542                     "fatal error: peu_resp_err", channel));
2543                 txchan_fatal = B_TRUE;
2544         }
2545 
2546         if (cs.bits.pkt_size_hdr_err) {
2547                 tdc_stats->pkt_size_hdr_err++;
2548                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2549                     HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
2550                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2551                     "==> hxge_tx_err_evnts(channel %d): "
2552                     "fatal error: pkt_size_hdr_err", channel));
2553                 txchan_fatal = B_TRUE;
2554         }
2555 
2556         if (cs.bits.runt_pkt_drop_err) {
2557                 tdc_stats->runt_pkt_drop_err++;
2558                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2559                     HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
2560                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2561                     "==> hxge_tx_err_evnts(channel %d): "
2562                     "fatal error: runt_pkt_drop_err", channel));
2563                 txchan_fatal = B_TRUE;
2564         }
2565 
2566         if (cs.bits.pkt_size_err) {
2567                 tdc_stats->pkt_size_err++;
2568                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2569                     HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
2570                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2571                     "==> hxge_tx_err_evnts(channel %d): "
2572                     "fatal error: pkt_size_err", channel));
2573                 txchan_fatal = B_TRUE;
2574         }
2575 
2576         if (cs.bits.tx_rng_oflow) {
2577                 tdc_stats->tx_rng_oflow++;
2578                 if (tdc_stats->tx_rng_oflow == 1)
2579                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2580                             "==> hxge_tx_err_evnts(channel %d): "
2581                             "fatal error: tx_rng_oflow", channel));
2582         }
2583 
2584         if (cs.bits.pref_par_err) {
2585                 tdc_stats->pref_par_err++;
2586 
2587                 /* Get the address of parity error read data */
2588                 TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
2589                     channel, &tdc_stats->errlog.value);
2590 
2591                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2592                     HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
2593                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2594                     "==> hxge_tx_err_evnts(channel %d): "
2595                     "fatal error: pref_par_err", channel));
2596                 txchan_fatal = B_TRUE;
2597         }
2598 
2599         if (cs.bits.tdr_pref_cpl_to) {
2600                 tdc_stats->tdr_pref_cpl_to++;
2601                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2602                     HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
2603                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2604                     "==> hxge_tx_err_evnts(channel %d): "
2605                     "fatal error: tdr_pref_cpl_to", channel));
2606                 txchan_fatal = B_TRUE;
2607         }
2608 
2609         if (cs.bits.pkt_cpl_to) {
2610                 tdc_stats->pkt_cpl_to++;
2611                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2612                     HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
2613                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2614                     "==> hxge_tx_err_evnts(channel %d): "
2615                     "fatal error: pkt_cpl_to", channel));
2616                 txchan_fatal = B_TRUE;
2617         }
2618 
2619         if (cs.bits.invalid_sop) {
2620                 tdc_stats->invalid_sop++;
2621                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2622                     HXGE_FM_EREPORT_TDMC_INVALID_SOP);
2623                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2624                     "==> hxge_tx_err_evnts(channel %d): "
2625                     "fatal error: invalid_sop", channel));
2626                 txchan_fatal = B_TRUE;
2627         }
2628 
2629         if (cs.bits.unexpected_sop) {
2630                 tdc_stats->unexpected_sop++;
2631                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2632                     HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
2633                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2634                     "==> hxge_tx_err_evnts(channel %d): "
2635                     "fatal error: unexpected_sop", channel));
2636                 txchan_fatal = B_TRUE;
2637         }
2638 
2639         /* Clear error injection source in case this is an injected error */
2640         TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0);
2641 
2642         if (txchan_fatal) {
2643                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2644                     " hxge_tx_err_evnts: "
2645                     " fatal error on channel %d cs 0x%llx\n",
2646                     channel, cs.value));
2647                 status = hxge_txdma_fatal_err_recover(hxgep, channel,
2648                     tx_ring_p);
2649                 if (status == HXGE_OK) {
2650                         FM_SERVICE_RESTORED(hxgep);
2651                 }
2652         }
2653 
2654         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));
2655 
2656         return (status);
2657 }
2658 
2659 hxge_status_t
2660 hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
2661 {
2662         hpi_handle_t            handle;
2663         hxge_status_t           status = HXGE_OK;
2664         tdc_fifo_err_stat_t     fifo_stat;
2665         hxge_tdc_sys_stats_t    *tdc_sys_stats;
2666 
2667         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));
2668 
2669         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2670 
2671         /*
2672          * The FIFO is shared by all channels.
2673          * Get the status of Reorder Buffer and Reorder Table Buffer Errors
2674          */
2675         HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);
2676 
2677         /*
2678          * Clear the error bits. Note that writing a 1 clears the bit. Writing
2679          * a 0 does nothing.
2680          */
2681         HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);
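        /*
         * Because the register is write-1-to-clear, writing back the value
         * just read clears exactly the bits that were set. A hypothetical
         * selective clear of only the single-bit ECC error, for example,
         * would instead look like:
         *
         *	tdc_fifo_err_stat_t clr;
         *
         *	clr.value = 0;
         *	clr.bits.reord_buf_sec_err = 1;
         *	HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, clr.value);
         */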
2682 
2683         tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
2684         if (fifo_stat.bits.reord_tbl_par_err) {
2685                 tdc_sys_stats->reord_tbl_par_err++;
2686                 HXGE_FM_REPORT_ERROR(hxgep, NULL,
2687                     HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
2688                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2689                     "==> hxge_txdma_handle_sys_errors: fatal error: "
2690                     "reord_tbl_par_err"));
2691         }
2692 
2693         if (fifo_stat.bits.reord_buf_ded_err) {
2694                 tdc_sys_stats->reord_buf_ded_err++;
2695                 HXGE_FM_REPORT_ERROR(hxgep, NULL,
2696                     HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
2697                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2698                     "==> hxge_txdma_handle_sys_errors: "
2699                     "fatal error: reord_buf_ded_err"));
2700         }
2701 
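        /*
         * A single-bit (SEC) ECC error is corrected by the hardware, so it
         * is only counted, and logged just once, here; unlike the parity
         * and double-bit (DED) errors above, it does not trigger recovery.
         */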
2702         if (fifo_stat.bits.reord_buf_sec_err) {
2703                 tdc_sys_stats->reord_buf_sec_err++;
2704                 if (tdc_sys_stats->reord_buf_sec_err == 1)
2705                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2706                             "==> hxge_txdma_handle_sys_errors: "
2707                             "reord_buf_sec_err"));
2708         }
2709 
2710         if (fifo_stat.bits.reord_tbl_par_err ||
2711             fifo_stat.bits.reord_buf_ded_err) {
2712                 status = hxge_tx_port_fatal_err_recover(hxgep);
2713                 if (status == HXGE_OK) {
2714                         FM_SERVICE_RESTORED(hxgep);
2715                 }
2716         }
2717 
2718         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));
2719 
2720         return (status);
2721 }
2722 
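/*
 * hxge_txdma_fatal_err_recover
 *
 * Attempt to recover a single transmit DMA channel from a fatal error:
 * stop the channel, reclaim its completed descriptors, reset the channel,
 * reprogram the per-channel FZC configuration and event mask, and then
 * re-enable the channel. The ring lock is held across the whole sequence.
 */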
2723 static hxge_status_t
2724 hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
2725     p_tx_ring_t tx_ring_p)
2726 {
2727         hpi_handle_t    handle;
2728         hpi_status_t    rs = HPI_SUCCESS;
2729         p_tx_mbox_t     tx_mbox_p;
2730         hxge_status_t   status = HXGE_OK;
2731 
2732         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
2733         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2734             "Recovering from TxDMAChannel#%d error...", channel));
2735 
2736         /*
2737          * Stop the DMA channel and wait for the stop-done indication. If
2738          * the stop-done bit is not set, report an error.
2739          */
2740         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2741         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
2742             channel));
2743         MUTEX_ENTER(&tx_ring_p->lock);
2744         rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
2745         if (rs != HPI_SUCCESS) {
2746                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2747                     "==> hxge_txdma_fatal_err_recover (channel %d): "
2748                     "stop failed ", channel));
2749 
2750                 goto fail;
2751         }
2752         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
2753             channel));
2754         (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
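        /* Reclaim any descriptors that the hardware has already completed. */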
2755 
2756         /*
2757          * Reset TXDMA channel
2758          */
2759         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
2760             channel));
2761         if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
2762             HPI_SUCCESS) {
2763                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2764                     "==> hxge_txdma_fatal_err_recover (channel %d)"
2765                     " reset channel failed 0x%x", channel, rs));
2766 
2767                 goto fail;
2768         }
2769         /*
2770          * Reset the tail (kick) register to 0. The hardware does not reset
2771          * it, and a nonzero tail after reset causes a fatal Tx overflow.
2772          */
2773         TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
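        /*
         * The kick register holds the ring tail index along with a wrap
         * bit; writing 0 clears both, and the matching software state
         * (wr_index, rd_index, wr_index_wrap) is zeroed below.
         */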
2774 
2775         /*
2776          * Restart TXDMA channel
2777          *
2778          * Initialize the TXDMA channel-specific FZC control configurations.
2779          * These FZC registers pertain to each TX channel (i.e., logical
2780          * pages).
2781          */
2782         tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
2783         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
2784             channel));
2785         status = hxge_init_fzc_txdma_channel(hxgep, channel,
2786             tx_ring_p, tx_mbox_p);
2787         if (status != HXGE_OK)
2788                 goto fail;
2789 
2790         /*
2791          * Initialize the event masks.
2792          */
2793         tx_ring_p->tx_evmask.value = 0;
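        /* A zero mask presumably leaves every channel event enabled. */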
2794         status = hxge_init_txdma_channel_event_mask(hxgep, channel,
2795             &tx_ring_p->tx_evmask);
2796         if (status != HXGE_OK)
2797                 goto fail;
2798 
2799         tx_ring_p->wr_index_wrap = B_FALSE;
2800         tx_ring_p->wr_index = 0;
2801         tx_ring_p->rd_index = 0;
2802 
2803         /*
2804          * Load the TXDMA descriptors, buffers, and mailbox, then initialize
2805          * and enable this DMA channel.
2806          */
2807         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
2808             channel));
2809         status = hxge_enable_txdma_channel(hxgep, channel,
2810             tx_ring_p, tx_mbox_p);
2811         MUTEX_EXIT(&tx_ring_p->lock);
2812         if (status != HXGE_OK)
2813                 goto fail;
2814 
2815         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2816             "Recovery Successful, TxDMAChannel#%d Restored", channel));
2817         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));
2818 
2819         return (HXGE_OK);
2820 
2821 fail:
2822         MUTEX_EXIT(&tx_ring_p->lock);
2823         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2824             "hxge_txdma_fatal_err_recover (channel %d): "
2825             "failed to recover this txdma channel", channel));
2826         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
2827 
2828         return (status);
2829 }
2830 
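/*
 * hxge_tx_port_fatal_err_recover
 *
 * Recover the whole transmit port from a fatal error: reset the TDC block
 * through the PEU, stop and reclaim every DMA channel while holding all of
 * the ring locks, and then restart the transmit hardware.
 */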
2831 static hxge_status_t
2832 hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
2833 {
2834         hpi_handle_t    handle;
2835         hpi_status_t    rs = HPI_SUCCESS;
2836         hxge_status_t   status = HXGE_OK;
2837         p_tx_ring_t     *tx_desc_rings;
2838         p_tx_rings_t    tx_rings;
2839         p_tx_ring_t     tx_ring_p;
2840         int             i, ndmas;
2841         uint16_t        channel;
2842         block_reset_t   reset_reg;
2843 
2844         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2845             "==> hxge_tx_port_fatal_err_recover"));
2846         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2847             "Recovering from TxPort error..."));
2848 
2849         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2850 
2851         /* Reset TDC block from PEU for this fatal error */
2852         reset_reg.value = 0;
2853         reset_reg.bits.tdc_rst = 1;
2854         HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);
2855 
2856         HXGE_DELAY(1000);
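        /* Give the TDC block reset time to take effect. */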
2857 
2858         /*
2859          * Stop each DMA channel and wait for the stop-done indication. If
2860          * the stop-done bit is not set, report an error.
2861          */
2862         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));
2863 
2864         tx_rings = hxgep->tx_rings;
2865         tx_desc_rings = tx_rings->rings;
2866         ndmas = tx_rings->ndmas;
2867 
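        /*
         * Take every ring lock up front so that no transmit thread can post
         * new descriptors while the port is being recovered; the locks are
         * dropped only after the TDC has been restarted (or in the failure
         * path below).
         */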
2868         for (i = 0; i < ndmas; i++) {
2869                 if (tx_desc_rings[i] == NULL) {
2870                         continue;
2871                 }
2872                 tx_ring_p = tx_rings->rings[i];
2873                 MUTEX_ENTER(&tx_ring_p->lock);
2874         }
2875 
2876         for (i = 0; i < ndmas; i++) {
2877                 if (tx_desc_rings[i] == NULL) {
2878                         continue;
2879                 }
2880                 channel = tx_desc_rings[i]->tdc;
2881                 tx_ring_p = tx_rings->rings[i];
2882                 rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
2883                 if (rs != HPI_SUCCESS) {
2884                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2885                             "==> hxge_tx_port_fatal_err_recover (channel %d): "
2886                             "stop failed", channel));
2887 
2888                         goto fail;
2889                 }
2890         }
2891 
2892         /*
2893          * Reclaim transmit buffers on all of the DMA channels.
2894          */
2895         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
2896         for (i = 0; i < ndmas; i++) {
2897                 if (tx_desc_rings[i] == NULL) {
2898                         continue;
2899                 }
2900                 tx_ring_p = tx_rings->rings[i];
2901                 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
2902         }
2903 
2904         /* Restart the TDC */
2905         if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
2906                 goto fail;
2907 
2908         for (i = 0; i < ndmas; i++) {
2909                 if (tx_desc_rings[i] == NULL) {
2910                         continue;
2911                 }
2912                 tx_ring_p = tx_rings->rings[i];
2913                 MUTEX_EXIT(&tx_ring_p->lock);
2914         }
2915 
2916         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2917             "Recovery Successful, TxPort Restored"));
2918         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2919             "<== hxge_tx_port_fatal_err_recover"));
2920         return (HXGE_OK);
2921 
2922 fail:
2923         for (i = 0; i < ndmas; i++) {
2924                 if (tx_desc_rings[i] == NULL) {
2925                         continue;
2926                 }
2927                 tx_ring_p = tx_rings->rings[i];
2928                 MUTEX_EXIT(&tx_ring_p->lock);
2929         }
2930 
2931         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
2932         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2933             "hxge_tx_port_fatal_err_recover: "
2934             "failed to recover the transmit port"));
2935 
2936         return (status);
2937 }