/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>

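/*
 * Transmit-path tunables, defined elsewhere in the driver.  For example,
 * hxge_bcopy_thresh sets the fragment size below which data is copied
 * into the premapped transmit buffer instead of being DMA-bound.
 */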
extern uint32_t hxge_reclaim_pending;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern uint32_t hxge_tx_minfree;
extern uint32_t hxge_tx_intr_thres;
extern uint32_t hxge_tx_max_gathers;
extern uint32_t hxge_tx_tiny_pack;
extern uint32_t hxge_tx_use_bcopy;

static int hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp);

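/*
 * Taskq callback: reclaim completed transmit descriptors for the ring,
 * then notify the MAC layer that the ring may have room again.
 */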
void
hxge_tx_ring_task(void *arg)
{
        p_tx_ring_t     ring = (p_tx_ring_t)arg;

        MUTEX_ENTER(&ring->lock);
        (void) hxge_txdma_reclaim(ring->hxgep, ring, 0);
        MUTEX_EXIT(&ring->lock);

        mac_tx_ring_update(ring->hxgep->mach, ring->ring_handle);
}

static void
hxge_tx_ring_dispatch(p_tx_ring_t ring)
{
        /*
         * Kick the ring task to reclaim some buffers.
         */
        (void) ddi_taskq_dispatch(ring->taskq,
            hxge_tx_ring_task, (void *)ring, DDI_SLEEP);
}

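/*
 * Transmit entry point called by the MAC layer for a ring.  Returns NULL
 * on success; if the packet cannot be posted (e.g. no free descriptors),
 * the mblk is returned to the caller to signal flow control and a reclaim
 * task is dispatched.
 */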
mblk_t *
hxge_tx_ring_send(void *arg, mblk_t *mp)
{
        p_hxge_ring_handle_t    rhp = (p_hxge_ring_handle_t)arg;
        p_hxge_t                hxgep;
        p_tx_ring_t             tx_ring_p;
        int                     status;

        ASSERT(rhp != NULL);
        ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));

        hxgep = rhp->hxgep;
        tx_ring_p = hxgep->tx_rings->rings[rhp->index];
        ASSERT(hxgep == tx_ring_p->hxgep);

        status = hxge_start(hxgep, tx_ring_p, mp);
        if (status != 0) {
                hxge_tx_ring_dispatch(tx_ring_p);
                return (mp);
        }

        return ((mblk_t *)NULL);
}

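/*
 * Build and post the transmit descriptors for one packet.  Small fragments
 * are bcopy'd into the premapped per-descriptor buffer (the first 16 bytes
 * of which hold the internal packet header); larger fragments are DMA-bound
 * in place.  Returns 0 on success, nonzero if the packet could not be sent.
 */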
static int
hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
{
        int                     dma_status, status = 0;
        p_tx_desc_t             tx_desc_ring_vp;
        hpi_handle_t            hpi_desc_handle;
        hxge_os_dma_handle_t    tx_desc_dma_handle;
        p_tx_desc_t             tx_desc_p;
        p_tx_msg_t              tx_msg_ring;
        p_tx_msg_t              tx_msg_p;
        tx_desc_t               tx_desc, *tmp_desc_p;
        tx_desc_t               sop_tx_desc, *sop_tx_desc_p;
        p_tx_pkt_header_t       hdrp;
        p_tx_pkt_hdr_all_t      pkthdrp;
        uint8_t                 npads = 0;
        uint64_t                dma_ioaddr;
        uint32_t                dma_flags;
        int                     last_bidx;
        uint8_t                 *b_rptr;
        caddr_t                 kaddr;
        uint32_t                nmblks;
        uint32_t                ngathers;
        uint32_t                clen;
        int                     len;
        uint32_t                pkt_len, pack_len, min_len;
        uint32_t                bcopy_thresh;
        int                     i, cur_index, sop_index;
        uint16_t                tail_index;
        boolean_t               tail_wrap = B_FALSE;
        hxge_dma_common_t       desc_area;
        hxge_os_dma_handle_t    dma_handle;
        ddi_dma_cookie_t        dma_cookie;
        hpi_handle_t            hpi_handle;
        p_mblk_t                nmp;
        p_mblk_t                t_mp;
        uint32_t                ncookies;
        boolean_t               good_packet;
        boolean_t               mark_mode = B_FALSE;
        p_hxge_stats_t          statsp;
        p_hxge_tx_ring_stats_t  tdc_stats;
        t_uscalar_t             start_offset = 0;
        t_uscalar_t             stuff_offset = 0;
        t_uscalar_t             end_offset = 0;
        t_uscalar_t             value = 0;
        t_uscalar_t             cksum_flags = 0;
        boolean_t               cksum_on = B_FALSE;
        uint32_t                boff = 0;
        uint64_t                tot_xfer_len = 0, tmp_len = 0;
        boolean_t               header_set = B_FALSE;
        tdc_tdr_kick_t          kick;
        uint32_t                offset;
#ifdef HXGE_DEBUG
        p_tx_desc_t             tx_desc_ring_pp;
        p_tx_desc_t             tx_desc_pp;
        tx_desc_t               *save_desc_p;
        int                     dump_len;
        int                     sad_len;
        uint64_t                sad;
        int                     xfer_len;
        uint32_t                msgsize;
#endif

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start: tx dma channel %d", tx_ring_p->tdc));
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start: Starting tdc %d desc pending %d",
            tx_ring_p->tdc, tx_ring_p->descs_pending));

        statsp = hxgep->statsp;

        if (statsp->port_stats.lb_mode == hxge_lb_normal) {
                if (!statsp->mac_stats.link_up) {
                        freemsg(mp);
                        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
                            "link not up"));
                        goto hxge_start_fail1;
                }
        }

        mac_hcksum_get(mp, &start_offset, &stuff_offset, &end_offset, &value,
            &cksum_flags);
        if (!HXGE_IS_VLAN_PACKET(mp->b_rptr)) {
                start_offset += sizeof (ether_header_t);
                stuff_offset += sizeof (ether_header_t);
        } else {
                start_offset += sizeof (struct ether_vlan_header);
                stuff_offset += sizeof (struct ether_vlan_header);
        }

        if (cksum_flags & HCK_PARTIALCKSUM) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start: mp $%p len %d "
                    "cksum_flags 0x%x (partial checksum) ",
                    mp, MBLKL(mp), cksum_flags));
                cksum_on = B_TRUE;
        }

        MUTEX_ENTER(&tx_ring_p->lock);
start_again:
        ngathers = 0;
        sop_index = tx_ring_p->wr_index;
#ifdef  HXGE_DEBUG
        if (tx_ring_p->descs_pending) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start: desc pending %d ",
                    tx_ring_p->descs_pending));
        }

        dump_len = (int)(MBLKL(mp));
        dump_len = (dump_len > 128) ? 128 : dump_len;

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start: tdc %d: dumping ...: b_rptr $%p "
            "(Before header reserve: ORIGINAL LEN %d)",
            tx_ring_p->tdc, mp->b_rptr, dump_len));

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start: dump packets (IP ORIGINAL b_rptr $%p): %s",
            mp->b_rptr, hxge_dump_packet((char *)mp->b_rptr, dump_len)));
#endif

        tdc_stats = tx_ring_p->tdc_stats;
        mark_mode = (tx_ring_p->descs_pending &&
            ((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending) <
            hxge_tx_minfree));

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "TX Descriptor ring: channel %d mark mode %d",
            tx_ring_p->tdc, mark_mode));

        if (!hxge_txdma_reclaim(hxgep, tx_ring_p, hxge_tx_minfree)) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
                (void) cas32((uint32_t *)&tx_ring_p->queueing, 0, 1);
                tdc_stats->tx_no_desc++;
                MUTEX_EXIT(&tx_ring_p->lock);
                status = 1;
                goto hxge_start_fail1;
        }

        nmp = mp;
        i = sop_index = tx_ring_p->wr_index;
        nmblks = 0;
        ngathers = 0;
        pkt_len = 0;
        pack_len = 0;
        clen = 0;
        last_bidx = -1;
        good_packet = B_TRUE;

        desc_area = tx_ring_p->tdc_desc;
        hpi_handle = desc_area.hpi_handle;
        hpi_desc_handle.regh = (hxge_os_acc_handle_t)
            DMA_COMMON_ACC_HANDLE(desc_area);
        hpi_desc_handle.hxgep = hxgep;
        tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
#ifdef  HXGE_DEBUG
#if defined(__i386)
        tx_desc_ring_pp = (p_tx_desc_t)(uint32_t)DMA_COMMON_IOADDR(desc_area);
#else
        tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
#endif
#endif
        tx_desc_dma_handle = (hxge_os_dma_handle_t)DMA_COMMON_HANDLE(desc_area);
        tx_msg_ring = tx_ring_p->tx_msg_ring;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: wr_index %d i %d",
            sop_index, i));

#ifdef  HXGE_DEBUG
        msgsize = msgdsize(nmp);
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start(1): wr_index %d i %d msgdsize %d",
            sop_index, i, msgsize));
#endif
        /*
         * The first 16 bytes of the premapped buffer are reserved
         * for the header.  No padding will be used.
         */
        pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
        if (hxge_tx_use_bcopy) {
                bcopy_thresh = (hxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
        } else {
                bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
        }
        while (nmp) {
                good_packet = B_TRUE;
                b_rptr = nmp->b_rptr;
                len = MBLKL(nmp);
                if (len <= 0) {
                        nmp = nmp->b_cont;
                        continue;
                }
                nmblks++;

                HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(1): nmblks %d "
                    "len %d pkt_len %d pack_len %d",
                    nmblks, len, pkt_len, pack_len));
                /*
                 * Hardware limits the transfer length to 4K.
                 * If len is more than 4K, we need to break
                 * nmp into two chunks: make the first chunk smaller
                 * than 4K.  The second chunk will be broken into
                 * less than 4K (if needed) during the next pass.
                 */
                if (len > (TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE)) {
                        if ((t_mp = dupb(nmp)) != NULL) {
                                nmp->b_wptr = nmp->b_rptr +
                                    (TX_MAX_TRANSFER_LENGTH -
                                    TX_PKT_HEADER_SIZE);
                                t_mp->b_rptr = nmp->b_wptr;
                                t_mp->b_cont = nmp->b_cont;
                                nmp->b_cont = t_mp;
                                len = MBLKL(nmp);
                        } else {
                                good_packet = B_FALSE;
                                goto hxge_start_fail2;
                        }
                }
                tx_desc.value = 0;
                tx_desc_p = &tx_desc_ring_vp[i];
#ifdef  HXGE_DEBUG
                tx_desc_pp = &tx_desc_ring_pp[i];
#endif
                tx_msg_p = &tx_msg_ring[i];
#if defined(__i386)
                hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
                hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
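                /*
                 * If this fragment is large enough to be DMA-bound and the
                 * control header has not been placed yet, consume the current
                 * descriptor for the header alone: build it in the premapped
                 * buffer and process this same fragment again on the next
                 * pass through the loop.
                 */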
                if (!header_set &&
                    ((!hxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
                    (len >= bcopy_thresh))) {
                        header_set = B_TRUE;
                        bcopy_thresh += TX_PKT_HEADER_SIZE;
                        boff = 0;
                        pack_len = 0;
                        kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
                        hdrp = (p_tx_pkt_header_t)kaddr;
                        clen = pkt_len;
                        dma_handle = tx_msg_p->buf_dma_handle;
                        dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
                        offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
                        (void) ddi_dma_sync(dma_handle,
                            offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);

                        tx_msg_p->flags.dma_type = USE_BCOPY;
                        goto hxge_start_control_header_only;
                }

                pkt_len += len;
                pack_len += len;

                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start(3): desc entry %d DESC IOADDR $%p "
                    "desc_vp $%p tx_desc_p $%p desc_pp $%p tx_desc_pp $%p "
                    "len %d pkt_len %d pack_len %d",
                    i,
                    DMA_COMMON_IOADDR(desc_area),
                    tx_desc_ring_vp, tx_desc_p,
                    tx_desc_ring_pp, tx_desc_pp,
                    len, pkt_len, pack_len));

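                /*
                 * Fragments below the bcopy threshold are copied into the
                 * premapped buffer (and, with hxge_tx_tiny_pack, may be
                 * packed into the previous descriptor); larger fragments
                 * are DMA-bound in place.
                 */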
                if (len < bcopy_thresh) {
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start(4): USE BCOPY: "));
                        if (hxge_tx_tiny_pack) {
                                uint32_t blst = TXDMA_DESC_NEXT_INDEX(i, -1,
                                    tx_ring_p->tx_wrap_mask);
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_start(5): pack"));
                                if ((pack_len <= bcopy_thresh) &&
                                    (last_bidx == blst)) {
                                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                            "==> hxge_start: pack(6) "
                                            "(pkt_len %d pack_len %d)",
                                            pkt_len, pack_len));
                                        i = blst;
                                        tx_desc_p = &tx_desc_ring_vp[i];
#ifdef  HXGE_DEBUG
                                        tx_desc_pp = &tx_desc_ring_pp[i];
#endif
                                        tx_msg_p = &tx_msg_ring[i];
                                        boff = pack_len - len;
                                        ngathers--;
                                } else if (pack_len > bcopy_thresh &&
                                    header_set) {
                                        pack_len = len;
                                        boff = 0;
                                        bcopy_thresh = hxge_bcopy_thresh;
                                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                            "==> hxge_start(7): > max NEW "
                                            "bcopy thresh %d "
                                            "pkt_len %d pack_len %d(next)",
                                            bcopy_thresh, pkt_len, pack_len));
                                }
                                last_bidx = i;
                        }
                        kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
                        if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
                                hdrp = (p_tx_pkt_header_t)kaddr;
                                header_set = B_TRUE;
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_start(7_x2): "
                                    "pkt_len %d pack_len %d (new hdrp $%p)",
                                    pkt_len, pack_len, hdrp));
                        }
                        tx_msg_p->flags.dma_type = USE_BCOPY;
                        kaddr += boff;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start(8): USE BCOPY: before bcopy "
                            "DESC IOADDR $%p entry %d bcopy packets %d "
                            "bcopy kaddr $%p bcopy ioaddr (SAD) $%p "
                            "bcopy clen %d bcopy boff %d",
                            DMA_COMMON_IOADDR(desc_area), i,
                            tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
                            clen, boff));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start: 1USE BCOPY: "));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start: 2USE BCOPY: "));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
                            "last USE BCOPY: copy from b_rptr $%p "
                            "to KADDR $%p (len %d offset %d)",
                            b_rptr, kaddr, len, boff));
                        bcopy(b_rptr, kaddr, len);
#ifdef  HXGE_DEBUG
                        dump_len = (len > 128) ? 128 : len;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start: dump packets "
                            "(After BCOPY len %d)"
                            "(b_rptr $%p): %s", len, nmp->b_rptr,
                            hxge_dump_packet((char *)nmp->b_rptr,
                            dump_len)));
#endif
                        dma_handle = tx_msg_p->buf_dma_handle;
                        dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
                        offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
                        (void) ddi_dma_sync(dma_handle,
                            offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);
                        clen = len + boff;
                        tdc_stats->tx_hdr_pkts++;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(9): "
                            "USE BCOPY: DESC IOADDR $%p entry %d "
                            "bcopy packets %d bcopy kaddr $%p "
                            "bcopy ioaddr (SAD) $%p bcopy clen %d "
                            "bcopy boff %d",
                            DMA_COMMON_IOADDR(desc_area), i,
                            tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
                            clen, boff));
                } else {
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_start(12): USE DVMA: len %d", len));
                        tx_msg_p->flags.dma_type = USE_DMA;
                        dma_flags = DDI_DMA_WRITE;
                        if (len < hxge_dma_stream_thresh) {
                                dma_flags |= DDI_DMA_CONSISTENT;
                        } else {
                                dma_flags |= DDI_DMA_STREAMING;
                        }

                        dma_handle = tx_msg_p->dma_handle;
                        dma_status = ddi_dma_addr_bind_handle(dma_handle, NULL,
                            (caddr_t)b_rptr, len, dma_flags,
                            DDI_DMA_DONTWAIT, NULL,
                            &dma_cookie, &ncookies);
                        if (dma_status == DDI_DMA_MAPPED) {
                                dma_ioaddr = dma_cookie.dmac_laddress;
                                len = (int)dma_cookie.dmac_size;
                                clen = (uint32_t)dma_cookie.dmac_size;
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_start(12_1): "
                                    "USE DVMA: len %d clen %d ngathers %d",
                                    len, clen, ngathers));
#if defined(__i386)
                                hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
                                hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
                                while (ncookies > 1) {
                                        ngathers++;
                                        /*
                                         * Each additional cookie consumes its
                                         * own gather descriptor; these entries
                                         * do not carry the SOP bit or the
                                         * other SOP-related fields.
                                         */
                                        (void) hpi_txdma_desc_gather_set(
                                            hpi_desc_handle, &tx_desc,
                                            (ngathers - 1), mark_mode,
                                            ngathers, dma_ioaddr, clen);
                                        tx_msg_p->tx_msg_size = clen;
                                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                            "==> hxge_start:  DMA "
                                            "ncookie %d ngathers %d "
                                            "dma_ioaddr $%p len %d"
                                            "desc $%p descp $%p (%d)",
                                            ncookies, ngathers,
                                            dma_ioaddr, clen,
                                            *tx_desc_p, tx_desc_p, i));

                                        ddi_dma_nextcookie(dma_handle,
                                            &dma_cookie);
                                        dma_ioaddr = dma_cookie.dmac_laddress;

                                        len = (int)dma_cookie.dmac_size;
                                        clen = (uint32_t)dma_cookie.dmac_size;
                                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                            "==> hxge_start(12_2): "
                                            "USE DVMA: len %d clen %d ",
                                            len, clen));

                                        i = TXDMA_DESC_NEXT_INDEX(i, 1,
                                            tx_ring_p->tx_wrap_mask);
                                        tx_desc_p = &tx_desc_ring_vp[i];
#if defined(__i386)
                                        hpi_desc_handle.regp =
                                            (uint32_t)tx_desc_p;
#else
                                        hpi_desc_handle.regp =
                                            (uint64_t)tx_desc_p;
#endif
                                        tx_msg_p = &tx_msg_ring[i];
                                        tx_msg_p->flags.dma_type = USE_NONE;
                                        tx_desc.value = 0;
                                        ncookies--;
                                }
                                tdc_stats->tx_ddi_pkts++;
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_start: DMA: ddi packets %d",
                                    tdc_stats->tx_ddi_pkts));
                        } else {
                                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                                    "dma mapping failed for %d "
                                    "bytes addr $%p flags %x (%d)",
                                    len, b_rptr, dma_flags, dma_status));
                                good_packet = B_FALSE;
                                tdc_stats->tx_dma_bind_fail++;
                                tx_msg_p->flags.dma_type = USE_NONE;
                                status = 1;
                                goto hxge_start_fail2;
                        }
                } /* ddi dvma */

                nmp = nmp->b_cont;
hxge_start_control_header_only:
#if defined(__i386)
                hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
                hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
                ngathers++;

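                /*
                 * Stage the start-of-packet (SOP) descriptor in a local
                 * copy; it is written to the ring only after the whole
                 * gather chain is built, so the hardware never sees a
                 * partially described packet.  Non-SOP gather descriptors
                 * can be written to the ring immediately.
                 */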
                if (ngathers == 1) {
#ifdef  HXGE_DEBUG
                        save_desc_p = &sop_tx_desc;
#endif
                        sop_tx_desc_p = &sop_tx_desc;
                        sop_tx_desc_p->value = 0;
                        sop_tx_desc_p->bits.tr_len = clen;
                        sop_tx_desc_p->bits.sad = dma_ioaddr >> 32;
                        sop_tx_desc_p->bits.sad_l = dma_ioaddr & 0xffffffff;
                } else {
#ifdef  HXGE_DEBUG
                        save_desc_p = &tx_desc;
#endif
                        tmp_desc_p = &tx_desc;
                        tmp_desc_p->value = 0;
                        tmp_desc_p->bits.tr_len = clen;
                        tmp_desc_p->bits.sad = dma_ioaddr >> 32;
                        tmp_desc_p->bits.sad_l = dma_ioaddr & 0xffffffff;

                        tx_desc_p->value = tmp_desc_p->value;
                }

                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start(13): Desc_entry %d ngathers %d "
                    "desc_vp $%p tx_desc_p $%p "
                    "len %d clen %d pkt_len %d pack_len %d nmblks %d "
                    "dma_ioaddr (SAD) $%p mark %d",
                    i, ngathers, tx_desc_ring_vp, tx_desc_p,
                    len, clen, pkt_len, pack_len, nmblks,
                    dma_ioaddr, mark_mode));

#ifdef HXGE_DEBUG
                hpi_desc_handle.hxgep = hxgep;
                hpi_desc_handle.function.function = 0;
                hpi_desc_handle.function.instance = hxgep->instance;
                sad = save_desc_p->bits.sad;
                sad = (sad << 32) | save_desc_p->bits.sad_l;
                xfer_len = save_desc_p->bits.tr_len;

                HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
                    "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
                    "mark %d sop %d\n",
                    save_desc_p->value, sad, save_desc_p->bits.tr_len,
                    xfer_len, save_desc_p->bits.num_ptr,
                    save_desc_p->bits.mark, save_desc_p->bits.sop));

                hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, i);
#endif

                tx_msg_p->tx_msg_size = clen;
                i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
                if (ngathers > hxge_tx_max_gathers) {
                        good_packet = B_FALSE;
                        mac_hcksum_get(mp, &start_offset, &stuff_offset,
                            &end_offset, &value, &cksum_flags);

                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_start(14): pull msg - "
                            "len %d pkt_len %d ngathers %d",
                            len, pkt_len, ngathers));
                        goto hxge_start_fail2;
                }
        } /* while (nmp) */

        tx_msg_p->tx_message = mp;
        tx_desc_p = &tx_desc_ring_vp[sop_index];
#if defined(__i386)
        hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
        hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif

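        /*
         * Build the 16-byte internal transmit packet header at the front
         * of the premapped buffer; the hardware reads length and checksum
         * information from it.
         */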
        pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
        pkthdrp->reserved = 0;
        hdrp->value = 0;
        (void) hxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
            (pkt_len - TX_PKT_HEADER_SIZE), npads, pkthdrp);

        /*
         * The hardware header should not be counted as part of the frame
         * when determining the frame size.
         */
        if ((pkt_len - TX_PKT_HEADER_SIZE) > (STD_FRAME_SIZE - ETHERFCSL)) {
                tdc_stats->tx_jumbo_pkts++;
        }

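        /*
         * Zero-pad runt frames up to the device minimum (msg_min plus the
         * internal header) and patch the SOP descriptor's transfer length
         * to match.
         */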
        min_len = (hxgep->msg_min + TX_PKT_HEADER_SIZE + (npads * 2));
        if (pkt_len < min_len) {
                /* Assume we use bcopy to premapped buffers */
                kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_start(14-1): < (msg_min + 16)"
                    "len %d pkt_len %d min_len %d bzero %d ngathers %d",
                    len, pkt_len, min_len, (min_len - pkt_len), ngathers));
                bzero((kaddr + pkt_len), (min_len - pkt_len));
                pkt_len = tx_msg_p->tx_msg_size = min_len;

                sop_tx_desc_p->bits.tr_len = min_len;

                HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
                tx_desc_p->value = sop_tx_desc_p->value;

                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_start(14-2): < msg_min - "
                    "len %d pkt_len %d min_len %d ngathers %d",
                    len, pkt_len, min_len, ngathers));
        }

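        /*
         * For partial checksum offload, record the L4 start and stuff
         * offsets in the internal header.  The header stores them in
         * 2-byte units, hence the shift right by one.
         */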
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: cksum_flags 0x%x ",
            cksum_flags));
        if (cksum_flags & HCK_PARTIALCKSUM) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start: cksum_flags 0x%x (partial checksum) ",
                    cksum_flags));
                cksum_on = B_TRUE;
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start: from IP cksum_flags 0x%x "
                    "(partial checksum) "
                    "start_offset %d stuff_offset %d",
                    cksum_flags, start_offset, stuff_offset));
                tmp_len = (uint64_t)(start_offset >> 1);
                hdrp->value |= (tmp_len << TX_PKT_HEADER_L4START_SHIFT);
                tmp_len = (uint64_t)(stuff_offset >> 1);
                hdrp->value |= (tmp_len << TX_PKT_HEADER_L4STUFF_SHIFT);

                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_start: from IP cksum_flags 0x%x "
                    "(partial checksum) "
                    "after SHIFT start_offset %d stuff_offset %d",
                    cksum_flags, start_offset, stuff_offset));
        }

        /*
         * Update the total transfer length in the control header.  Note
         * that pkt_len already includes the 16-byte header and any padding,
         * and that Hydra differs from Neptune, where
         * tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE).
         */
        tot_xfer_len = pkt_len;
        tmp_len = hdrp->value |
            (tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start(15_x1): setting SOP "
            "tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
            "0x%llx hdrp->value 0x%llx",
            tot_xfer_len, tot_xfer_len, pkt_len, tmp_len, hdrp->value));
#if defined(_BIG_ENDIAN)
        hdrp->value = ddi_swap64(tmp_len);
#else
        hdrp->value = tmp_len;
#endif
        HXGE_DEBUG_MSG((hxgep,
            TX_CTL, "==> hxge_start(15_x2): setting SOP "
            "after SWAP: tot_xfer_len 0x%llx pkt_len %d "
            "tmp_len 0x%llx hdrp->value 0x%llx",
            tot_xfer_len, pkt_len, tmp_len, hdrp->value));

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(15): setting SOP "
            "wr_index %d tot_xfer_len (%d) pkt_len %d npads %d",
            sop_index, tot_xfer_len, pkt_len, npads));

        sop_tx_desc_p->bits.sop = 1;
        sop_tx_desc_p->bits.mark = mark_mode;
        sop_tx_desc_p->bits.num_ptr = ngathers;

        if (mark_mode)
                tdc_stats->tx_marks++;

        HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(16): set SOP done"));

#ifdef HXGE_DEBUG
        hpi_desc_handle.hxgep = hxgep;
        hpi_desc_handle.function.function = 0;
        hpi_desc_handle.function.instance = hxgep->instance;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
            "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
            save_desc_p->value, sad, save_desc_p->bits.tr_len,
            xfer_len, save_desc_p->bits.num_ptr, save_desc_p->bits.mark,
            save_desc_p->bits.sop));
        (void) hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, sop_index);

        dump_len = (pkt_len > 128) ? 128 : pkt_len;
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start: dump packets(17) (after sop set, len "
            " (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
            "ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
            (char *)hdrp, hxge_dump_packet((char *)hdrp, dump_len)));
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_start(18): TX desc sync: sop_index %d", sop_index));
#endif

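        /*
         * Flush the new descriptors to the device.  If the gather chain
         * wrapped past the end of the ring, sync in two pieces: the tail
         * of the ring first, then the remainder at the beginning.
         */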
        if ((ngathers == 1) || (tx_ring_p->wr_index < i)) {
                (void) ddi_dma_sync(tx_desc_dma_handle,
                    sop_index * sizeof (tx_desc_t),
                    ngathers * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);

                HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(19): sync 1 "
                    "cs_off = 0x%02X cs_s_off = 0x%02X "
                    "pkt_len %d ngathers %d sop_index %d\n",
                    stuff_offset, start_offset,
                    pkt_len, ngathers, sop_index));
        } else { /* more than one descriptor and wrap around */
                uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;

                (void) ddi_dma_sync(tx_desc_dma_handle,
                    sop_index * sizeof (tx_desc_t),
                    nsdescs * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(20): sync 1 "
                    "cs_off = 0x%02X cs_s_off = 0x%02X "
                    "pkt_len %d ngathers %d sop_index %d\n",
                    stuff_offset, start_offset, pkt_len, ngathers, sop_index));

                (void) ddi_dma_sync(tx_desc_dma_handle, 0,
                    (ngathers - nsdescs) * sizeof (tx_desc_t),
                    DDI_DMA_SYNC_FORDEV);
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(21): sync 2 "
                    "cs_off = 0x%02X cs_s_off = 0x%02X "
                    "pkt_len %d ngathers %d sop_index %d\n",
                    stuff_offset, start_offset,
                    pkt_len, ngathers, sop_index));
        }

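        /*
         * Advance the software write index; if it wrapped past the end of
         * the ring, toggle the wrap flag that accompanies the tail pointer
         * in the kick register.
         */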
        tail_index = tx_ring_p->wr_index;
        tail_wrap = tx_ring_p->wr_index_wrap;

        tx_ring_p->wr_index = i;
        if (tx_ring_p->wr_index <= tail_index) {
                tx_ring_p->wr_index_wrap = !tail_wrap;
        }

        tx_ring_p->descs_pending += ngathers;
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX kick: "
            "channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
            tx_ring_p->tdc, tx_ring_p->wr_index, tx_ring_p->wr_index_wrap,
            ngathers, tx_ring_p->descs_pending));
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX KICKING: "));

        kick.value = 0;
        kick.bits.wrap = tx_ring_p->wr_index_wrap;
        kick.bits.tail = (uint16_t)tx_ring_p->wr_index;

        /* Write the kick register to hand the new tail to the hardware */
        TXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep),
            TDC_TDR_KICK, (uint8_t)tx_ring_p->tdc, kick.value);
        tdc_stats->tx_starts++;
        MUTEX_EXIT(&tx_ring_p->lock);
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
        return (status);

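/*
 * Unwind path: the packet could not be posted.  Zero any descriptors
 * already built for it and release their DMA resources before dropping
 * the ring lock.
 */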
hxge_start_fail2:
        if (good_packet == B_FALSE) {
                cur_index = sop_index;
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: clean up"));
                for (i = 0; i < ngathers; i++) {
                        tx_desc_p = &tx_desc_ring_vp[cur_index];
#if defined(__i386)
                        hpi_handle.regp = (uint32_t)tx_desc_p;
#else
                        hpi_handle.regp = (uint64_t)tx_desc_p;
#endif
                        tx_msg_p = &tx_msg_ring[cur_index];
                        (void) hpi_txdma_desc_set_zero(hpi_handle, 1);
                        if (tx_msg_p->flags.dma_type == USE_DVMA) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "tx_desc_p = %X index = %d",
                                    tx_desc_p, tx_ring_p->rd_index));
                                (void) dvma_unload(tx_msg_p->dvma_handle,
                                    0, -1);
                                tx_msg_p->dvma_handle = NULL;
                                if (tx_ring_p->dvma_wr_index ==
                                    tx_ring_p->dvma_wrap_mask)
                                        tx_ring_p->dvma_wr_index = 0;
                                else
                                        tx_ring_p->dvma_wr_index++;
                                tx_ring_p->dvma_pending--;
                        } else if (tx_msg_p->flags.dma_type == USE_DMA) {
                                if (ddi_dma_unbind_handle(
                                    tx_msg_p->dma_handle)) {
                                        cmn_err(CE_WARN, "hxge_start: "
                                            "ddi_dma_unbind_handle failed");
                                }
                        }
                        tx_msg_p->flags.dma_type = USE_NONE;
                        cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
                            tx_ring_p->tx_wrap_mask);
                }
        }

        MUTEX_EXIT(&tx_ring_p->lock);

hxge_start_fail1:
        /* Add FMA to check the access handle hxge_hregh */
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
        return (status);
}