5045 use atomic_{inc,dec}_* instead of atomic_add_*
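The patch applies one mechanical rewrite across this file: wherever a 32-bit counter changes by exactly one, atomic_add_32(&x, 1) and atomic_add_32(&x, -1) become atomic_inc_32(&x) and atomic_dec_32(&x), which name the intent directly. Calls with a variable delta, such as atomic_add_32(&rq->pending, frag_cnt), are deliberately left untouched. A minimal sketch of the pattern (refcnt and delta are hypothetical, for illustration only):

    #include <sys/types.h>
    #include <sys/atomic.h>

    static volatile uint32_t refcnt;	/* hypothetical counter */

    static void
    adjust(uint32_t delta)
    {
    	atomic_inc_32(&refcnt);		/* was: atomic_add_32(&refcnt, 1) */
    	atomic_dec_32(&refcnt);		/* was: atomic_add_32(&refcnt, -1) */
    	atomic_add_32(&refcnt, delta);	/* variable delta: stays atomic_add_32 */
    }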
--- old/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright © 2003-2011 Emulex. All rights reserved. */
23 23
24 24 /*
25 25 * Source file containing the Receive Path handling
26 26 * functions
27 27 */
28 28 #include <oce_impl.h>
29 29
30 30
31 31 void oce_rx_pool_free(char *arg);
32 32 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
33 33 static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
34 34 size_t size, int flags);
35 35
36 36 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
37 37 struct oce_nic_rx_cqe *cqe);
38 38 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
39 39 struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
40 40 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
41 41 static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
42 42 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
43 43 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
44 44 struct oce_nic_rx_cqe *cqe);
45 45 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
46 46 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
47 47 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
48 48
49 49 #pragma inline(oce_rx)
50 50 #pragma inline(oce_rx_bcopy)
51 51 #pragma inline(oce_rq_charge)
52 52 #pragma inline(oce_rx_insert_tag)
53 53 #pragma inline(oce_set_rx_oflags)
54 54 #pragma inline(oce_rx_drop_pkt)
55 55 #pragma inline(oce_rqb_alloc)
56 56 #pragma inline(oce_rqb_free)
57 57 #pragma inline(oce_rq_post_buffer)
58 58
59 59 static ddi_dma_attr_t oce_rx_buf_attr = {
60 60 DMA_ATTR_V0, /* version number */
61 61 0x0000000000000000ull, /* low address */
62 62 0xFFFFFFFFFFFFFFFFull, /* high address */
63 63 0x00000000FFFFFFFFull, /* dma counter max */
64 64 OCE_DMA_ALIGNMENT, /* alignment */
65 65 0x000007FF, /* burst sizes */
66 66 0x00000001, /* minimum transfer size */
67 67 0x00000000FFFFFFFFull, /* maximum transfer size */
68 68 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
69 69 1, /* scatter/gather list length */
70 70 0x00000001, /* granularity */
71 71 DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING /* DMA flags */
72 72 };
73 73
74 74 /*
75 75 * function to create a DMA buffer pool for RQ
76 76 *
77 77 * rq - pointer to the RQ to supply with buffers
78 78 * buf_size - requested buffer size (unused; frag_size plus
79 79 * OCE_RQE_BUF_HEADROOM is used instead)
80 80 *
81 81 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
82 82 */
83 83 int
84 84 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
85 85 {
86 86 int size;
87 87 int cnt;
88 88 int ret;
89 89 oce_rq_bdesc_t *rqbd;
90 90
91 91 _NOTE(ARGUNUSED(buf_size));
92 92 rqbd = rq->rq_bdesc_array;
93 93 size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
94 94 for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
95 95 rq->rqb_freelist[cnt] = rqbd;
96 96 ret = oce_rqb_ctor(rqbd, rq,
97 97 size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
98 98 if (ret != DDI_SUCCESS) {
99 99 goto rqb_fail;
100 100 }
101 101 }
102 102 rq->rqb_free = rq->cfg.nbufs;
103 103 rq->rqb_rc_head = 0;
104 104 rq->rqb_next_free = 0;
105 105 return (DDI_SUCCESS);
106 106
107 107 rqb_fail:
108 108 oce_rqb_cache_destroy(rq);
109 109 return (DDI_FAILURE);
110 110 } /* oce_rqb_cache_create */
111 111
112 112 /*
113 113 * function to destroy the RQ DMA buffer cache
114 114 *
115 115 * rq - pointer to rq structure
116 116 *
117 117 * return none
118 118 */
119 119 void
120 120 oce_rqb_cache_destroy(struct oce_rq *rq)
121 121 {
122 122 oce_rq_bdesc_t *rqbd = NULL;
123 123 int cnt;
124 124
125 125 rqbd = rq->rq_bdesc_array;
126 126 for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
127 127 oce_rqb_dtor(rqbd);
128 128 }
129 129 } /* oce_rqb_cache_destroy */
130 130
131 131 /*
132 132 * RQ buffer destructor function
133 133 *
134 134 * rqbd - pointer to rq buffer descriptor
135 135 *
136 136 * return none
137 137 */
138 138 static void
139 139 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
140 140 {
141 141 if ((rqbd == NULL) || (rqbd->rq == NULL)) {
142 142 return;
143 143 }
144 144 if (rqbd->mp != NULL) {
145 145 rqbd->fr_rtn.free_arg = NULL;
146 146 freemsg(rqbd->mp);
147 147 rqbd->mp = NULL;
148 148 }
149 149 oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
150 150 } /* oce_rqb_dtor */
151 151
152 152 /*
153 153 * RQ buffer constructor function
154 154 *
155 155 * rqbd - pointer to rq buffer descriptor
156 156 * rq - pointer to RQ structure
157 157 * size - size of the buffer
158 158 * flags - DDI_DMA_* flags passed to oce_alloc_dma_buffer()
159 159 *
160 160 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
161 161 */
162 162 static int
163 163 oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
164 164 {
165 165 struct oce_dev *dev;
166 166 oce_dma_buf_t *dbuf;
167 167
168 168 dev = rq->parent;
169 169
170 170 dbuf = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
171 171 if (dbuf == NULL) {
172 172 return (DDI_FAILURE);
173 173 }
174 174
175 175 /* Set the call back function parameters */
176 176 rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
177 177 rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
178 178 rqbd->mp = desballoc((uchar_t *)(dbuf->base),
179 179 dbuf->size, 0, &rqbd->fr_rtn);
180 180 if (rqbd->mp == NULL) {
181 181 oce_free_dma_buffer(dev, dbuf);
182 182 return (DDI_FAILURE);
183 183 }
184 184 rqbd->rqb = dbuf;
185 185 rqbd->rq = rq;
186 186 rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
187 187 rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
188 188 rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
189 189
190 190 return (DDI_SUCCESS);
191 191 } /* oce_rqb_ctor */
192 192
193 193 /*
194 194 * RQ buffer allocator function
195 195 *
196 196 * rq - pointer to RQ structure
197 197 *
198 198 * return pointer to RQ buffer descriptor
199 199 */
200 200 static inline oce_rq_bdesc_t *
201 201 oce_rqb_alloc(struct oce_rq *rq)
202 202 {
203 203 oce_rq_bdesc_t *rqbd;
204 204 uint32_t free_index;
205 205 free_index = rq->rqb_next_free;
206 206 rqbd = rq->rqb_freelist[free_index];
207 207 rq->rqb_freelist[free_index] = NULL;
208 208 rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
209 209 return (rqbd);
210 210 } /* oce_rqb_alloc */
211 211
212 212 /*
213 213 * function to free the RQ buffer
214 214 *
215 215 * rq - pointer to RQ structure
216 216 * rqbd - pointer to receive buffer descriptor
217 217 *
218 218 * return none
219 219 */
220 220 static inline void
221 221 oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
222 222 {
223 223 uint32_t free_index;
224 224 mutex_enter(&rq->rc_lock);
225 225 free_index = rq->rqb_rc_head;
226 226 rq->rqb_freelist[free_index] = rqbd;
227 227 rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
228 228 mutex_exit(&rq->rc_lock);
229 - atomic_add_32(&rq->rqb_free, 1);
229 + atomic_inc_32(&rq->rqb_free);
230 230 } /* oce_rqb_free */
231 231
232 232
233 233
234 234
235 235 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
236 236 {
237 237 pd_rxulp_db_t rxdb_reg;
238 238 int count;
239 239 struct oce_dev *dev = rq->parent;
240 240
241 241
242 242 rxdb_reg.dw0 = 0;
243 243 rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
244 244
245 245 for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
246 246 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
247 247 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
248 248 rq->buf_avail += OCE_MAX_RQ_POSTS;
249 249 nbufs -= OCE_MAX_RQ_POSTS;
250 250 }
251 251 if (nbufs > 0) {
252 252 rxdb_reg.bits.num_posted = nbufs;
253 253 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
254 254 rq->buf_avail += nbufs;
255 255 }
256 256 }
257 257 /*
258 258 * function to charge a given rq with buffers from a pool's free list
259 259 *
260 260 * dev - software handle to the device
261 261 * rq - pointer to the RQ to charge
262 262 * nbufs - numbers of buffers to be charged
263 263 *
264 264 * return number of rqes charged.
265 265 */
266 266 static inline int
267 267 oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
268 268 {
269 269 struct oce_nic_rqe *rqe;
270 270 oce_rq_bdesc_t *rqbd;
271 271 oce_rq_bdesc_t **shadow_rq;
272 272 int cnt;
273 273 int cur_index;
274 274 oce_ring_buffer_t *ring;
275 275
276 276 shadow_rq = rq->shadow_ring;
277 277 ring = rq->ring;
278 278 cur_index = ring->cidx;
279 279
280 280 for (cnt = 0; cnt < nbufs; cnt++) {
281 281 if (!repost) {
282 282 rqbd = oce_rqb_alloc(rq);
283 283 } else {
284 284 /* just repost the buffers from shadow ring */
285 285 rqbd = shadow_rq[cur_index];
286 286 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
287 287 }
288 288 /* fill the rqes */
289 289 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
290 290 struct oce_nic_rqe);
291 291 rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
292 292 rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
293 293 shadow_rq[rq->ring->pidx] = rqbd;
294 294 DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
295 295 RING_PUT(rq->ring, 1);
296 296 }
297 297
298 298 return (cnt);
299 299 } /* oce_rq_charge */
300 300
301 301 /*
302 302 * function to release the posted buffers
303 303 *
304 304 * rq - pointer to the RQ to discharge
305 305 *
306 306 * return none
307 307 */
308 308 void
309 309 oce_rq_discharge(struct oce_rq *rq)
310 310 {
311 311 oce_rq_bdesc_t *rqbd;
312 312 oce_rq_bdesc_t **shadow_rq;
313 313
314 314 shadow_rq = rq->shadow_ring;
315 315 /* Free the posted buffer since RQ is destroyed already */
316 316 while ((int32_t)rq->buf_avail > 0) {
317 317 rqbd = shadow_rq[rq->ring->cidx];
318 318 oce_rqb_free(rq, rqbd);
319 319 RING_GET(rq->ring, 1);
320 320 rq->buf_avail--;
321 321 }
322 322 }
323 323 /*
324 324 * function to process a single packet
325 325 *
326 326 * dev - software handle to the device
327 327 * rq - pointer to the RQ to charge
328 328 * cqe - Pointer to Completion Q entry
329 329 *
330 330 * return mblk pointer => success, NULL => error
331 331 */
332 332 static inline mblk_t *
333 333 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
334 334 {
335 335 mblk_t *mp;
336 336 int pkt_len;
337 337 int32_t frag_cnt = 0;
338 338 mblk_t **mblk_tail;
339 339 mblk_t *mblk_head;
340 340 int frag_size;
341 341 oce_rq_bdesc_t *rqbd;
342 342 uint16_t cur_index;
343 343 oce_ring_buffer_t *ring;
344 344 int i;
345 345
346 346 frag_cnt = cqe->u0.s.num_fragments & 0x7;
347 347 mblk_head = NULL;
348 348 mblk_tail = &mblk_head;
349 349
350 350 ring = rq->ring;
351 351 cur_index = ring->cidx;
352 352
353 353 /* Get the relevant Queue pointers */
354 354 pkt_len = cqe->u0.s.pkt_size;
355 355 for (i = 0; i < frag_cnt; i++) {
356 356 rqbd = rq->shadow_ring[cur_index];
357 357 if (rqbd->mp == NULL) {
358 358 rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
359 359 rqbd->rqb->size, 0, &rqbd->fr_rtn);
360 360 if (rqbd->mp == NULL) {
361 361 return (NULL);
362 362 }
363 363
364 364 rqbd->mp->b_rptr =
365 365 (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
366 366 }
367 367
368 368 mp = rqbd->mp;
369 369 frag_size = (pkt_len > rq->cfg.frag_size) ?
370 370 rq->cfg.frag_size : pkt_len;
371 371 mp->b_wptr = mp->b_rptr + frag_size;
372 372 pkt_len -= frag_size;
373 373 mp->b_next = mp->b_cont = NULL;
374 374 /* Chain the message mblks */
375 375 *mblk_tail = mp;
376 376 mblk_tail = &mp->b_cont;
377 377 (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
378 378 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
379 379 }
380 380
381 381 if (mblk_head == NULL) {
382 382 oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
383 383 return (NULL);
384 384 }
385 385
386 386 /* replace the buffer with new ones */
387 387 (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
388 388 atomic_add_32(&rq->pending, frag_cnt);
389 389 return (mblk_head);
390 390 } /* oce_rx */
391 391
392 392 static inline mblk_t *
393 393 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
394 394 {
395 395 mblk_t *mp;
396 396 int pkt_len;
397 397 int alloc_len;
398 398 int32_t frag_cnt = 0;
399 399 int frag_size;
400 400 oce_rq_bdesc_t *rqbd;
401 401 unsigned char *rptr;
402 402 uint32_t cur_index;
403 403 oce_ring_buffer_t *ring;
404 404 oce_rq_bdesc_t **shadow_rq;
405 405 int cnt = 0;
406 406
407 407 _NOTE(ARGUNUSED(dev));
408 408
409 409 shadow_rq = rq->shadow_ring;
410 410 pkt_len = cqe->u0.s.pkt_size;
411 411 alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
412 412 frag_cnt = cqe->u0.s.num_fragments & 0x7;
413 413
414 414 mp = allocb(alloc_len, BPRI_HI);
415 415 if (mp == NULL) {
416 416 return (NULL);
417 417 }
418 418
419 419 mp->b_rptr += OCE_RQE_BUF_HEADROOM;
420 420 rptr = mp->b_rptr;
421 421 mp->b_wptr = mp->b_rptr + pkt_len;
422 422 ring = rq->ring;
423 423
424 424 cur_index = ring->cidx;
425 425 for (cnt = 0; cnt < frag_cnt; cnt++) {
426 426 rqbd = shadow_rq[cur_index];
427 427 frag_size = (pkt_len > rq->cfg.frag_size) ?
428 428 rq->cfg.frag_size : pkt_len;
429 429 (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
430 430 bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
431 431 rptr += frag_size;
432 432 pkt_len -= frag_size;
433 433 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
434 434 }
435 435 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
436 436 return (mp);
437 437 }
438 438
439 439 static inline void
440 440 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
441 441 {
442 442 int csum_flags = 0;
443 443
444 444 /* set flags */
445 445 if (cqe->u0.s.ip_cksum_pass) {
446 446 csum_flags |= HCK_IPV4_HDRCKSUM_OK;
447 447 }
448 448
449 449 if (cqe->u0.s.l4_cksum_pass) {
450 450 csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
451 451 }
452 452
453 453 if (csum_flags) {
454 454 (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
455 455 }
456 456 }
457 457
458 458 static inline void
459 459 oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
460 460 {
461 461 struct ether_vlan_header *ehp;
462 462
463 463 (void) memmove(mp->b_rptr - VTAG_SIZE,
464 464 mp->b_rptr, 2 * ETHERADDRL);
465 465 mp->b_rptr -= VTAG_SIZE;
466 466 ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
467 467 ehp->ether_tpid = htons(ETHERTYPE_VLAN);
468 468 ehp->ether_tci = LE_16(vtag);
469 469 }
470 470
471 471 static inline void
472 472 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
473 473 {
474 474 int frag_cnt;
475 475 oce_rq_bdesc_t *rqbd;
476 476 oce_rq_bdesc_t **shadow_rq;
477 477 shadow_rq = rq->shadow_ring;
478 478 for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
479 479 rqbd = shadow_rq[rq->ring->cidx];
480 480 oce_rqb_free(rq, rqbd);
481 481 RING_GET(rq->ring, 1);
482 482 }
483 483 }
484 484
485 485
486 486 /*
487 487 * function to process a receive queue
488 488 *
489 489 * arg - pointer to the RQ to drain
490 490 *
491 491 * return number of cqes processed
492 492 */
493 493 uint16_t
494 494 oce_drain_rq_cq(void *arg)
495 495 {
496 496 struct oce_nic_rx_cqe *cqe;
497 497 struct oce_rq *rq;
498 498 mblk_t *mp = NULL;
499 499 mblk_t *mblk_head;
500 500 mblk_t **mblk_tail;
501 501 uint16_t num_cqe = 0;
502 502 struct oce_cq *cq;
503 503 struct oce_dev *dev;
504 504 int32_t frag_cnt;
505 505 uint32_t nbufs = 0;
506 506
507 507 rq = (struct oce_rq *)arg;
508 508 dev = rq->parent;
509 509 cq = rq->cq;
510 510 mblk_head = NULL;
511 511 mblk_tail = &mblk_head;
512 512
513 513 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
514 514
515 515 (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
516 516 /* dequeue till you reach an invalid cqe */
517 517 while (RQ_CQE_VALID(cqe)) {
518 518 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
519 519 frag_cnt = cqe->u0.s.num_fragments & 0x7;
520 520 /* if insufficient buffers to charge then do copy */
521 521 if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
522 522 (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
523 523 mp = oce_rx_bcopy(dev, rq, cqe);
524 524 } else {
525 525 mp = oce_rx(dev, rq, cqe);
526 526 if (mp == NULL) {
527 527 atomic_add_32(&rq->rqb_free, frag_cnt);
528 528 mp = oce_rx_bcopy(dev, rq, cqe);
529 529 }
530 530 }
531 531 if (mp != NULL) {
532 532 if (dev->function_mode & FLEX10_MODE) {
533 533 if (cqe->u0.s.vlan_tag_present &&
534 534 cqe->u0.s.qnq) {
535 535 oce_rx_insert_tag(mp,
536 536 cqe->u0.s.vlan_tag);
537 537 }
538 538 } else if (cqe->u0.s.vlan_tag_present) {
539 539 oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
540 540 }
541 541 oce_set_rx_oflags(mp, cqe);
542 542
543 543 *mblk_tail = mp;
544 544 mblk_tail = &mp->b_next;
545 545 } else {
546 546 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
547 547 }
548 548 RING_GET(rq->ring, frag_cnt);
549 549 rq->buf_avail -= frag_cnt;
550 550 nbufs += frag_cnt;
551 551
552 552 oce_rq_post_buffer(rq, frag_cnt);
553 553 RQ_CQE_INVALIDATE(cqe);
554 554 RING_GET(cq->ring, 1);
555 555 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
556 556 struct oce_nic_rx_cqe);
557 557 num_cqe++;
558 558 /* process max ring size */
559 559 if (num_cqe > dev->rx_pkt_per_intr) {
560 560 break;
561 561 }
562 562 } /* for all valid CQEs */
563 563
564 564 if (mblk_head) {
565 565 mac_rx(dev->mac_handle, NULL, mblk_head);
566 566 }
567 567 oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
568 568 return (num_cqe);
569 569 } /* oce_drain_rq_cq */
570 570
571 571 /*
572 572 * function to free mblk databuffer to the RQ pool
573 573 *
574 574 * arg - pointer to the receive buffer descriptor
575 575 *
576 576 * return none
577 577 */
578 578 void
579 579 oce_rx_pool_free(char *arg)
580 580 {
581 581 oce_rq_bdesc_t *rqbd;
582 582 struct oce_rq *rq;
583 583
584 584 /* During destroy, arg will be NULL */
585 585 if (arg == NULL) {
586 586 return;
587 587 }
588 588
589 589 /* retrieve the pointers from arg */
590 590 rqbd = (oce_rq_bdesc_t *)(void *)arg;
591 591 rq = rqbd->rq;
592 592 rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
593 593 rqbd->rqb->size, 0, &rqbd->fr_rtn);
594 594
595 595 if (rqbd->mp) {
596 596 rqbd->mp->b_rptr =
597 597 (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
598 598 }
599 599
600 600 oce_rqb_free(rq, rqbd);
601 - (void) atomic_add_32(&rq->pending, -1);
601 + (void) atomic_dec_32(&rq->pending);
602 602 } /* rx_pool_free */
603 603
604 604 /*
605 605 * function to stop the RX
606 606 *
607 607 * rq - pointer to RQ structure
608 608 *
609 609 * return none
610 610 */
611 611 void
612 612 oce_clean_rq(struct oce_rq *rq)
613 613 {
614 614 uint16_t num_cqe = 0;
615 615 struct oce_cq *cq;
616 616 struct oce_dev *dev;
617 617 struct oce_nic_rx_cqe *cqe;
618 618 int32_t ti = 0;
619 619
620 620 dev = rq->parent;
621 621 cq = rq->cq;
622 622 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
623 623 /* dequeue till you reach an invalid cqe */
624 624 for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
625 625
626 626 while (RQ_CQE_VALID(cqe)) {
627 627 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
628 628 oce_rx_drop_pkt(rq, cqe);
629 629 atomic_add_32(&rq->buf_avail,
630 630 -(cqe->u0.s.num_fragments & 0x7));
631 631 oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
632 632 RQ_CQE_INVALIDATE(cqe);
633 633 RING_GET(cq->ring, 1);
634 634 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
635 635 struct oce_nic_rx_cqe);
636 636 num_cqe++;
637 637 }
638 638 OCE_MSDELAY(1);
639 639 }
640 640 } /* oce_clean_rq */
641 641
642 642 /*
643 643 * function to start the RX
644 644 *
645 645 * rq - pointer to RQ structure
646 646 *
647 647 * return 0 (buffers are charged and posted).
648 648 */
649 649 int
650 650 oce_start_rq(struct oce_rq *rq)
651 651 {
652 652 int ret = 0;
653 653 int to_charge = 0;
654 654 struct oce_dev *dev = rq->parent;
655 655 to_charge = rq->cfg.q_len - rq->buf_avail;
656 656 to_charge = min(to_charge, rq->rqb_free);
657 657 atomic_add_32(&rq->rqb_free, -to_charge);
658 658 (void) oce_rq_charge(rq, to_charge, B_FALSE);
659 659 /* ok to do it here since Rx has not even started */
660 660 oce_rq_post_buffer(rq, to_charge);
661 661 oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
662 662 return (ret);
663 663 } /* oce_start_rq */
664 664
665 665 /* Checks for pending rx buffers with Stack */
666 666 int
667 667 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
668 668 {
669 669 int ti;
670 670 _NOTE(ARGUNUSED(dev));
671 671
672 672 for (ti = 0; ti < timeout; ti++) {
673 673 if (rq->pending > 0) {
674 674 OCE_MSDELAY(10);
675 675 continue;
676 676 } else {
677 677 rq->pending = 0;
678 678 break;
679 679 }
680 680 }
681 681 return (rq->pending);
682 682 }
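A note on the one reservation helper this file relies on: oce_drain_rq_cq() calls oce_atomic_reserve(&rq->rqb_free, frag_cnt) to claim several buffers at once and, if oce_rx() then fails, gives them back with atomic_add_32(&rq->rqb_free, frag_cnt), a variable-delta call the patch correctly leaves as atomic_add_*. The helper itself is defined elsewhere in the driver; the sketch below only illustrates the usual shape of such a routine, assuming a compare-and-swap loop, and is not the driver's actual implementation:

    #include <sys/types.h>
    #include <sys/atomic.h>

    /*
     * Illustrative only: atomically reserve 'n' items from *count when
     * that many are available; return the prior value, or -1 if not.
     */
    static int32_t
    sketch_atomic_reserve(volatile uint32_t *count, uint32_t n)
    {
    	uint32_t oldval, newval;

    	do {
    		oldval = *count;
    		if (oldval < n)
    			return (-1);
    		newval = oldval - n;
    	} while (atomic_cas_32(count, oldval, newval) != oldval);

    	return ((int32_t)oldval);
    }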