Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <emlxs.h>
28 28
29 29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 30 EMLXS_MSG_DEF(EMLXS_FCP_C);
31 31
32 32 #define EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
33 33 PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
34 34
35 35 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
36 36 Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
37 37
38 38 #define SCSI3_PERSISTENT_RESERVE_IN 0x5e
39 39 #define SCSI_INQUIRY 0x12
40 40 #define SCSI_RX_DIAG 0x1C
41 41
42 42
43 43 /*
44 44 * emlxs_handle_fcp_event
45 45 *
46 46 * Description: Process an FCP Rsp Ring completion
47 47 *
48 48 */
49 49 /* ARGSUSED */
50 50 extern void
51 51 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
52 52 {
53 53 emlxs_port_t *port = &PPORT;
54 54 emlxs_config_t *cfg = &CFG;
55 55 IOCB *cmd;
56 56 emlxs_buf_t *sbp;
57 57 fc_packet_t *pkt = NULL;
58 58 #ifdef SAN_DIAG_SUPPORT
59 59 NODELIST *ndlp;
60 60 #endif
61 61 uint32_t iostat;
62 62 uint8_t localstat;
63 63 fcp_rsp_t *rsp;
64 64 uint32_t rsp_data_resid;
65 65 uint32_t check_underrun;
66 66 uint8_t asc;
67 67 uint8_t ascq;
68 68 uint8_t scsi_status;
69 69 uint8_t sense;
70 70 uint32_t did;
71 71 uint32_t fix_it;
72 72 uint8_t *scsi_cmd;
73 73 uint8_t scsi_opcode;
74 74 uint16_t scsi_dl;
75 75 uint32_t data_rx;
76 76 uint32_t length;
77 77
78 78 cmd = &iocbq->iocb;
79 79
80 80 /* Initialize the status */
81 81 iostat = cmd->ULPSTATUS;
82 82 localstat = 0;
83 83 scsi_status = 0;
84 84 asc = 0;
85 85 ascq = 0;
86 86 sense = 0;
87 87 check_underrun = 0;
88 88 fix_it = 0;
89 89
90 90 HBASTATS.FcpEvent++;
91 91
92 92 sbp = (emlxs_buf_t *)iocbq->sbp;
93 93
94 94 if (!sbp) {
95 95 /* completion with missing xmit command */
96 96 HBASTATS.FcpStray++;
97 97
98 98 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 99 "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100 100
101 101 return;
102 102 }
103 103
104 104 HBASTATS.FcpCompleted++;
105 105
106 106 #ifdef SAN_DIAG_SUPPORT
107 107 emlxs_update_sd_bucket(sbp);
108 108 #endif /* SAN_DIAG_SUPPORT */
109 109
110 110 pkt = PRIV2PKT(sbp);
111 111
112 112 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 113 scsi_cmd = (uint8_t *)pkt->pkt_cmd;
114 114 scsi_opcode = scsi_cmd[12];
115 115 data_rx = 0;
116 116
117 117 /* Sync data in data buffer only on FC_PKT_FCP_READ */
118 118 if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 119 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 120 DDI_DMA_SYNC_FORKERNEL);
121 121
122 122 #ifdef TEST_SUPPORT
123 123 if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 124 (pkt->pkt_datalen >= 512)) {
125 125 hba->underrun_counter--;
126 126 iostat = IOSTAT_FCP_RSP_ERROR;
127 127
128 128 /* Report 512 bytes missing by adapter */
129 129 cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130 130
131 131 /* Corrupt 512 bytes of Data buffer */
132 132 bzero((uint8_t *)pkt->pkt_data, 512);
133 133
134 134 /* Set FCP response to STATUS_GOOD */
135 135 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 136 }
137 137 #endif /* TEST_SUPPORT */
138 138 }
139 139
140 140 /* Process the pkt */
141 141 mutex_enter(&sbp->mtx);
142 142
143 143 /* Check for immediate return */
144 144 if ((iostat == IOSTAT_SUCCESS) &&
145 145 (pkt->pkt_comp) &&
146 146 !(sbp->pkt_flags &
147 147 (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 148 PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 149 PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 150 PACKET_IN_ABORT | PACKET_POLLED))) {
151 151 HBASTATS.FcpGood++;
152 152
153 153 sbp->pkt_flags |=
154 154 (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 155 PACKET_COMPLETED | PACKET_ULP_OWNED);
156 156 mutex_exit(&sbp->mtx);
157 157
158 158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 159 emlxs_unswap_pkt(sbp);
160 160 #endif /* EMLXS_MODREV2X */
161 161
162 162 #ifdef FMA_SUPPORT
163 163 emlxs_check_dma(hba, sbp);
164 164 #endif /* FMA_SUPPORT */
165 165
166 166 cp->ulpCmplCmd++;
167 167 (*pkt->pkt_comp) (pkt);
168 168
169 169 #ifdef FMA_SUPPORT
170 170 if (hba->flag & FC_DMA_CHECK_ERROR) {
171 171 emlxs_thread_spawn(hba, emlxs_restart_thread,
172 172 NULL, NULL);
173 173 }
174 174 #endif /* FMA_SUPPORT */
175 175
176 176 return;
177 177 }
178 178
179 179 /*
180 180 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
181 181 * is reported.
182 182 */
183 183
184 184 /* Check if a response buffer was not provided */
185 185 if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
186 186 goto done;
187 187 }
188 188
189 189 EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
190 190 DDI_DMA_SYNC_FORKERNEL);
191 191
192 192 /* Get the response buffer pointer */
193 193 rsp = (fcp_rsp_t *)pkt->pkt_resp;
194 194
195 195 /* Validate the response payload */
196 196 if (!rsp->fcp_u.fcp_status.resid_under &&
197 197 !rsp->fcp_u.fcp_status.resid_over) {
198 198 rsp->fcp_resid = 0;
199 199 }
200 200
201 201 if (!rsp->fcp_u.fcp_status.rsp_len_set) {
202 202 rsp->fcp_response_len = 0;
203 203 }
204 204
205 205 if (!rsp->fcp_u.fcp_status.sense_len_set) {
206 206 rsp->fcp_sense_len = 0;
207 207 }
208 208
209 209 length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
210 210 LE_SWAP32(rsp->fcp_sense_len);
211 211
212 212 if (length > pkt->pkt_rsplen) {
213 213 iostat = IOSTAT_RSP_INVALID;
214 214 pkt->pkt_data_resid = pkt->pkt_datalen;
215 215 goto done;
216 216 }
217 217
218 218 /* Set the valid response flag */
219 219 sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
220 220
221 221 scsi_status = rsp->fcp_u.fcp_status.scsi_status;
222 222
223 223 #ifdef SAN_DIAG_SUPPORT
224 224 ndlp = (NODELIST *)iocbq->node;
225 225 if (scsi_status == SCSI_STAT_QUE_FULL) {
226 226 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
227 227 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
228 228 } else if (scsi_status == SCSI_STAT_BUSY) {
229 229 emlxs_log_sd_scsi_event(port,
230 230 SD_SCSI_SUBCATEGORY_DEVBSY,
231 231 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
232 232 }
233 233 #endif
234 234
235 235 /*
236 236 * Convert a task abort to a check condition with no data
237 237 * transferred. We saw a data corruption when Solaris received
238 238 * a Task Abort from a tape.
239 239 */
240 240
241 241 if (scsi_status == SCSI_STAT_TASK_ABORT) {
242 242 EMLXS_MSGF(EMLXS_CONTEXT,
243 243 &emlxs_fcp_completion_error_msg,
244 244 "Task Abort. "
245 245 "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
246 246 did, sbp, scsi_opcode, pkt->pkt_datalen);
247 247
248 248 rsp->fcp_u.fcp_status.scsi_status =
249 249 SCSI_STAT_CHECK_COND;
250 250 rsp->fcp_u.fcp_status.rsp_len_set = 0;
251 251 rsp->fcp_u.fcp_status.sense_len_set = 0;
252 252 rsp->fcp_u.fcp_status.resid_over = 0;
253 253
254 254 if (pkt->pkt_datalen) {
255 255 rsp->fcp_u.fcp_status.resid_under = 1;
256 256 rsp->fcp_resid =
257 257 LE_SWAP32(pkt->pkt_datalen);
258 258 } else {
259 259 rsp->fcp_u.fcp_status.resid_under = 0;
260 260 rsp->fcp_resid = 0;
261 261 }
262 262
263 263 scsi_status = SCSI_STAT_CHECK_COND;
264 264 }
265 265
266 266 /*
267 267 * We only need to check underrun if data could
268 268 * have been sent
269 269 */
270 270
271 271 /* Always check underrun if status is good */
272 272 if (scsi_status == SCSI_STAT_GOOD) {
273 273 check_underrun = 1;
274 274 }
275 275 /* Check the sense codes if this is a check condition */
276 276 else if (scsi_status == SCSI_STAT_CHECK_COND) {
277 277 check_underrun = 1;
278 278
279 279 /* Check if sense data was provided */
280 280 if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
281 281 sense = *((uint8_t *)rsp + 32 + 2);
282 282 asc = *((uint8_t *)rsp + 32 + 12);
283 283 ascq = *((uint8_t *)rsp + 32 + 13);
284 284 }
285 285
286 286 #ifdef SAN_DIAG_SUPPORT
287 287 emlxs_log_sd_scsi_check_event(port,
288 288 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
289 289 scsi_opcode, sense, asc, ascq);
290 290 #endif
291 291 }
292 292 /* Status is not good and this is not a check condition */
293 293 /* No data should have been sent */
294 294 else {
295 295 check_underrun = 0;
296 296 }
297 297
298 298 /* Initialize the resids */
299 299 pkt->pkt_resp_resid = 0;
300 300 pkt->pkt_data_resid = 0;
301 301
302 302 /* Check if no data was to be transferred */
303 303 if (pkt->pkt_datalen == 0) {
304 304 goto done;
305 305 }
306 306
307 307 /* Get the residual underrun count reported by the SCSI reply */
308 308 rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
309 309 LE_SWAP32(rsp->fcp_resid) : 0;
310 310
311 311 /* Set the pkt_data_resid to what the scsi response resid */
312 312 pkt->pkt_data_resid = rsp_data_resid;
313 313
314 314 /* Adjust the pkt_data_resid field if needed */
315 315 if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
316 316 /*
317 317 * Get the residual underrun count reported by
318 318 * our adapter
319 319 */
320 320 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
321 321
322 322 #ifdef SAN_DIAG_SUPPORT
323 323 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
324 324 emlxs_log_sd_fc_rdchk_event(port,
325 325 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
326 326 scsi_opcode, pkt->pkt_data_resid);
327 327 }
328 328 #endif
329 329
330 330 /* Get the actual amount of data transferred */
331 331 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
332 332
333 333 /*
334 334 * If the residual being reported by the adapter is
335 335 * greater than the residual being reported in the
336 336 * reply, then we have a true underrun.
337 337 */
338 338 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
339 339 switch (scsi_opcode) {
340 340 case SCSI_INQUIRY:
341 341 scsi_dl = scsi_cmd[16];
342 342 break;
343 343
344 344 case SCSI_RX_DIAG:
345 345 scsi_dl =
346 346 (scsi_cmd[15] * 0x100) +
347 347 scsi_cmd[16];
348 348 break;
349 349
350 350 default:
351 351 scsi_dl = pkt->pkt_datalen;
352 352 }
353 353
354 354 #ifdef FCP_UNDERRUN_PATCH1
355 355 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
356 356 /*
357 357 * If status is not good and no data was
358 358 * actually transferred, then we must fix
359 359 * the issue
360 360 */
361 361 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
362 362 fix_it = 1;
363 363
364 364 EMLXS_MSGF(EMLXS_CONTEXT,
365 365 &emlxs_fcp_completion_error_msg,
366 366 "Underrun(1). Fixed. "
367 367 "did=0x%06x sbp=%p cmd=%02x "
368 368 "dl=%d,%d rx=%d rsp=%d",
369 369 did, sbp, scsi_opcode,
370 370 pkt->pkt_datalen, scsi_dl,
371 371 (pkt->pkt_datalen -
372 372 pkt->pkt_data_resid),
373 373 rsp_data_resid);
374 374
375 375 }
376 376 }
377 377 #endif /* FCP_UNDERRUN_PATCH1 */
378 378
379 379
380 380 #ifdef FCP_UNDERRUN_PATCH2
381 381 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
382 382 if (scsi_status == SCSI_STAT_GOOD) {
383 383 emlxs_msg_t *msg;
384 384
385 385 msg = &emlxs_fcp_completion_error_msg;
386 386 /*
387 387 * If status is good and this is an
388 388 * inquiry request and the amount of
389 389 * data
390 390 */
391 391 /*
392 392 * requested <= data received, then we
393 393 * must fix the issue.
394 394 */
395 395
396 396 if ((scsi_opcode == SCSI_INQUIRY) &&
397 397 (pkt->pkt_datalen >= data_rx) &&
398 398 (scsi_dl <= data_rx)) {
399 399 fix_it = 1;
400 400
401 401 EMLXS_MSGF(EMLXS_CONTEXT, msg,
402 402 "Underrun(2). Fixed. "
403 403 "did=0x%06x sbp=%p "
404 404 "cmd=%02x dl=%d,%d "
405 405 "rx=%d rsp=%d",
406 406 did, sbp, scsi_opcode,
407 407 pkt->pkt_datalen, scsi_dl,
408 408 data_rx, rsp_data_resid);
409 409
410 410 }
411 411
412 412 /*
413 413 * If status is good and this is an
414 414 * inquiry request and the amount of
415 415 * data requested >= 128 bytes, but
416 416 * only 128 bytes were received,
417 417 * then we must fix the issue.
418 418 */
419 419 else if ((scsi_opcode == SCSI_INQUIRY) &&
420 420 (pkt->pkt_datalen >= 128) &&
421 421 (scsi_dl >= 128) && (data_rx == 128)) {
422 422 fix_it = 1;
423 423
424 424 EMLXS_MSGF(EMLXS_CONTEXT, msg,
425 425 "Underrun(3). Fixed. "
426 426 "did=0x%06x sbp=%p "
427 427 "cmd=%02x dl=%d,%d "
428 428 "rx=%d rsp=%d",
429 429 did, sbp, scsi_opcode,
430 430 pkt->pkt_datalen, scsi_dl,
431 431 data_rx, rsp_data_resid);
432 432
433 433 }
434 434 }
435 435 }
436 436 #endif /* FCP_UNDERRUN_PATCH2 */
437 437
438 438 /*
439 439 * Check if SCSI response payload should be
440 440 * fixed or if a DATA_UNDERRUN should be
441 441 * reported
442 442 */
443 443 if (fix_it) {
444 444 /*
445 445 * Fix the SCSI response payload itself
446 446 */
447 447 rsp->fcp_u.fcp_status.resid_under = 1;
448 448 rsp->fcp_resid =
449 449 LE_SWAP32(pkt->pkt_data_resid);
450 450 } else {
451 451 /*
452 452 * Change the status from
453 453 * IOSTAT_FCP_RSP_ERROR to
454 454 * IOSTAT_DATA_UNDERRUN
455 455 */
456 456 iostat = IOSTAT_DATA_UNDERRUN;
457 457 pkt->pkt_data_resid =
458 458 pkt->pkt_datalen;
459 459 }
460 460 }
461 461
462 462 /*
463 463 * If the residual being reported by the adapter is
464 464 * less than the residual being reported in the reply,
465 465 * then we have a true overrun. Since we don't know
466 466 * where the extra data came from or went to then we
467 467 * cannot trust anything we received
468 468 */
469 469 else if (rsp_data_resid > pkt->pkt_data_resid) {
470 470 /*
471 471 * Change the status from
472 472 * IOSTAT_FCP_RSP_ERROR to
473 473 * IOSTAT_DATA_OVERRUN
474 474 */
475 475 iostat = IOSTAT_DATA_OVERRUN;
476 476 pkt->pkt_data_resid = pkt->pkt_datalen;
477 477 }
478 478
479 479 } else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
480 480 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
481 481 /*
482 482 * Get the residual underrun count reported by
483 483 * our adapter
484 484 */
485 485 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
486 486
487 487 #ifdef SAN_DIAG_SUPPORT
488 488 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
489 489 emlxs_log_sd_fc_rdchk_event(port,
490 490 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
491 491 scsi_opcode, pkt->pkt_data_resid);
492 492 }
493 493 #endif /* SAN_DIAG_SUPPORT */
494 494
495 495 /* Get the actual amount of data transferred */
496 496 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
497 497
498 498 /*
499 499 * If the residual being reported by the adapter is
500 500 * greater than the residual being reported in the
501 501 * reply, then we have a true underrun.
502 502 */
503 503 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
504 504
505 505 scsi_dl = pkt->pkt_datalen;
506 506
507 507 #ifdef FCP_UNDERRUN_PATCH1
508 508 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
509 509 /*
510 510 * If status is not good and no data was
511 511 * actually transferred, then we must fix
512 512 * the issue
513 513 */
514 514 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
515 515 fix_it = 1;
516 516
517 517 EMLXS_MSGF(EMLXS_CONTEXT,
518 518 &emlxs_fcp_completion_error_msg,
519 519 "Underrun(1). Fixed. "
520 520 "did=0x%06x sbp=%p cmd=%02x "
521 521 "dl=%d,%d rx=%d rsp=%d",
522 522 did, sbp, scsi_opcode,
523 523 pkt->pkt_datalen, scsi_dl,
524 524 (pkt->pkt_datalen -
525 525 pkt->pkt_data_resid),
526 526 rsp_data_resid);
527 527
528 528 }
529 529 }
530 530 #endif /* FCP_UNDERRUN_PATCH1 */
531 531
532 532 /*
533 533 * Check if SCSI response payload should be
534 534 * fixed or if a DATA_UNDERRUN should be
535 535 * reported
536 536 */
537 537 if (fix_it) {
538 538 /*
539 539 * Fix the SCSI response payload itself
540 540 */
541 541 rsp->fcp_u.fcp_status.resid_under = 1;
542 542 rsp->fcp_resid =
543 543 LE_SWAP32(pkt->pkt_data_resid);
544 544 } else {
545 545 /*
546 546 * Change the status from
547 547 * IOSTAT_FCP_RSP_ERROR to
548 548 * IOSTAT_DATA_UNDERRUN
549 549 */
550 550 iostat = IOSTAT_DATA_UNDERRUN;
551 551 pkt->pkt_data_resid =
552 552 pkt->pkt_datalen;
553 553 }
554 554 }
555 555
556 556 /*
557 557 * If the residual being reported by the adapter is
558 558 * less than the residual being reported in the reply,
559 559 * then we have a true overrun. Since we don't know
560 560 * where the extra data came from or went to then we
561 561 * cannot trust anything we received
562 562 */
563 563 else if (rsp_data_resid > pkt->pkt_data_resid) {
564 564 /*
565 565 * Change the status from
566 566 * IOSTAT_FCP_RSP_ERROR to
567 567 * IOSTAT_DATA_OVERRUN
568 568 */
569 569 iostat = IOSTAT_DATA_OVERRUN;
570 570 pkt->pkt_data_resid = pkt->pkt_datalen;
571 571 }
572 572 }
573 573
574 574 done:
575 575
576 576 /* Print completion message */
577 577 switch (iostat) {
578 578 case IOSTAT_SUCCESS:
579 579 /* Build SCSI GOOD status */
580 580 if (pkt->pkt_rsplen) {
581 581 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
582 582 }
583 583 break;
584 584
585 585 case IOSTAT_FCP_RSP_ERROR:
586 586 break;
587 587
588 588 case IOSTAT_REMOTE_STOP:
589 589 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
590 590 "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
591 591 scsi_opcode);
592 592 break;
593 593
594 594 case IOSTAT_LOCAL_REJECT:
595 595 localstat = cmd->un.grsp.perr.statLocalError;
596 596
597 597 switch (localstat) {
598 598 case IOERR_SEQUENCE_TIMEOUT:
599 599 EMLXS_MSGF(EMLXS_CONTEXT,
600 600 &emlxs_fcp_completion_error_msg,
601 601 "Local reject. "
602 602 "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
603 603 emlxs_error_xlate(localstat), did, sbp,
604 604 scsi_opcode, pkt->pkt_timeout);
605 605 break;
606 606
607 607 default:
608 608 EMLXS_MSGF(EMLXS_CONTEXT,
609 609 &emlxs_fcp_completion_error_msg,
610 610 "Local reject. %s 0x%06x %p %02x (%x)(%x)",
611 611 emlxs_error_xlate(localstat), did, sbp,
612 612 scsi_opcode, (uint16_t)cmd->ULPIOTAG,
613 613 (uint16_t)cmd->ULPCONTEXT);
614 614 }
615 615
616 616 break;
617 617
618 618 case IOSTAT_NPORT_RJT:
619 619 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
620 620 "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
621 621 scsi_opcode);
622 622 break;
623 623
624 624 case IOSTAT_FABRIC_RJT:
625 625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
626 626 "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
627 627 scsi_opcode);
628 628 break;
629 629
630 630 case IOSTAT_NPORT_BSY:
631 631 #ifdef SAN_DIAG_SUPPORT
632 632 ndlp = (NODELIST *)iocbq->node;
633 633 emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
634 634 #endif
635 635
636 636 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
637 637 "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
638 638 scsi_opcode);
639 639 break;
640 640
641 641 case IOSTAT_FABRIC_BSY:
642 642 #ifdef SAN_DIAG_SUPPORT
643 643 ndlp = (NODELIST *)iocbq->node;
644 644 emlxs_log_sd_fc_bsy_event(port, NULL);
645 645 #endif
646 646
647 647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
648 648 "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
649 649 scsi_opcode);
650 650 break;
651 651
652 652 case IOSTAT_INTERMED_RSP:
653 653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
654 654 "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
655 655 sbp, scsi_opcode);
656 656 break;
657 657
658 658 case IOSTAT_LS_RJT:
659 659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
660 660 "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
661 661 scsi_opcode);
662 662 break;
663 663
664 664 case IOSTAT_DATA_UNDERRUN:
665 665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
666 666 "Underrun. did=0x%06x sbp=%p cmd=%02x "
667 667 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
668 668 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
669 669 rsp_data_resid, scsi_status, sense, asc, ascq);
670 670 break;
671 671
672 672 case IOSTAT_DATA_OVERRUN:
673 673 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
674 674 "Overrun. did=0x%06x sbp=%p cmd=%02x "
675 675 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
676 676 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
677 677 rsp_data_resid, scsi_status, sense, asc, ascq);
678 678 break;
679 679
680 680 case IOSTAT_RSP_INVALID:
681 681 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
682 682 "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
683 683 "(%d, %d, %d)",
684 684 did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
685 685 LE_SWAP32(rsp->fcp_resid),
686 686 LE_SWAP32(rsp->fcp_sense_len),
687 687 LE_SWAP32(rsp->fcp_response_len));
688 688 break;
689 689
690 690 default:
691 691 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
692 692 "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
693 693 iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
694 694 scsi_opcode);
695 695 break;
696 696 }
697 697
698 698 if (iostat == IOSTAT_SUCCESS) {
699 699 HBASTATS.FcpGood++;
700 700 } else {
701 701 HBASTATS.FcpError++;
702 702 }
703 703
704 704 mutex_exit(&sbp->mtx);
705 705
706 706 emlxs_pkt_complete(sbp, iostat, localstat, 0);
707 707
708 708 return;
709 709
710 710 } /* emlxs_handle_fcp_event() */
711 711
712 712
713 713 /*
714 714 * emlxs_post_buffer
715 715 *
716 716 * This routine will post count buffers to the
717 717 * ring with the QUE_RING_BUF_CN command. This
718 718 * allows 2 buffers / command to be posted.
719 719 * Returns the number of buffers NOT posted.
720 720 */
721 721 /* SLI3 */
722 722 extern int
723 723 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
724 724 {
725 725 emlxs_port_t *port = &PPORT;
726 726 IOCB *icmd;
727 727 IOCBQ *iocbq;
728 728 MATCHMAP *mp;
729 729 uint16_t tag;
730 730 uint32_t maxqbuf;
731 731 int32_t i;
732 732 int32_t j;
733 733 uint32_t seg;
734 734 uint32_t size;
735 735
/*
 * cnt is the number of buffers requested this call; any buffers we
 * fail to post are remembered in rp->fc_missbufcnt and retried on a
 * later call (note cnt is bumped by the outstanding miss count below).
 */
736 736 mp = 0;
737 737 maxqbuf = 2;
738 738 tag = (uint16_t)cnt;
739 739 cnt += rp->fc_missbufcnt;
740 740
/* Map the ring number to its buffer memory pool and buffer size */
741 741 if (rp->ringno == hba->channel_els) {
742 742 seg = MEM_BUF;
743 743 size = MEM_ELSBUF_SIZE;
744 744 } else if (rp->ringno == hba->channel_ip) {
745 745 seg = MEM_IPBUF;
746 746 size = MEM_IPBUF_SIZE;
747 747 } else if (rp->ringno == hba->channel_ct) {
748 748 seg = MEM_CTBUF;
749 749 size = MEM_CTBUF_SIZE;
750 750 }
751 751 #ifdef SFCT_SUPPORT
752 752 else if (rp->ringno == hba->CHANNEL_FCT) {
753 753 seg = MEM_FCTBUF;
754 754 size = MEM_FCTBUF_SIZE;
755 755 }
756 756 #endif /* SFCT_SUPPORT */
757 757 else {
/* Unknown ring: nothing to post, report zero buffers missed */
758 758 return (0);
759 759 }
760 760
761 761 /*
762 762 * While there are buffers to post
763 763 */
764 764 while (cnt) {
/* One IOCB per QUE_RING_BUF command; carries up to maxqbuf BDEs */
765 765 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
766 766 rp->fc_missbufcnt = cnt;
767 767 return (cnt);
768 768 }
769 769
770 770 iocbq->channel = (void *)&hba->chan[rp->ringno];
771 771 iocbq->port = (void *)port;
772 772 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
773 773
774 774 icmd = &iocbq->iocb;
775 775
776 776 /*
777 777 * Max buffers can be posted per command
778 778 */
779 779 for (i = 0; i < maxqbuf; i++) {
780 780 if (cnt <= 0)
781 781 break;
782 782
783 783 /* fill in BDEs for command */
784 784 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
785 785 == 0) {
/*
 * Buffer pool exhausted: unmap and return the buffers
 * already attached to this IOCB, then give the IOCB back.
 *
 * NOTE(review): EMLXS_GET_VADDR expands using the outer
 * index 'i' (not the loop index 'j'), so each iteration
 * looks up the same cont64[i] address pair -- verify this
 * cleanup path is intended before relying on it.
 */
786 786 icmd->ULPBDECOUNT = i;
787 787 for (j = 0; j < i; j++) {
788 788 mp = EMLXS_GET_VADDR(hba, rp, icmd);
789 789 if (mp) {
790 790 emlxs_mem_put(hba, seg,
791 791 (void *)mp);
792 792 }
793 793 }
794 794
795 795 rp->fc_missbufcnt = cnt + i;
796 796
797 797 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
798 798
799 799 return (cnt + i);
800 800 }
801 801
802 802 /*
803 803 * map that page and save the address pair for lookup
804 804 * later
805 805 */
806 806 emlxs_mem_map_vaddr(hba,
807 807 rp,
808 808 mp,
809 809 (uint32_t *)&icmd->un.cont64[i].addrHigh,
810 810 (uint32_t *)&icmd->un.cont64[i].addrLow);
811 811
812 812 icmd->un.cont64[i].tus.f.bdeSize = size;
813 813 icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
814 814
815 815 /*
816 816 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
817 817 * "UB Post: ring=%d addr=%08x%08x size=%d",
818 818 * rp->ringno, icmd->un.cont64[i].addrHigh,
819 819 * icmd->un.cont64[i].addrLow, size);
820 820 */
821 821
822 822 cnt--;
823 823 }
824 824
/* Finalize the IOCB and hand it to the adapter */
825 825 icmd->ULPIOTAG = tag;
826 826 icmd->ULPBDECOUNT = i;
827 827 icmd->ULPLE = 1;
828 828 icmd->ULPOWNER = OWN_CHIP;
829 829 /* used for delimiter between commands */
830 830 iocbq->bp = (void *)mp;
831 831
832 832 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
833 833 }
834 834
/* Everything requested was posted */
835 835 rp->fc_missbufcnt = 0;
836 836
837 837 return (0);
838 838
839 839 } /* emlxs_post_buffer() */
840 840
841 841
842 842 static void
843 843 emlxs_fcp_tag_nodes(emlxs_port_t *port)
844 844 {
845 845 NODELIST *nlp;
846 846 int i;
847 847
848 848 /* We will process all nodes with this tag later */
849 849 rw_enter(&port->node_rwlock, RW_READER);
850 850 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
851 851 nlp = port->node_table[i];
852 852 while (nlp != NULL) {
853 853 nlp->nlp_tag = 1;
854 854 nlp = nlp->nlp_list_next;
855 855 }
856 856 }
857 857 rw_exit(&port->node_rwlock);
858 858 }
859 859
860 860
861 861 static NODELIST *
862 862 emlxs_find_tagged_node(emlxs_port_t *port)
863 863 {
864 864 NODELIST *nlp;
865 865 NODELIST *tagged;
866 866 int i;
867 867
868 868 /* Find first node */
869 869 rw_enter(&port->node_rwlock, RW_READER);
870 870 tagged = 0;
871 871 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
872 872 nlp = port->node_table[i];
873 873 while (nlp != NULL) {
874 874 if (!nlp->nlp_tag) {
875 875 nlp = nlp->nlp_list_next;
876 876 continue;
877 877 }
878 878 nlp->nlp_tag = 0;
879 879
880 880 if (nlp->nlp_Rpi == FABRIC_RPI) {
881 881 nlp = nlp->nlp_list_next;
882 882 continue;
883 883 }
884 884 tagged = nlp;
885 885 break;
886 886 }
887 887 if (tagged) {
888 888 break;
889 889 }
890 890 }
891 891 rw_exit(&port->node_rwlock);
892 892 return (tagged);
893 893 }
894 894
895 895
896 896 extern int
897 897 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
898 898 {
899 899 emlxs_hba_t *hba = HBA;
900 900 emlxs_config_t *cfg;
901 901 NODELIST *nlp;
902 902 fc_affected_id_t *aid;
903 903 uint32_t mask;
904 904 uint32_t aff_d_id;
905 905 uint32_t linkdown;
906 906 uint32_t vlinkdown;
907 907 uint32_t action;
908 908 int i;
909 909 uint32_t unreg_vpi;
910 910 uint32_t update;
911 911 uint32_t adisc_support;
912 912 uint32_t clear_all;
913 913 uint8_t format;
914 914
915 915 /* Target mode only uses this routine for linkdowns */
916 916 if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
917 917 (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
918 918 return (0);
919 919 }
920 920
921 921 cfg = &CFG;
922 922 aid = (fc_affected_id_t *)&scope;
923 923 linkdown = 0;
924 924 vlinkdown = 0;
925 925 unreg_vpi = 0;
926 926 update = 0;
927 927 clear_all = 0;
928 928
929 929 if (!(port->flag & EMLXS_PORT_BOUND)) {
930 930 return (0);
931 931 }
932 932
933 933 format = aid->aff_format;
934 934
935 935 switch (format) {
936 936 case 0: /* Port */
937 937 mask = 0x00ffffff;
938 938 break;
939 939
940 940 case 1: /* Area */
941 941 mask = 0x00ffff00;
942 942 break;
943 943
944 944 case 2: /* Domain */
945 945 mask = 0x00ff0000;
946 946 break;
947 947
948 948 case 3: /* Network */
949 949 mask = 0x00000000;
950 950 break;
951 951
952 952 #ifdef DHCHAP_SUPPORT
953 953 case 0xfe: /* Virtual link down */
954 954 mask = 0x00000000;
955 955 vlinkdown = 1;
956 956 break;
957 957 #endif /* DHCHAP_SUPPORT */
958 958
959 959 case 0xff: /* link is down */
960 960 mask = 0x00000000;
961 961 linkdown = 1;
962 962 break;
963 963
964 964 case 0xfd: /* New fabric */
965 965 default:
966 966 mask = 0x00000000;
967 967 linkdown = 1;
968 968 clear_all = 1;
969 969 break;
970 970 }
971 971
972 972 aff_d_id = aid->aff_d_id & mask;
973 973
974 974
975 975 /*
976 976 * If link is down then this is a hard shutdown and flush
977 977 * If link not down then this is a soft shutdown and flush
978 978 * (e.g. RSCN)
979 979 */
980 980 if (linkdown) {
981 981 mutex_enter(&EMLXS_PORT_LOCK);
982 982
983 983 port->flag &= EMLXS_PORT_LINKDOWN_MASK;
984 984
985 985 if (port->ulp_statec != FC_STATE_OFFLINE) {
986 986 port->ulp_statec = FC_STATE_OFFLINE;
987 987
988 988 port->prev_did = port->did;
989 989 port->did = 0;
990 990 port->rdid = 0;
991 991
992 992 bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
993 993 sizeof (SERV_PARM));
994 994 bzero(&port->fabric_sparam, sizeof (SERV_PARM));
995 995
996 996 update = 1;
997 997 }
998 998
999 999 mutex_exit(&EMLXS_PORT_LOCK);
1000 1000
1001 1001 emlxs_timer_cancel_clean_address(port);
1002 1002
1003 1003 /* Tell ULP about it */
1004 1004 if (update) {
1005 1005 if (port->flag & EMLXS_PORT_BOUND) {
1006 1006 if (port->vpi == 0) {
1007 1007 EMLXS_MSGF(EMLXS_CONTEXT,
1008 1008 &emlxs_link_down_msg, NULL);
1009 1009 }
1010 1010
1011 1011 if (port->mode == MODE_INITIATOR) {
1012 1012 emlxs_fca_link_down(port);
1013 1013 }
1014 1014 #ifdef SFCT_SUPPORT
1015 1015 else if (port->mode == MODE_TARGET) {
1016 1016 emlxs_fct_link_down(port);
1017 1017 }
1018 1018 #endif /* SFCT_SUPPORT */
1019 1019
1020 1020 } else {
1021 1021 if (port->vpi == 0) {
1022 1022 EMLXS_MSGF(EMLXS_CONTEXT,
1023 1023 &emlxs_link_down_msg, "*");
1024 1024 }
1025 1025 }
1026 1026
1027 1027
1028 1028 }
1029 1029
1030 1030 unreg_vpi = 1;
1031 1031
1032 1032 #ifdef DHCHAP_SUPPORT
1033 1033 /* Stop authentication with all nodes */
1034 1034 emlxs_dhc_auth_stop(port, NULL);
1035 1035 #endif /* DHCHAP_SUPPORT */
1036 1036
1037 1037 /* Flush the base node */
1038 1038 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1039 1039 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1040 1040
1041 1041 /* Flush any pending ub buffers */
1042 1042 emlxs_ub_flush(port);
1043 1043 }
1044 1044 #ifdef DHCHAP_SUPPORT
1045 1045 /* virtual link down */
1046 1046 else if (vlinkdown) {
1047 1047 mutex_enter(&EMLXS_PORT_LOCK);
1048 1048
1049 1049 if (port->ulp_statec != FC_STATE_OFFLINE) {
1050 1050 port->ulp_statec = FC_STATE_OFFLINE;
1051 1051 update = 1;
1052 1052 }
1053 1053
1054 1054 mutex_exit(&EMLXS_PORT_LOCK);
1055 1055
1056 1056 emlxs_timer_cancel_clean_address(port);
1057 1057
1058 1058 /* Tell ULP about it */
1059 1059 if (update) {
1060 1060 if (port->flag & EMLXS_PORT_BOUND) {
1061 1061 if (port->vpi == 0) {
1062 1062 EMLXS_MSGF(EMLXS_CONTEXT,
1063 1063 &emlxs_link_down_msg,
1064 1064 "Switch authentication failed.");
1065 1065 }
1066 1066
1067 1067 if (port->mode == MODE_INITIATOR) {
1068 1068 emlxs_fca_link_down(port);
1069 1069 }
1070 1070 #ifdef SFCT_SUPPORT
1071 1071 else if (port->mode == MODE_TARGET) {
1072 1072 emlxs_fct_link_down(port);
1073 1073 }
1074 1074 #endif /* SFCT_SUPPORT */
1075 1075 } else {
1076 1076 if (port->vpi == 0) {
1077 1077 EMLXS_MSGF(EMLXS_CONTEXT,
1078 1078 &emlxs_link_down_msg,
1079 1079 "Switch authentication failed. *");
1080 1080 }
1081 1081 }
1082 1082
1083 1083
1084 1084 }
1085 1085
1086 1086 /* Flush the base node */
1087 1087 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1088 1088 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1089 1089 }
1090 1090 #endif /* DHCHAP_SUPPORT */
1091 1091 else {
1092 1092 emlxs_timer_cancel_clean_address(port);
1093 1093 }
1094 1094
1095 1095 if (port->mode == MODE_TARGET) {
1096 1096 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1097 1097 /* Set the node tags */
1098 1098 emlxs_fcp_tag_nodes(port);
1099 1099 unreg_vpi = 0;
1100 1100 while ((nlp = emlxs_find_tagged_node(port))) {
1101 1101 (void) emlxs_rpi_pause_notify(port,
1102 1102 nlp->rpip);
1103 1103 /*
1104 1104 * In port_online we need to resume
1105 1105 * these RPIs before we can use them.
1106 1106 */
1107 1107 }
1108 1108 }
1109 1109 goto done;
1110 1110 }
1111 1111
1112 1112 /* Set the node tags */
1113 1113 emlxs_fcp_tag_nodes(port);
1114 1114
1115 1115 if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
1116 1116 adisc_support = cfg[CFG_ADISC_SUPPORT].current;
1117 1117 } else {
1118 1118 adisc_support = 0;
1119 1119 }
1120 1120
1121 1121 /* Check ADISC support level */
1122 1122 switch (adisc_support) {
1123 1123 case 0: /* No support - Flush all IO to all matching nodes */
1124 1124
1125 1125 for (;;) {
1126 1126 /*
1127 1127 * We need to hold the locks this way because
1128 1128 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1129 1129 * same locks. Also, when we release the lock the list
1130 1130 * can change out from under us.
1131 1131 */
1132 1132
1133 1133 /* Find first node */
1134 1134 rw_enter(&port->node_rwlock, RW_READER);
1135 1135 action = 0;
1136 1136 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1137 1137 nlp = port->node_table[i];
1138 1138 while (nlp != NULL) {
1139 1139 if (!nlp->nlp_tag) {
1140 1140 nlp = nlp->nlp_list_next;
1141 1141 continue;
1142 1142 }
1143 1143 nlp->nlp_tag = 0;
1144 1144
1145 1145 /*
1146 1146 * Check for any device that matches
1147 1147 * our mask
1148 1148 */
1149 1149 if ((nlp->nlp_DID & mask) == aff_d_id) {
1150 1150 if (linkdown) {
1151 1151 action = 1;
1152 1152 break;
1153 1153 } else { /* Must be an RCSN */
1154 1154
1155 1155 action = 2;
1156 1156 break;
1157 1157 }
1158 1158 }
1159 1159 nlp = nlp->nlp_list_next;
1160 1160 }
1161 1161
1162 1162 if (action) {
1163 1163 break;
1164 1164 }
1165 1165 }
1166 1166 rw_exit(&port->node_rwlock);
1167 1167
1168 1168
1169 1169 /* Check if nothing was found */
1170 1170 if (action == 0) {
1171 1171 break;
1172 1172 } else if (action == 1) {
1173 1173 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1174 1174 NULL, NULL, NULL);
1175 1175 } else if (action == 2) {
1176 1176 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1177 1177
1178 1178 #ifdef DHCHAP_SUPPORT
1179 1179 emlxs_dhc_auth_stop(port, nlp);
1180 1180 #endif /* DHCHAP_SUPPORT */
1181 1181
1182 1182 /*
1183 1183 * Close the node for any further normal IO
1184 1184 * A PLOGI with reopen the node
1185 1185 */
1186 1186 emlxs_node_close(port, nlp,
1187 1187 hba->channel_fcp, 60);
1188 1188 emlxs_node_close(port, nlp,
1189 1189 hba->channel_ip, 60);
1190 1190
1191 1191 /* Flush tx queue */
1192 1192 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1193 1193
1194 1194 /* Flush chip queue */
1195 1195 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1196 1196 }
1197 1197
1198 1198 }
1199 1199
1200 1200 break;
1201 1201
1202 1202 case 1: /* Partial support - Flush IO for non-FCP2 matching nodes */
1203 1203
1204 1204 for (;;) {
1205 1205
1206 1206 /*
1207 1207 * We need to hold the locks this way because
1208 1208 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1209 1209 * same locks. Also, when we release the lock the list
1210 1210 * can change out from under us.
1211 1211 */
1212 1212 rw_enter(&port->node_rwlock, RW_READER);
1213 1213 action = 0;
1214 1214 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1215 1215 nlp = port->node_table[i];
1216 1216 while (nlp != NULL) {
1217 1217 if (!nlp->nlp_tag) {
1218 1218 nlp = nlp->nlp_list_next;
1219 1219 continue;
1220 1220 }
1221 1221 nlp->nlp_tag = 0;
1222 1222
1223 1223 /*
1224 1224 * Check for special FCP2 target device
1225 1225 * that matches our mask
1226 1226 */
1227 1227 if ((nlp->nlp_fcp_info &
1228 1228 NLP_FCP_TGT_DEVICE) &&
1229 1229 (nlp-> nlp_fcp_info &
1230 1230 NLP_FCP_2_DEVICE) &&
1231 1231 (nlp->nlp_DID & mask) ==
1232 1232 aff_d_id) {
1233 1233 action = 3;
1234 1234 break;
1235 1235 }
1236 1236
1237 1237 /*
1238 1238 * Check for any other device that
1239 1239 * matches our mask
1240 1240 */
1241 1241 else if ((nlp->nlp_DID & mask) ==
1242 1242 aff_d_id) {
1243 1243 if (linkdown) {
1244 1244 action = 1;
1245 1245 break;
1246 1246 } else { /* Must be an RSCN */
1247 1247
1248 1248 action = 2;
1249 1249 break;
1250 1250 }
1251 1251 }
1252 1252
1253 1253 nlp = nlp->nlp_list_next;
1254 1254 }
1255 1255
1256 1256 if (action) {
1257 1257 break;
1258 1258 }
1259 1259 }
1260 1260 rw_exit(&port->node_rwlock);
1261 1261
1262 1262 /* Check if nothing was found */
1263 1263 if (action == 0) {
1264 1264 break;
1265 1265 } else if (action == 1) {
1266 1266 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1267 1267 NULL, NULL, NULL);
1268 1268 } else if (action == 2) {
1269 1269 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1270 1270
1271 1271 #ifdef DHCHAP_SUPPORT
1272 1272 emlxs_dhc_auth_stop(port, nlp);
1273 1273 #endif /* DHCHAP_SUPPORT */
1274 1274
1275 1275 /*
1276 1276 * Close the node for any further normal IO
1277 1277 * A PLOGI with reopen the node
1278 1278 */
1279 1279 emlxs_node_close(port, nlp,
1280 1280 hba->channel_fcp, 60);
1281 1281 emlxs_node_close(port, nlp,
1282 1282 hba->channel_ip, 60);
1283 1283
1284 1284 /* Flush tx queue */
1285 1285 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1286 1286
1287 1287 /* Flush chip queue */
1288 1288 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1289 1289
1290 1290 } else if (action == 3) { /* FCP2 devices */
1291 1291 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1292 1292
1293 1293 unreg_vpi = 0;
1294 1294
1295 1295 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1296 1296 (void) emlxs_rpi_pause_notify(port,
1297 1297 nlp->rpip);
1298 1298 }
1299 1299
1300 1300 #ifdef DHCHAP_SUPPORT
1301 1301 emlxs_dhc_auth_stop(port, nlp);
1302 1302 #endif /* DHCHAP_SUPPORT */
1303 1303
1304 1304 /*
1305 1305 * Close the node for any further normal IO
1306 1306 * An ADISC or a PLOGI with reopen the node
1307 1307 */
1308 1308 emlxs_node_close(port, nlp,
1309 1309 hba->channel_fcp, -1);
1310 1310 emlxs_node_close(port, nlp, hba->channel_ip,
1311 1311 ((linkdown) ? 0 : 60));
1312 1312
1313 1313 /* Flush tx queues except for FCP ring */
1314 1314 (void) emlxs_tx_node_flush(port, nlp,
1315 1315 &hba->chan[hba->channel_ct], 0, 0);
1316 1316 (void) emlxs_tx_node_flush(port, nlp,
1317 1317 &hba->chan[hba->channel_els], 0, 0);
1318 1318 (void) emlxs_tx_node_flush(port, nlp,
1319 1319 &hba->chan[hba->channel_ip], 0, 0);
1320 1320
1321 1321 /* Flush chip queues except for FCP ring */
1322 1322 (void) emlxs_chipq_node_flush(port,
1323 1323 &hba->chan[hba->channel_ct], nlp, 0);
1324 1324 (void) emlxs_chipq_node_flush(port,
1325 1325 &hba->chan[hba->channel_els], nlp, 0);
1326 1326 (void) emlxs_chipq_node_flush(port,
1327 1327 &hba->chan[hba->channel_ip], nlp, 0);
1328 1328 }
1329 1329 }
1330 1330 break;
1331 1331
1332 1332 case 2: /* Full support - Hold FCP IO to FCP target matching nodes */
1333 1333
1334 1334 if (!linkdown && !vlinkdown) {
1335 1335 break;
1336 1336 }
1337 1337
1338 1338 for (;;) {
1339 1339 /*
1340 1340 * We need to hold the locks this way because
1341 1341 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1342 1342 * same locks. Also, when we release the lock the list
1343 1343 * can change out from under us.
1344 1344 */
1345 1345 rw_enter(&port->node_rwlock, RW_READER);
1346 1346 action = 0;
1347 1347 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1348 1348 nlp = port->node_table[i];
1349 1349 while (nlp != NULL) {
1350 1350 if (!nlp->nlp_tag) {
1351 1351 nlp = nlp->nlp_list_next;
1352 1352 continue;
1353 1353 }
1354 1354 nlp->nlp_tag = 0;
1355 1355
1356 1356 /*
1357 1357 * Check for FCP target device that
1358 1358 * matches our mask
1359 1359 */
1360 1360 if ((nlp-> nlp_fcp_info &
1361 1361 NLP_FCP_TGT_DEVICE) &&
1362 1362 (nlp->nlp_DID & mask) ==
1363 1363 aff_d_id) {
1364 1364 action = 3;
1365 1365 break;
1366 1366 }
1367 1367
1368 1368 /*
1369 1369 * Check for any other device that
1370 1370 * matches our mask
1371 1371 */
1372 1372 else if ((nlp->nlp_DID & mask) ==
1373 1373 aff_d_id) {
1374 1374 if (linkdown) {
1375 1375 action = 1;
1376 1376 break;
1377 1377 } else { /* Must be an RSCN */
1378 1378
1379 1379 action = 2;
1380 1380 break;
1381 1381 }
1382 1382 }
1383 1383
1384 1384 nlp = nlp->nlp_list_next;
1385 1385 }
1386 1386 if (action) {
1387 1387 break;
1388 1388 }
1389 1389 }
1390 1390 rw_exit(&port->node_rwlock);
1391 1391
1392 1392 /* Check if nothing was found */
1393 1393 if (action == 0) {
1394 1394 break;
1395 1395 } else if (action == 1) {
1396 1396 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1397 1397 NULL, NULL, NULL);
1398 1398 } else if (action == 2) {
1399 1399 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1400 1400
1401 1401 /*
1402 1402 * Close the node for any further normal IO
1403 1403 * A PLOGI with reopen the node
1404 1404 */
1405 1405 emlxs_node_close(port, nlp,
1406 1406 hba->channel_fcp, 60);
1407 1407 emlxs_node_close(port, nlp,
1408 1408 hba->channel_ip, 60);
1409 1409
1410 1410 /* Flush tx queue */
1411 1411 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1412 1412
1413 1413 /* Flush chip queue */
1414 1414 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1415 1415
1416 1416 } else if (action == 3) { /* FCP2 devices */
1417 1417 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1418 1418
1419 1419 unreg_vpi = 0;
1420 1420
1421 1421 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1422 1422 (void) emlxs_rpi_pause_notify(port,
1423 1423 nlp->rpip);
1424 1424 }
1425 1425
1426 1426 /*
1427 1427 * Close the node for any further normal IO
1428 1428 * An ADISC or a PLOGI with reopen the node
1429 1429 */
1430 1430 emlxs_node_close(port, nlp,
1431 1431 hba->channel_fcp, -1);
1432 1432 emlxs_node_close(port, nlp, hba->channel_ip,
1433 1433 ((linkdown) ? 0 : 60));
1434 1434
1435 1435 /* Flush tx queues except for FCP ring */
1436 1436 (void) emlxs_tx_node_flush(port, nlp,
1437 1437 &hba->chan[hba->channel_ct], 0, 0);
1438 1438 (void) emlxs_tx_node_flush(port, nlp,
1439 1439 &hba->chan[hba->channel_els], 0, 0);
1440 1440 (void) emlxs_tx_node_flush(port, nlp,
1441 1441 &hba->chan[hba->channel_ip], 0, 0);
1442 1442
1443 1443 /* Flush chip queues except for FCP ring */
1444 1444 (void) emlxs_chipq_node_flush(port,
1445 1445 &hba->chan[hba->channel_ct], nlp, 0);
1446 1446 (void) emlxs_chipq_node_flush(port,
1447 1447 &hba->chan[hba->channel_els], nlp, 0);
1448 1448 (void) emlxs_chipq_node_flush(port,
1449 1449 &hba->chan[hba->channel_ip], nlp, 0);
1450 1450 }
1451 1451 }
1452 1452
1453 1453 break;
1454 1454
1455 1455 } /* switch() */
1456 1456
1457 1457 done:
1458 1458
1459 1459 if (unreg_vpi) {
1460 1460 (void) emlxs_mb_unreg_vpi(port);
1461 1461 }
1462 1462
1463 1463 return (0);
1464 1464
1465 1465 } /* emlxs_port_offline() */
1466 1466
1467 1467
1468 1468 extern void
1469 1469 emlxs_port_online(emlxs_port_t *vport)
1470 1470 {
1471 1471 emlxs_hba_t *hba = vport->hba;
1472 1472 emlxs_port_t *port = &PPORT;
1473 1473 NODELIST *nlp;
1474 1474 uint32_t state;
1475 1475 uint32_t update;
1476 1476 uint32_t npiv_linkup;
1477 1477 char topology[32];
1478 1478 char linkspeed[32];
1479 1479 char mode[32];
1480 1480
1481 1481 /*
1482 1482 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1483 1483 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1484 1484 */
1485 1485
1486 1486 if ((vport->vpi > 0) &&
1487 1487 (!(hba->flag & FC_NPIV_ENABLED) ||
1488 1488 !(hba->flag & FC_NPIV_SUPPORTED))) {
1489 1489 return;
1490 1490 }
1491 1491
1492 1492 if (!(vport->flag & EMLXS_PORT_BOUND) ||
1493 1493 !(vport->flag & EMLXS_PORT_ENABLED)) {
1494 1494 return;
1495 1495 }
1496 1496
1497 1497 /* Check for mode */
1498 1498 if (port->mode == MODE_TARGET) {
1499 1499 (void) strlcpy(mode, ", target", sizeof (mode));
1500 1500
1501 1501 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1502 1502 /* Set the node tags */
1503 1503 emlxs_fcp_tag_nodes(vport);
1504 1504 while ((nlp = emlxs_find_tagged_node(vport))) {
1505 1505 /* The RPI was paused in port_offline */
1506 1506 (void) emlxs_rpi_resume_notify(vport,
1507 1507 nlp->rpip, 0);
1508 1508 }
1509 1509 }
1510 1510 } else if (port->mode == MODE_INITIATOR) {
1511 1511 (void) strlcpy(mode, ", initiator", sizeof (mode));
1512 1512 } else {
1513 1513 (void) strlcpy(mode, "unknown", sizeof (mode));
1514 1514 }
1515 1515 mutex_enter(&EMLXS_PORT_LOCK);
1516 1516
1517 1517 /* Check for loop topology */
1518 1518 if (hba->topology == TOPOLOGY_LOOP) {
1519 1519 state = FC_STATE_LOOP;
1520 1520 (void) strlcpy(topology, ", loop", sizeof (topology));
1521 1521 } else {
1522 1522 state = FC_STATE_ONLINE;
1523 1523 (void) strlcpy(topology, ", fabric", sizeof (topology));
1524 1524 }
1525 1525
1526 1526 /* Set the link speed */
1527 1527 switch (hba->linkspeed) {
1528 1528 case 0:
1529 1529 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1530 1530 state |= FC_STATE_1GBIT_SPEED;
1531 1531 break;
1532 1532
1533 1533 case LA_1GHZ_LINK:
1534 1534 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1535 1535 state |= FC_STATE_1GBIT_SPEED;
1536 1536 break;
1537 1537 case LA_2GHZ_LINK:
1538 1538 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1539 1539 state |= FC_STATE_2GBIT_SPEED;
1540 1540 break;
1541 1541 case LA_4GHZ_LINK:
1542 1542 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1543 1543 state |= FC_STATE_4GBIT_SPEED;
1544 1544 break;
1545 1545 case LA_8GHZ_LINK:
1546 1546 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1547 1547 state |= FC_STATE_8GBIT_SPEED;
1548 1548 break;
1549 1549 case LA_10GHZ_LINK:
1550 1550 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1551 1551 state |= FC_STATE_10GBIT_SPEED;
1552 1552 break;
1553 1553 case LA_16GHZ_LINK:
1554 1554 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1555 1555 state |= FC_STATE_16GBIT_SPEED;
1556 1556 break;
1557 1557 default:
1558 1558 (void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
1559 1559 hba->linkspeed);
1560 1560 break;
1561 1561 }
1562 1562
1563 1563 npiv_linkup = 0;
1564 1564 update = 0;
1565 1565
1566 1566 if ((hba->state >= FC_LINK_UP) &&
1567 1567 !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1568 1568 update = 1;
1569 1569 vport->ulp_statec = state;
1570 1570
1571 1571 if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1572 1572 hba->flag |= FC_NPIV_LINKUP;
1573 1573 npiv_linkup = 1;
1574 1574 }
1575 1575 }
1576 1576
1577 1577 mutex_exit(&EMLXS_PORT_LOCK);
1578 1578
1579 1579 if (update) {
1580 1580 if (vport->flag & EMLXS_PORT_BOUND) {
1581 1581 if (vport->vpi == 0) {
1582 1582 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1583 1583 "%s%s%s", linkspeed, topology, mode);
1584 1584
1585 1585 } else if (npiv_linkup) {
1586 1586 EMLXS_MSGF(EMLXS_CONTEXT,
1587 1587 &emlxs_npiv_link_up_msg, "%s%s%s",
1588 1588 linkspeed, topology, mode);
1589 1589 }
1590 1590
1591 1591 if (vport->mode == MODE_INITIATOR) {
1592 1592 emlxs_fca_link_up(vport);
1593 1593 }
1594 1594 #ifdef SFCT_SUPPORT
1595 1595 else if (vport->mode == MODE_TARGET) {
1596 1596 emlxs_fct_link_up(vport);
1597 1597 }
1598 1598 #endif /* SFCT_SUPPORT */
1599 1599 } else {
1600 1600 if (vport->vpi == 0) {
1601 1601 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1602 1602 "%s%s%s *", linkspeed, topology, mode);
1603 1603
1604 1604 } else if (npiv_linkup) {
1605 1605 EMLXS_MSGF(EMLXS_CONTEXT,
1606 1606 &emlxs_npiv_link_up_msg, "%s%s%s *",
1607 1607 linkspeed, topology, mode);
1608 1608 }
1609 1609 }
1610 1610
1611 1611 /* Check for waiting threads */
1612 1612 if (vport->vpi == 0) {
1613 1613 mutex_enter(&EMLXS_LINKUP_LOCK);
1614 1614 if (hba->linkup_wait_flag == TRUE) {
1615 1615 hba->linkup_wait_flag = FALSE;
1616 1616 cv_broadcast(&EMLXS_LINKUP_CV);
1617 1617 }
1618 1618 mutex_exit(&EMLXS_LINKUP_LOCK);
1619 1619 }
1620 1620
1621 1621 /* Flush any pending ub buffers */
1622 1622 emlxs_ub_flush(vport);
1623 1623 }
1624 1624
1625 1625 return;
1626 1626
1627 1627 } /* emlxs_port_online() */
1628 1628
1629 1629
1630 1630 /* SLI3 */
1631 1631 extern void
1632 1632 emlxs_linkdown(emlxs_hba_t *hba)
1633 1633 {
1634 1634 emlxs_port_t *port = &PPORT;
1635 1635 int i;
1636 1636 uint32_t scope;
1637 1637
1638 1638 mutex_enter(&EMLXS_PORT_LOCK);
1639 1639
1640 1640 if (hba->state > FC_LINK_DOWN) {
1641 1641 HBASTATS.LinkDown++;
1642 1642 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1643 1643 }
1644 1644
1645 1645 /* Set scope */
1646 1646 scope = (hba->flag & FC_NEW_FABRIC)? 0xFDFFFFFF:0xFFFFFFFF;
1647 1647
1648 1648 /* Filter hba flags */
1649 1649 hba->flag &= FC_LINKDOWN_MASK;
1650 1650 hba->discovery_timer = 0;
1651 1651 hba->linkup_timer = 0;
1652 1652
1653 1653 mutex_exit(&EMLXS_PORT_LOCK);
1654 1654
1655 1655 for (i = 0; i < MAX_VPORTS; i++) {
1656 1656 port = &VPORT(i);
1657 1657
1658 1658 if (!(port->flag & EMLXS_PORT_BOUND)) {
1659 1659 continue;
1660 1660 }
1661 1661
1662 1662 (void) emlxs_port_offline(port, scope);
1663 1663
1664 1664 }
1665 1665
1666 1666 emlxs_log_link_event(port);
1667 1667
1668 1668 return;
1669 1669
1670 1670 } /* emlxs_linkdown() */
1671 1671
1672 1672
/* SLI3 */
/*
 * emlxs_linkup
 *
 * Record a physical link-up: refresh the port mode, bump statistics,
 * raise the HBA state to FC_LINK_UP, and arm the linkup/discovery
 * timers that bound how long the driver waits for discovery.
 */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for any mode changes */
	emlxs_mode_set(hba);

	HBASTATS.LinkUp++;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		emlxs_log_link_event(port);

		return;
	}
#endif /* MENLO_SUPPORT */

	/* Set the linkup & discovery timers (in timer tics, from the
	 * configured linkup/discovery timeouts). */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer =
	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
	    cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkup() */
1719 1719
1720 1720
/*
 * emlxs_reset_link
 *
 * Description:
 *	Called to reset the link with an init_link.  The link is first
 *	brought down with a DOWN_LINK mailbox command; the routine then
 *	polls (up to 60 seconds) for the link-down to take effect and,
 *	if 'linkup' is set, issues an INIT_LINK to bring it back up.
 *
 * Returns: 0 on success, 1 on failure.
 *
 */
extern int
emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq = NULL;
	MAILBOX *mb = NULL;
	int rval = 0;
	int tmo;
	int rc;

	/*
	 * Get a buffer to use for the mailbox command
	 */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
	    == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
		    "Unable to allocate mailbox buffer.");
		rval = 1;
		goto reset_link_fail;
	}

	if (linkup) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Resetting link...");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Disabling link...");
	}

	mb = (MAILBOX *)mbq;

	/* Bring link down first */
	emlxs_mb_down_link(hba, mbq);

#define	MBXERR_LINK_DOWN	0x33

	/* Normalize caller's boolean into the SLI wait mode */
	if (wait) {
		wait = MBX_WAIT;
	} else {
		wait = MBX_NOWAIT;
	}
	/* "Already down" (MBXERR_LINK_DOWN) is not a failure here */
	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
	    (rc != MBXERR_LINK_DOWN)) {
		rval = 1;
		goto reset_link_fail;
	}

	/* Poll for link down: 120 iterations x 500ms = 60s max */
	tmo = 120;
	do {
		delay(drv_usectohz(500000));
		tmo--;

		if (!tmo) {
			rval = 1;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
			    "Linkdown timeout.");

			goto reset_link_fail;
		}
	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));

	if (linkup) {
		/*
		 * Setup and issue mailbox INITIALIZE LINK command
		 */

		/*
		 * NOTE(review): with MBX_NOWAIT the previous mbq is owned
		 * by the SLI layer once issued, so a fresh buffer is
		 * allocated; with MBX_WAIT the same mbq is reused and
		 * freed at reset_link_fail — confirm ownership on the
		 * NOWAIT error paths.
		 */
		if (wait == MBX_NOWAIT) {
			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
			    == NULL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_reset_failed_msg,
				    "Unable to allocate mailbox buffer.");
				rval = 1;
				goto reset_link_fail;
			}
			mb = (MAILBOX *)mbq;
		} else {
			/* Reuse mbq from previous mbox */
			mb = (MAILBOX *)mbq;
		}
		cfg = &CFG;

		emlxs_mb_init_link(hba, mbq,
		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);

		mb->un.varInitLnk.lipsr_AL_PA = 0;

		/* Clear the loopback mode */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag &= ~FC_LOOPBACK_MODE;
		hba->loopback_tics = 0;
		mutex_exit(&EMLXS_PORT_LOCK);

		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			rval = 1;
			goto reset_link_fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
	}

reset_link_fail:

	/* Synchronous mode: the mailbox buffer is ours to free */
	if ((wait == MBX_WAIT) && mbq) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

	return (rval);
} /* emlxs_reset_link() */
1843 1843
1844 1844
/*
 * emlxs_online
 *
 * Transition the adapter from offline to online: wait (up to 30s) for a
 * stable offline state, claim the ONLINING flag, bring up the SLI layer,
 * and start the driver timer.  Returns 0 on success (or if the adapter
 * is already online/onlining), non-zero on SLI bring-up failure.
 */
extern int
emlxs_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t rval = 0;
	uint32_t i = 0;

	/* Make sure adapter is offline or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going online
		 * (unlocked fast-path check; re-verified under the lock) */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is offline */
		if (hba->flag & FC_OFFLINE_MODE) {
			/* Mark it going online */
			hba->flag &= ~FC_OFFLINE_MODE;
			hba->flag |= FC_ONLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}
	/* NOTE: if 30s elapse without ever seeing FC_OFFLINE_MODE, the
	 * code proceeds with the online attempt anyway. */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going online...");

	if (rval = EMLXS_SLI_ONLINE(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
		    rval);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

		/* Set FC_OFFLINE_MODE */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_OFFLINE_MODE;
		hba->flag &= ~FC_ONLINING_MODE;
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);
	}

	/* Start the timer */
	emlxs_timer_start(hba);

	/* Set FC_ONLINE_MODE */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_ONLINE_MODE;
	hba->flag &= ~FC_ONLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_initialize(port);
	}
#endif /* SFCT_SUPPORT */

	return (rval);

} /* emlxs_online() */
1920 1920
1921 1921
/*
 * emlxs_offline
 *
 * Transition the adapter from online to offline: wait (up to 30s) for a
 * stable online state, claim the OFFLINING flag, declare link down,
 * quiesce and flush all outstanding IO, then shut down the SLI layer.
 * Returns 0 on success (or if already offline/offlining).
 */
extern int
emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i = 0;
	int rval = 1;

	/* Make sure adapter is online or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going offline
		 * (unlocked fast-path check; re-verified under the lock) */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is online */
		if (hba->flag & FC_ONLINE_MODE) {
			/* Mark it going offline */
			hba->flag &= ~FC_ONLINE_MODE;
			hba->flag |= FC_OFFLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going offline...");

	/* Declare link down (SLI4 uses the FCF shutdown path) */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_fcf_shutdown_notify(port, 1);
	} else {
		emlxs_linkdown(hba);
	}

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_shutdown(port);
	}
#endif /* SFCT_SUPPORT */

	/* Check if adapter was shutdown */
	if (hba->flag & FC_HARDWARE_ERROR) {
		/*
		 * Force mailbox cleanup
		 * This will wake any sleeping or polling threads
		 */
		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
	}

	/* Pause here for the IO to settle */
	delay(drv_sectohz(1));

	/* Unregister all nodes */
	emlxs_ffcleanup(hba);

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
#endif  /* FMA_SUPPORT */
	}

	/* Stop the timer */
	emlxs_timer_stop(hba);

	/* For safety flush every iotag list */
	if (emlxs_iotag_flush(hba)) {
		/* Pause here for the IO to flush */
		delay(drv_usectohz(1000));
	}

	/* Wait for poll command request to settle */
	while (hba->io_poll_count > 0) {
		delay(drv_usectohz(2000000)); /* 2 sec */
	}

	/* Shutdown the adapter interface */
	EMLXS_SLI_OFFLINE(hba, reset_requested);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~FC_OFFLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	rval = 0;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

/* NOTE(review): no goto targets this label in the visible body — it
 * appears vestigial; confirm before removing. */
done:

	return (rval);

} /* emlxs_offline() */
2030 2030
2031 2031
2032 2032
2033 2033 extern int
2034 2034 emlxs_power_down(emlxs_hba_t *hba)
2035 2035 {
2036 2036 #ifdef FMA_SUPPORT
2037 2037 emlxs_port_t *port = &PPORT;
2038 2038 #endif /* FMA_SUPPORT */
2039 2039 int32_t rval = 0;
2040 2040
2041 2041 if ((rval = emlxs_offline(hba, 0))) {
2042 2042 return (rval);
2043 2043 }
2044 2044 EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
2045 2045
2046 2046
2047 2047 #ifdef FMA_SUPPORT
2048 2048 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2049 2049 != DDI_FM_OK) {
2050 2050 EMLXS_MSGF(EMLXS_CONTEXT,
2051 2051 &emlxs_invalid_access_handle_msg, NULL);
2052 2052 return (1);
2053 2053 }
2054 2054 #endif /* FMA_SUPPORT */
2055 2055
2056 2056 return (0);
2057 2057
2058 2058 } /* End emlxs_power_down */
2059 2059
2060 2060
2061 2061 extern int
2062 2062 emlxs_power_up(emlxs_hba_t *hba)
2063 2063 {
2064 2064 #ifdef FMA_SUPPORT
2065 2065 emlxs_port_t *port = &PPORT;
2066 2066 #endif /* FMA_SUPPORT */
2067 2067 int32_t rval = 0;
2068 2068
2069 2069
2070 2070 #ifdef FMA_SUPPORT
2071 2071 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2072 2072 != DDI_FM_OK) {
2073 2073 EMLXS_MSGF(EMLXS_CONTEXT,
2074 2074 &emlxs_invalid_access_handle_msg, NULL);
2075 2075 return (1);
2076 2076 }
2077 2077 #endif /* FMA_SUPPORT */
2078 2078
2079 2079 /* Bring adapter online */
2080 2080 if ((rval = emlxs_online(hba))) {
2081 2081 if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
2082 2082 /* Put chip in D3 state */
2083 2083 (void) ddi_put8(hba->pci_acc_handle,
2084 2084 (uint8_t *)(hba->pci_addr +
2085 2085 hba->pci_cap_offset[PCI_CAP_ID_PM] +
2086 2086 PCI_PMCSR),
2087 2087 (uint8_t)PCI_PMCSR_D3HOT);
2088 2088 }
2089 2089 return (rval);
2090 2090 }
2091 2091
2092 2092 return (rval);
2093 2093
2094 2094 } /* emlxs_power_up() */
2095 2095
2096 2096
2097 2097 /*
2098 2098 *
2099 2099 * NAME: emlxs_ffcleanup
2100 2100 *
2101 2101 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
2102 2102 *
2103 2103 * EXECUTION ENVIRONMENT: process only
2104 2104 *
2105 2105 * CALLED FROM: CFG_TERM
2106 2106 *
2107 2107 * INPUT: hba - pointer to the dev_ctl area.
2108 2108 *
2109 2109 * RETURNS: none
2110 2110 */
2111 2111 extern void
2112 2112 emlxs_ffcleanup(emlxs_hba_t *hba)
2113 2113 {
2114 2114 emlxs_port_t *port = &PPORT;
2115 2115 uint32_t i;
2116 2116
2117 2117 /* Disable all but the mailbox interrupt */
2118 2118 EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
2119 2119
2120 2120 /* Make sure all port nodes are destroyed */
2121 2121 for (i = 0; i < MAX_VPORTS; i++) {
2122 2122 port = &VPORT(i);
2123 2123
2124 2124 if (port->node_count) {
2125 2125 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2126 2126 }
2127 2127 }
2128 2128
2129 2129 /* Clear all interrupt enable conditions */
2130 2130 EMLXS_SLI_DISABLE_INTR(hba, 0);
2131 2131
2132 2132 return;
2133 2133
2134 2134 } /* emlxs_ffcleanup() */
2135 2135
2136 2136
/*
 * emlxs_register_pkt
 *
 * Assign a free iotag to 'sbp' and enter it in the HBA's fc_table so
 * the completion path can map the tag back to the packet.  Returns the
 * assigned iotag, or 0 if the table is full (0 is never a valid tag).
 */
extern uint16_t
emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint16_t iotag;
	uint32_t i;

	hba = cp->hba;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* A non-zero iotag means this packet is already in the table */
	if (sbp->iotag != 0) {
		port = &PPORT;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
		    sbp->channel, sbp->iotag, sbp);
	}

	/*
	 * Circular scan for a free slot, starting at the last allocated
	 * tag (hba->fc_iotag) and wrapping from max_iotag back to 1.
	 * Slot 0 is reserved as the "no tag" sentinel.
	 */
	iotag = 0;
	for (i = 0; i < hba->max_iotag; i++) {
		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
			hba->fc_iotag = 1;
		}
		iotag = hba->fc_iotag++;

		/* Free slots are empty or hold the STALE_PACKET marker */
		if (hba->fc_table[iotag] == 0 ||
		    hba->fc_table[iotag] == STALE_PACKET) {
			hba->io_count++;
			hba->fc_table[iotag] = sbp;

			sbp->iotag = iotag;
			sbp->channel = cp;

			break;
		}
		iotag = 0;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "register_pkt: channel=%d iotag=%d sbp=%p",
	 * cp->channelno, iotag, sbp);
	 */

	return (iotag);

} /* emlxs_register_pkt() */
2188 2188
2189 2189
2190 2190
2191 2191 extern emlxs_buf_t *
2192 2192 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2193 2193 {
2194 2194 emlxs_hba_t *hba;
2195 2195 emlxs_buf_t *sbp;
2196 2196
2197 2197 sbp = NULL;
2198 2198 hba = cp->hba;
2199 2199
2200 2200 /* Check the iotag range */
2201 2201 if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2202 2202 return (NULL);
2203 2203 }
2204 2204
2205 2205 /* Remove the sbp from the table */
2206 2206 mutex_enter(&EMLXS_FCTAB_LOCK);
2207 2207 sbp = hba->fc_table[iotag];
2208 2208
2209 2209 if (!sbp || (sbp == STALE_PACKET)) {
2210 2210 mutex_exit(&EMLXS_FCTAB_LOCK);
2211 2211 return (sbp);
2212 2212 }
2213 2213
2214 2214 hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2215 2215 hba->io_count--;
2216 2216 sbp->iotag = 0;
2217 2217
2218 2218 mutex_exit(&EMLXS_FCTAB_LOCK);
2219 2219
2220 2220
2221 2221 /* Clean up the sbp */
2222 2222 mutex_enter(&sbp->mtx);
2223 2223
2224 2224 if (sbp->pkt_flags & PACKET_IN_TXQ) {
2225 2225 sbp->pkt_flags &= ~PACKET_IN_TXQ;
2226 2226 hba->channel_tx_count--;
2227 2227 }
2228 2228
2229 2229 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2230 2230 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2231 2231 }
2232 2232
2233 2233 if (sbp->bmp) {
2234 2234 emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2235 2235 sbp->bmp = 0;
2236 2236 }
2237 2237
2238 2238 mutex_exit(&sbp->mtx);
2239 2239
2240 2240 return (sbp);
2241 2241
2242 2242 } /* emlxs_unregister_pkt() */
2243 2243
2244 2244
2245 2245
/*
 * emlxs_tx_channel_flush
 *
 * Flush all pending IO's queued to all nodes for a given IO channel.
 *
 * With the TX channel lock held, every node's priority (ptx) and normal
 * (tx) queues for this channel are spliced onto a local 'abort' queue
 * and the channel's node service list is emptied.  Still under the
 * lock, each flushed packet's iotag (SLI3) or XRI (SLI4) is released
 * and, if 'fpkt' is given, the packet is charged to fpkt->flush_count
 * exactly once.  The lock is then dropped and each entry is either
 * completed with an abort/link-down status or, for driver-internal
 * IOCBs, freed or reposted.
 *
 * Returns the number of IO's flushed.
 */
extern uint32_t
emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCB *iocb;
	uint32_t channelno;
	Q abort;		/* local list of everything being flushed */
	NODELIST *ndlp;
	IOCB *icmd;
	MATCHMAP *mp;
	uint32_t i;
	uint8_t flag[MAX_CHANNEL];	/* channels needing re-service */

	channelno = cp->channelno;
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	/* While a node needs servicing */
	while (cp->nodeq.q_first) {
		ndlp = (NODELIST *) cp->nodeq.q_first;

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_ptx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
			}
			flag[channelno] = 1;

			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* Remove node from service queue */

		/* If this is the last node on list */
		if (cp->nodeq.q_last == (void *)ndlp) {
			cp->nodeq.q_last = NULL;
			cp->nodeq.q_first = NULL;
			cp->nodeq.q_cnt = 0;
		} else {
			/* Remove node from head of circular list */
			cp->nodeq.q_first = ndlp->nlp_next[channelno];
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    cp->nodeq.q_first;
			cp->nodeq.q_cnt--;
		}

		/* Clear node */
		ndlp->nlp_next[channelno] = NULL;
	}

	/*
	 * First cleanup the iocb's while still holding the lock:
	 * release each packet's iotag (SLI3) or XRI (SLI4) and mark
	 * it as being flushed.
	 */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);

			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;
	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's (outside the lock) */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;

						rp = &hba->sli.sli3.
						    ring[channelno];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
							emlxs_mem_put(
							    hba, MEM_BUF, tmp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
					    iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {

				/* Requeue the close/abort request */
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	}	/* end of while */

	/* Now trigger channel service on any channel marked above */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_channel_flush() */
2458 2458
2459 2459
2460 2460 /* Flush all IO's on all or a given ring for a given node */
2461 2461 extern uint32_t
2462 2462 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2463 2463 uint32_t shutdown, emlxs_buf_t *fpkt)
2464 2464 {
2465 2465 emlxs_hba_t *hba = HBA;
2466 2466 emlxs_buf_t *sbp;
2467 2467 uint32_t channelno;
2468 2468 CHANNEL *cp;
2469 2469 IOCB *icmd;
2470 2470 IOCBQ *iocbq;
2471 2471 NODELIST *prev;
2472 2472 IOCBQ *next;
2473 2473 IOCB *iocb;
2474 2474 Q abort;
2475 2475 uint32_t i;
2476 2476 MATCHMAP *mp;
2477 2477 uint8_t flag[MAX_CHANNEL];
2478 2478
2479 2479 bzero((void *)&abort, sizeof (Q));
2480 2480
2481 2481 /* Flush all I/O's on tx queue to this target */
2482 2482 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2483 2483
2484 2484 if (!ndlp->nlp_base && shutdown) {
2485 2485 ndlp->nlp_active = 0;
2486 2486 }
2487 2487
2488 2488 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2489 2489 cp = &hba->chan[channelno];
2490 2490
2491 2491 if (chan && cp != chan) {
2492 2492 continue;
2493 2493 }
2494 2494
2495 2495 if (!ndlp->nlp_base || shutdown) {
2496 2496 /* Check if priority queue is not empty */
2497 2497 if (ndlp->nlp_ptx[channelno].q_first) {
2498 2498 /* Transfer all iocb's to local queue */
2499 2499 if (abort.q_first == 0) {
2500 2500 abort.q_first =
2501 2501 ndlp->nlp_ptx[channelno].q_first;
2502 2502 } else {
2503 2503 ((IOCBQ *)(abort.q_last))->next =
2504 2504 (IOCBQ *)ndlp->nlp_ptx[channelno].
2505 2505 q_first;
2506 2506 }
2507 2507
2508 2508 flag[channelno] = 1;
2509 2509
2510 2510 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2511 2511 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2512 2512 }
2513 2513 }
2514 2514
2515 2515 /* Check if tx queue is not empty */
2516 2516 if (ndlp->nlp_tx[channelno].q_first) {
2517 2517
2518 2518 /* Transfer all iocb's to local queue */
2519 2519 if (abort.q_first == 0) {
2520 2520 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2521 2521 } else {
2522 2522 ((IOCBQ *)abort.q_last)->next =
2523 2523 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2524 2524 }
2525 2525
2526 2526 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2527 2527 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2528 2528 }
2529 2529
2530 2530 /* Clear the queue pointers */
2531 2531 ndlp->nlp_ptx[channelno].q_first = NULL;
2532 2532 ndlp->nlp_ptx[channelno].q_last = NULL;
2533 2533 ndlp->nlp_ptx[channelno].q_cnt = 0;
2534 2534
2535 2535 ndlp->nlp_tx[channelno].q_first = NULL;
2536 2536 ndlp->nlp_tx[channelno].q_last = NULL;
2537 2537 ndlp->nlp_tx[channelno].q_cnt = 0;
2538 2538
2539 2539 /* If this node was on the channel queue, remove it */
2540 2540 if (ndlp->nlp_next[channelno]) {
2541 2541 /* If this is the only node on list */
2542 2542 if (cp->nodeq.q_first == (void *)ndlp &&
2543 2543 cp->nodeq.q_last == (void *)ndlp) {
2544 2544 cp->nodeq.q_last = NULL;
2545 2545 cp->nodeq.q_first = NULL;
2546 2546 cp->nodeq.q_cnt = 0;
2547 2547 } else if (cp->nodeq.q_first == (void *)ndlp) {
2548 2548 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2549 2549 ((NODELIST *) cp->nodeq.q_last)->
2550 2550 nlp_next[channelno] = cp->nodeq.q_first;
2551 2551 cp->nodeq.q_cnt--;
2552 2552 } else {
2553 2553 /*
2554 2554 * This is a little more difficult find the
2555 2555 * previous node in the circular channel queue
2556 2556 */
2557 2557 prev = ndlp;
2558 2558 while (prev->nlp_next[channelno] != ndlp) {
2559 2559 prev = prev->nlp_next[channelno];
2560 2560 }
2561 2561
2562 2562 prev->nlp_next[channelno] =
2563 2563 ndlp->nlp_next[channelno];
2564 2564
2565 2565 if (cp->nodeq.q_last == (void *)ndlp) {
2566 2566 cp->nodeq.q_last = (void *)prev;
2567 2567 }
2568 2568 cp->nodeq.q_cnt--;
2569 2569
2570 2570 }
2571 2571
2572 2572 /* Clear node */
2573 2573 ndlp->nlp_next[channelno] = NULL;
2574 2574 }
2575 2575
2576 2576 }
2577 2577
2578 2578 /* First cleanup the iocb's while still holding the lock */
2579 2579 iocbq = (IOCBQ *) abort.q_first;
2580 2580 while (iocbq) {
2581 2581 /* Free the IoTag and the bmp */
2582 2582 iocb = &iocbq->iocb;
2583 2583
2584 2584 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2585 2585 sbp = iocbq->sbp;
2586 2586 if (sbp) {
2587 2587 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2588 2588 }
2589 2589 } else {
2590 2590 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2591 2591 iocb->ULPIOTAG, 0);
2592 2592 }
2593 2593
2594 2594 if (sbp && (sbp != STALE_PACKET)) {
2595 2595 mutex_enter(&sbp->mtx);
2596 2596 sbp->pkt_flags |= PACKET_IN_FLUSH;
2597 2597 /*
2598 2598 * If the fpkt is already set, then we will leave it
2599 2599 * alone. This ensures that this pkt is only accounted
2600 2600 * for on one fpkt->flush_count
2601 2601 */
2602 2602 if (!sbp->fpkt && fpkt) {
2603 2603 mutex_enter(&fpkt->mtx);
2604 2604 sbp->fpkt = fpkt;
2605 2605 fpkt->flush_count++;
2606 2606 mutex_exit(&fpkt->mtx);
2607 2607 }
2608 2608
2609 2609 mutex_exit(&sbp->mtx);
2610 2610 }
2611 2611
2612 2612 iocbq = (IOCBQ *) iocbq->next;
2613 2613
2614 2614 } /* end of while */
2615 2615
2616 2616 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2617 2617
2618 2618 /* Now abort the iocb's outside the locks */
2619 2619 iocbq = (IOCBQ *)abort.q_first;
2620 2620 while (iocbq) {
2621 2621 /* Save the next iocbq for now */
2622 2622 next = (IOCBQ *)iocbq->next;
2623 2623
2624 2624 /* Unlink this iocbq */
2625 2625 iocbq->next = NULL;
2626 2626
2627 2627 /* Get the pkt */
2628 2628 sbp = (emlxs_buf_t *)iocbq->sbp;
2629 2629
2630 2630 if (sbp) {
2631 2631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2632 2632 "tx: sbp=%p node=%p", sbp, sbp->node);
2633 2633
2634 2634 if (hba->state >= FC_LINK_UP) {
2635 2635 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2636 2636 IOERR_ABORT_REQUESTED, 1);
2637 2637 } else {
2638 2638 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2639 2639 IOERR_LINK_DOWN, 1);
2640 2640 }
2641 2641
2642 2642 }
2643 2643 /* Free the iocb and its associated buffers */
2644 2644 else {
2645 2645 /* CMD_CLOSE_XRI_CN should also free the memory */
2646 2646 icmd = &iocbq->iocb;
2647 2647
2648 2648 /* SLI3 */
2649 2649 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2650 2650 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2651 2651 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2652 2652 if ((hba->flag &
2653 2653 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2654 2654 /* HBA is detaching or offlining */
2655 2655 if (icmd->ULPCOMMAND !=
2656 2656 CMD_QUE_RING_LIST64_CN) {
2657 2657 void *tmp;
2658 2658 RING *rp;
2659 2659 int ch;
2660 2660
2661 2661 ch = ((CHANNEL *)
2662 2662 iocbq->channel)->channelno;
2663 2663 rp = &hba->sli.sli3.ring[ch];
2664 2664 for (i = 0;
2665 2665 i < icmd->ULPBDECOUNT;
2666 2666 i++) {
2667 2667 mp = EMLXS_GET_VADDR(
2668 2668 hba, rp, icmd);
2669 2669
2670 2670 tmp = (void *)mp;
2671 2671 if (mp) {
2672 2672 emlxs_mem_put(
2673 2673 hba, MEM_BUF, tmp);
2674 2674 }
2675 2675 }
2676 2676 }
2677 2677
2678 2678 emlxs_mem_put(hba, MEM_IOCB,
2679 2679 (void *)iocbq);
2680 2680 } else {
2681 2681 /* repost the unsolicited buffer */
2682 2682 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2683 2683 (CHANNEL *)iocbq->channel, iocbq);
2684 2684 }
2685 2685 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2686 2686 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2687 2687 /*
2688 2688 * Resend the abort iocbq if any
2689 2689 */
2690 2690 emlxs_tx_put(iocbq, 1);
2691 2691 }
2692 2692 }
2693 2693
2694 2694 iocbq = next;
2695 2695
2696 2696 } /* end of while */
2697 2697
2698 2698 /* Now trigger channel service */
2699 2699 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2700 2700 if (!flag[channelno]) {
2701 2701 continue;
2702 2702 }
2703 2703
2704 2704 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2705 2705 }
2706 2706
2707 2707 return (abort.q_cnt);
2708 2708
2709 2709 } /* emlxs_tx_node_flush() */
2710 2710
2711 2711
2712 2712 /* Check for IO's on all or a given ring for a given node */
2713 2713 extern uint32_t
2714 2714 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2715 2715 {
2716 2716 emlxs_hba_t *hba = HBA;
2717 2717 uint32_t channelno;
2718 2718 CHANNEL *cp;
2719 2719 uint32_t count;
2720 2720
2721 2721 count = 0;
2722 2722
2723 2723 /* Flush all I/O's on tx queue to this target */
2724 2724 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2725 2725
2726 2726 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2727 2727 cp = &hba->chan[channelno];
2728 2728
2729 2729 if (chan && cp != chan) {
2730 2730 continue;
2731 2731 }
2732 2732
2733 2733 /* Check if priority queue is not empty */
2734 2734 if (ndlp->nlp_ptx[channelno].q_first) {
2735 2735 count += ndlp->nlp_ptx[channelno].q_cnt;
2736 2736 }
2737 2737
2738 2738 /* Check if tx queue is not empty */
2739 2739 if (ndlp->nlp_tx[channelno].q_first) {
2740 2740 count += ndlp->nlp_tx[channelno].q_cnt;
2741 2741 }
2742 2742
2743 2743 }
2744 2744
2745 2745 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2746 2746
2747 2747 return (count);
2748 2748
2749 2749 } /* emlxs_tx_node_check() */
2750 2750
2751 2751
2752 2752
2753 2753 /* Flush all IO's on the any ring for a given node's lun */
2754 2754 extern uint32_t
2755 2755 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2756 2756 emlxs_buf_t *fpkt)
2757 2757 {
2758 2758 emlxs_hba_t *hba = HBA;
2759 2759 emlxs_buf_t *sbp;
2760 2760 uint32_t channelno;
2761 2761 IOCBQ *iocbq;
2762 2762 IOCBQ *prev;
2763 2763 IOCBQ *next;
2764 2764 IOCB *iocb;
2765 2765 IOCB *icmd;
2766 2766 Q abort;
2767 2767 uint32_t i;
2768 2768 MATCHMAP *mp;
2769 2769 uint8_t flag[MAX_CHANNEL];
2770 2770
2771 2771 if (lun == EMLXS_LUN_NONE) {
2772 2772 return (0);
2773 2773 }
2774 2774
2775 2775 bzero((void *)&abort, sizeof (Q));
2776 2776
2777 2777 /* Flush I/O's on txQ to this target's lun */
2778 2778 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2779 2779
2780 2780 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2781 2781
2782 2782 /* Scan the priority queue first */
2783 2783 prev = NULL;
2784 2784 iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2785 2785
2786 2786 while (iocbq) {
2787 2787 next = (IOCBQ *)iocbq->next;
2788 2788 iocb = &iocbq->iocb;
2789 2789 sbp = (emlxs_buf_t *)iocbq->sbp;
2790 2790
2791 2791 /* Check if this IO is for our lun */
2792 2792 if (sbp && (sbp->lun == lun)) {
2793 2793 /* Remove iocb from the node's ptx queue */
2794 2794 if (next == 0) {
2795 2795 ndlp->nlp_ptx[channelno].q_last =
2796 2796 (uint8_t *)prev;
2797 2797 }
2798 2798
2799 2799 if (prev == 0) {
2800 2800 ndlp->nlp_ptx[channelno].q_first =
2801 2801 (uint8_t *)next;
2802 2802 } else {
2803 2803 prev->next = next;
2804 2804 }
2805 2805
2806 2806 iocbq->next = NULL;
2807 2807 ndlp->nlp_ptx[channelno].q_cnt--;
2808 2808
2809 2809 /*
2810 2810 * Add this iocb to our local abort Q
2811 2811 */
2812 2812 if (abort.q_first) {
2813 2813 ((IOCBQ *)abort.q_last)->next = iocbq;
2814 2814 abort.q_last = (uint8_t *)iocbq;
2815 2815 abort.q_cnt++;
2816 2816 } else {
2817 2817 abort.q_first = (uint8_t *)iocbq;
2818 2818 abort.q_last = (uint8_t *)iocbq;
2819 2819 abort.q_cnt = 1;
2820 2820 }
2821 2821 iocbq->next = NULL;
2822 2822 flag[channelno] = 1;
2823 2823
2824 2824 } else {
2825 2825 prev = iocbq;
2826 2826 }
2827 2827
2828 2828 iocbq = next;
2829 2829
2830 2830 } /* while (iocbq) */
2831 2831
2832 2832
2833 2833 /* Scan the regular queue */
2834 2834 prev = NULL;
2835 2835 iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2836 2836
2837 2837 while (iocbq) {
2838 2838 next = (IOCBQ *)iocbq->next;
2839 2839 iocb = &iocbq->iocb;
2840 2840 sbp = (emlxs_buf_t *)iocbq->sbp;
2841 2841
2842 2842 /* Check if this IO is for our lun */
2843 2843 if (sbp && (sbp->lun == lun)) {
2844 2844 /* Remove iocb from the node's tx queue */
2845 2845 if (next == 0) {
2846 2846 ndlp->nlp_tx[channelno].q_last =
2847 2847 (uint8_t *)prev;
2848 2848 }
2849 2849
2850 2850 if (prev == 0) {
2851 2851 ndlp->nlp_tx[channelno].q_first =
2852 2852 (uint8_t *)next;
2853 2853 } else {
2854 2854 prev->next = next;
2855 2855 }
2856 2856
2857 2857 iocbq->next = NULL;
2858 2858 ndlp->nlp_tx[channelno].q_cnt--;
2859 2859
2860 2860 /*
2861 2861 * Add this iocb to our local abort Q
2862 2862 */
2863 2863 if (abort.q_first) {
2864 2864 ((IOCBQ *) abort.q_last)->next = iocbq;
2865 2865 abort.q_last = (uint8_t *)iocbq;
2866 2866 abort.q_cnt++;
2867 2867 } else {
2868 2868 abort.q_first = (uint8_t *)iocbq;
2869 2869 abort.q_last = (uint8_t *)iocbq;
2870 2870 abort.q_cnt = 1;
2871 2871 }
2872 2872 iocbq->next = NULL;
2873 2873 } else {
2874 2874 prev = iocbq;
2875 2875 }
2876 2876
2877 2877 iocbq = next;
2878 2878
2879 2879 } /* while (iocbq) */
2880 2880 } /* for loop */
2881 2881
2882 2882 /* First cleanup the iocb's while still holding the lock */
2883 2883 iocbq = (IOCBQ *)abort.q_first;
2884 2884 while (iocbq) {
2885 2885 /* Free the IoTag and the bmp */
2886 2886 iocb = &iocbq->iocb;
2887 2887
2888 2888 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2889 2889 sbp = iocbq->sbp;
2890 2890 if (sbp) {
2891 2891 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2892 2892 }
2893 2893 } else {
2894 2894 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2895 2895 iocb->ULPIOTAG, 0);
2896 2896 }
2897 2897
2898 2898 if (sbp && (sbp != STALE_PACKET)) {
2899 2899 mutex_enter(&sbp->mtx);
2900 2900 sbp->pkt_flags |= PACKET_IN_FLUSH;
2901 2901 /*
2902 2902 * If the fpkt is already set, then we will leave it
2903 2903 * alone. This ensures that this pkt is only accounted
2904 2904 * for on one fpkt->flush_count
2905 2905 */
2906 2906 if (!sbp->fpkt && fpkt) {
2907 2907 mutex_enter(&fpkt->mtx);
2908 2908 sbp->fpkt = fpkt;
2909 2909 fpkt->flush_count++;
2910 2910 mutex_exit(&fpkt->mtx);
2911 2911 }
2912 2912
2913 2913 mutex_exit(&sbp->mtx);
2914 2914 }
2915 2915
2916 2916 iocbq = (IOCBQ *) iocbq->next;
2917 2917
2918 2918 } /* end of while */
2919 2919
2920 2920 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2921 2921
2922 2922 /* Now abort the iocb's outside the locks */
2923 2923 iocbq = (IOCBQ *)abort.q_first;
2924 2924 while (iocbq) {
2925 2925 /* Save the next iocbq for now */
2926 2926 next = (IOCBQ *)iocbq->next;
2927 2927
2928 2928 /* Unlink this iocbq */
2929 2929 iocbq->next = NULL;
2930 2930
2931 2931 /* Get the pkt */
2932 2932 sbp = (emlxs_buf_t *)iocbq->sbp;
2933 2933
2934 2934 if (sbp) {
2935 2935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2936 2936 "tx: sbp=%p node=%p", sbp, sbp->node);
2937 2937
2938 2938 if (hba->state >= FC_LINK_UP) {
2939 2939 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2940 2940 IOERR_ABORT_REQUESTED, 1);
2941 2941 } else {
2942 2942 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2943 2943 IOERR_LINK_DOWN, 1);
2944 2944 }
2945 2945 }
2946 2946
2947 2947 /* Free the iocb and its associated buffers */
2948 2948 else {
2949 2949 /* Should never happen! */
2950 2950 icmd = &iocbq->iocb;
2951 2951
2952 2952 /* SLI3 */
2953 2953 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2954 2954 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2955 2955 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2956 2956 if ((hba->flag &
2957 2957 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2958 2958 /* HBA is detaching or offlining */
2959 2959 if (icmd->ULPCOMMAND !=
2960 2960 CMD_QUE_RING_LIST64_CN) {
2961 2961 void *tmp;
2962 2962 RING *rp;
2963 2963 int ch;
2964 2964
2965 2965 ch = ((CHANNEL *)
2966 2966 iocbq->channel)->channelno;
2967 2967 rp = &hba->sli.sli3.ring[ch];
2968 2968 for (i = 0;
2969 2969 i < icmd->ULPBDECOUNT;
2970 2970 i++) {
2971 2971 mp = EMLXS_GET_VADDR(
2972 2972 hba, rp, icmd);
2973 2973
2974 2974 tmp = (void *)mp;
2975 2975 if (mp) {
2976 2976 emlxs_mem_put(
2977 2977 hba, MEM_BUF, tmp);
2978 2978 }
2979 2979 }
2980 2980 }
2981 2981
2982 2982 emlxs_mem_put(hba, MEM_IOCB,
2983 2983 (void *)iocbq);
2984 2984 } else {
2985 2985 /* repost the unsolicited buffer */
2986 2986 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2987 2987 (CHANNEL *)iocbq->channel, iocbq);
2988 2988 }
2989 2989 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2990 2990 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2991 2991 /*
2992 2992 * Resend the abort iocbq if any
2993 2993 */
2994 2994 emlxs_tx_put(iocbq, 1);
2995 2995 }
2996 2996 }
2997 2997
2998 2998 iocbq = next;
2999 2999
3000 3000 } /* end of while */
3001 3001
3002 3002 /* Now trigger channel service */
3003 3003 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3004 3004 if (!flag[channelno]) {
3005 3005 continue;
3006 3006 }
3007 3007
3008 3008 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3009 3009 }
3010 3010
3011 3011 return (abort.q_cnt);
3012 3012
3013 3013 } /* emlxs_tx_lun_flush() */
3014 3014
3015 3015
/*
 * emlxs_tx_put
 *
 * Queue an IOCB for transmit on its node's per-channel queue.
 *
 * Priority IOCBs (IOCB_PRIORITY, e.g. reset/close xri) go on the node's
 * ptx queue; everything else goes on the tx queue.  If the node is not
 * already on the channel's circular service list, it is linked in (the
 * base/command node is inserted at the head, others at the tail).  An
 * inactive node or an aborting packet is flushed immediately instead of
 * being queued.  'lock' selects whether this routine acquires
 * EMLXS_TX_CHANNEL_LOCK itself or the caller already holds it.
 */
extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t channelno;
	NODELIST *nlp;
	CHANNEL *cp;
	emlxs_buf_t *sbp;

	port = (emlxs_port_t *)iocbq->port;
	hba = HBA;
	cp = (CHANNEL *)iocbq->channel;
	nlp = (NODELIST *)iocbq->node;
	channelno = cp->channelno;
	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (nlp == NULL) {
		/* Set node to base node by default */
		nlp = &port->node_base;

		iocbq->node = (void *)nlp;

		if (sbp) {
			sbp->node = (void *)nlp;
		}
	}

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	/*
	 * If the node is gone or the packet is being aborted, flush the
	 * request now instead of queuing it.
	 */
	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
		if (sbp) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;
			mutex_exit(&sbp->mtx);

			/* Release the iotag (SLI3) or XRI (SLI4) */
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			} else {
				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
			}

			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
			return;
		} else {
			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			/* Driver-internal IOCB: just return it to the pool */
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		return;
	}

	if (sbp) {

		mutex_enter(&sbp->mtx);

		/* Already queued or completing elsewhere; don't queue twice */
		if (sbp->pkt_flags &
		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
			mutex_exit(&sbp->mtx);
			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}
			return;
		}

		sbp->pkt_flags |= PACKET_IN_TXQ;
		hba->channel_tx_count++;

		mutex_exit(&sbp->mtx);
	}


	/* Check iocbq priority */
	/* Some IOCB has the high priority like reset/close xri etc */
	if (iocbq->flag & IOCB_PRIORITY) {
		/* Add the iocb to the bottom of the node's ptx queue */
		if (nlp->nlp_ptx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt++;
		} else {
			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	} else {	/* Normal priority */


		/* Add the iocb to the bottom of the node's tx queue */
		if (nlp->nlp_tx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt++;
		} else {
			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	}


	/*
	 * Check if the node is not already on channel queue and
	 * (is not closed or is a priority request)
	 */
	if (!nlp->nlp_next[channelno] &&
	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
	    (iocbq->flag & IOCB_PRIORITY))) {
		/* If so, then add it to the channel queue */
		if (cp->nodeq.q_first) {
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    (uint8_t *)nlp;
			nlp->nlp_next[channelno] = cp->nodeq.q_first;

			/*
			 * If this is not the base node then add it
			 * to the tail
			 */
			if (!nlp->nlp_base) {
				cp->nodeq.q_last = (uint8_t *)nlp;
			} else {	/* Otherwise, add it to the head */

				/* The command node always gets priority */
				cp->nodeq.q_first = (uint8_t *)nlp;
			}

			cp->nodeq.q_cnt++;
		} else {
			/* First node on this channel; list is circular */
			cp->nodeq.q_first = (uint8_t *)nlp;
			cp->nodeq.q_last = (uint8_t *)nlp;
			nlp->nlp_next[channelno] = nlp;
			cp->nodeq.q_cnt = 1;
		}
	}

	HBASTATS.IocbTxPut[channelno]++;

	/* Adjust the channel timeout timer */
	cp->timeout = hba->timer_tics + 5;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_put() */
3182 3182
3183 3183
/*
 * emlxs_tx_get
 *
 * Dequeue the next pending IOCB from this channel's transmit queues.
 * Services the node at the head of the channel's node queue, preferring
 * the node's priority queue (nlp_ptx) over its normal tx queue (nlp_tx);
 * the normal queue is skipped while the node is marked NLP_CLOSED.
 * Nodes with nothing left to send are removed from the channel's node
 * queue; otherwise the node queue is rotated (except for the base node,
 * which is drained first). Returns the dequeued IOCBQ, or NULL when
 * nothing is pending.
 *
 * If 'lock' is non-zero the TX_CHANNEL lock is taken and released here;
 * otherwise the caller must already hold it.
 */
extern IOCBQ *
emlxs_tx_get(CHANNEL *cp, uint32_t lock)
{
	emlxs_hba_t *hba;
	uint32_t channelno;
	IOCBQ *iocbq;
	NODELIST *nlp;
	emlxs_buf_t *sbp;

	hba = cp->hba;
	channelno = cp->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

begin:

	iocbq = NULL;

	/* Check if a node needs servicing */
	if (cp->nodeq.q_first) {
		nlp = (NODELIST *)cp->nodeq.q_first;

		/* Get next iocb from node's priority queue */

		if (nlp->nlp_ptx[channelno].q_first) {
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_ptx[channelno].q_first = NULL;
				nlp->nlp_ptx[channelno].q_last = NULL;
				nlp->nlp_ptx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_ptx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_ptx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Get next iocb from node tx queue if node not closed */
		else if (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_tx[channelno].q_first = NULL;
				nlp->nlp_tx[channelno].q_last = NULL;
				nlp->nlp_tx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_tx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_tx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Now deal with node itself */

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node, then don't shift the
			 * pointers. We want to drain the base node before
			 * moving on
			 */
			if (!nlp->nlp_base) {
				/*
				 * Just shift channel queue pointers to next
				 * node
				 */
				cp->nodeq.q_last = (void *)nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the last node on list */
			if (cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else {
				/* Remove node from head */
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *)cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/*
		 * If no iocbq was found on this node, then it will have
		 * been removed. So try again.
		 */
		if (!iocbq) {
			goto begin;
		}

		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Check flags before we enter mutex in case this
			 * has been flushed and destroyed
			 */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				goto begin;
			}

			mutex_enter(&sbp->mtx);

			/* Re-check under the packet mutex (double-check) */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				mutex_exit(&sbp->mtx);
				goto begin;
			}

			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;

			mutex_exit(&sbp->mtx);
		}
	}

	if (iocbq) {
		HBASTATS.IocbTxGet[channelno]++;
	}

	/* Adjust the ring timeout timer */
	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return (iocbq);

} /* emlxs_tx_get() */
3340 3340
3341 3341
/*
 * emlxs_tx_move
 *
 * Pull all pending FCP commands for node ndlp off from_chan's transmit
 * queue, release their hardware resources (SLI4 XRI or SLI3 iotag
 * registration) and complete them back to the caller with LOCAL_REJECT;
 * finally any remaining chip-queue IO for the node on from_chan is
 * flushed.
 *
 * NOTE(review): despite the historical name, the visible code completes
 * and flushes the dequeued commands rather than requeueing them on
 * to_chan; to_chan is only consulted to check for the FCP channel.
 * The 'cmd' argument is unused (kept for the interface).
 *
 * If 'lock' is non-zero the TX_CHANNEL lock is taken and released here;
 * otherwise the caller must already hold it.
 */
extern void
emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
    uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t fchanno, tchanno, i;

	IOCBQ *iocbq;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb, *icmd;
	Q tbm;		/* To Be Moved Q */
	MATCHMAP *mp;

	NODELIST *nlp = ndlp;
	emlxs_buf_t *sbp;

	NODELIST *n_prev = NULL;
	NODELIST *n_next = NULL;
	uint16_t count = 0;

	hba = from_chan->hba;
	port = &PPORT;
	cmd = cmd; /* To pass lint */

	fchanno = from_chan->channelno;
	tchanno = to_chan->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	bzero((void *)&tbm, sizeof (Q));

	/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
	prev = NULL;
	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;

	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		/* Check if this iocb is fcp cmd */
		iocb = &iocbq->iocb;

		switch (iocb->ULPCOMMAND) {
		/* FCP commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			/* We found a fcp cmd */
			break;
		default:
			/* this is not fcp cmd continue */
			prev = iocbq;
			iocbq = next;
			continue;
		}

		/* found a fcp cmd iocb in fchanno txq, now deque it */
		if (next == NULL) {
			/* This is the last iocbq */
			nlp->nlp_tx[fchanno].q_last =
			    (uint8_t *)prev;
		}

		if (prev == NULL) {
			/* This is the first one then remove it from head */
			nlp->nlp_tx[fchanno].q_first =
			    (uint8_t *)next;
		} else {
			prev->next = next;
		}

		iocbq->next = NULL;
		nlp->nlp_tx[fchanno].q_cnt--;

		/* Add this iocb to our local toberemovedq */
		/* This way we donot hold the TX_CHANNEL lock too long */

		if (tbm.q_first) {
			((IOCBQ *)tbm.q_last)->next = iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt++;
		} else {
			tbm.q_first = (uint8_t *)iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt = 1;
		}

		iocbq = next;

	} /* While (iocbq) */

	/*
	 * If commands were collected and the target channel is the FCP
	 * channel, fix up from_chan's node queue: rotate it if the node
	 * still has pending work, otherwise unlink the node entirely.
	 */
	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {

		/* from_chan->nodeq.q_first must be non NULL */
		if (from_chan->nodeq.q_first) {

			/* nodeq is not empty, now deal with the node itself */
			if ((nlp->nlp_tx[fchanno].q_first)) {

				if (!nlp->nlp_base) {
					from_chan->nodeq.q_last =
					    (void *)nlp;
					from_chan->nodeq.q_first =
					    nlp->nlp_next[fchanno];
				}

			} else {
				n_prev = (NODELIST *)from_chan->nodeq.q_first;
				count = from_chan->nodeq.q_cnt;

				if (n_prev == nlp) {

					/* If this is the only node on list */
					if (from_chan->nodeq.q_last ==
					    (void *)nlp) {
						from_chan->nodeq.q_last =
						    NULL;
						from_chan->nodeq.q_first =
						    NULL;
						from_chan->nodeq.q_cnt = 0;
					} else {
						from_chan->nodeq.q_first =
						    nlp->nlp_next[fchanno];
						((NODELIST *)from_chan->
						    nodeq.q_last)->
						    nlp_next[fchanno] =
						    from_chan->nodeq.q_first;
						from_chan->nodeq.q_cnt--;
					}
					/* Clear node */
					nlp->nlp_next[fchanno] = NULL;
				} else {
					/* Walk the list to find nlp */
					count--;
					do {
						n_next =
						    n_prev->nlp_next[fchanno];
						if (n_next == nlp) {
							break;
						}
						n_prev = n_next;
					} while (count--);

					if (count != 0) {

						if (n_next ==
						    (NODELIST *)from_chan->
						    nodeq.q_last) {
							n_prev->
							    nlp_next[fchanno]
							    =
							    ((NODELIST *)
							    from_chan->
							    nodeq.q_last)->
							    nlp_next
							    [fchanno];
							from_chan->nodeq.q_last
							    = (uint8_t *)n_prev;
						} else {

							n_prev->
							    nlp_next[fchanno]
							    =
							    n_next-> nlp_next
							    [fchanno];
						}
						from_chan->nodeq.q_cnt--;
						/* Clear node */
						nlp->nlp_next[fchanno] =
						    NULL;
					}
				}
			}
		}
	}

	/* Now cleanup the iocb's */
	prev = NULL;
	iocbq = (IOCBQ *)tbm.q_first;

	while (iocbq) {

		next = (IOCBQ *)iocbq->next;

		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}
			mutex_exit(&sbp->mtx);
		}
		iocbq = next;

	} /* end of while */

	/* Second pass: complete packets (or free bare iocbs) */
	iocbq = (IOCBQ *)tbm.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;
						int ch;

						ch = from_chan->channelno;
						rp = &hba->sli.sli3.ring[ch];

						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}

					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    from_chan, iocbq);
				}
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now flush the chipq if any */
	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {

		/* Drop the lock across the flush to avoid recursion */
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);

		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_move */
3668 3668
3669 3669
3670 3670 extern uint32_t
3671 3671 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3672 3672 emlxs_buf_t *fpkt)
3673 3673 {
3674 3674 emlxs_hba_t *hba = HBA;
3675 3675 emlxs_buf_t *sbp;
3676 3676 IOCBQ *iocbq;
3677 3677 IOCBQ *next;
3678 3678 Q abort;
3679 3679 CHANNEL *cp;
3680 3680 uint32_t channelno;
3681 3681 uint8_t flag[MAX_CHANNEL];
3682 3682 uint32_t iotag;
3683 3683
3684 3684 bzero((void *)&abort, sizeof (Q));
3685 3685 bzero((void *)flag, sizeof (flag));
3686 3686
3687 3687 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3688 3688 cp = &hba->chan[channelno];
3689 3689
3690 3690 if (chan && cp != chan) {
3691 3691 continue;
3692 3692 }
3693 3693
3694 3694 mutex_enter(&EMLXS_FCTAB_LOCK);
3695 3695
3696 3696 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3697 3697 sbp = hba->fc_table[iotag];
3698 3698
3699 3699 if (sbp && (sbp != STALE_PACKET) &&
3700 3700 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3701 3701 (sbp->node == ndlp) &&
3702 3702 (sbp->channel == cp) &&
3703 3703 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3704 3704 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3705 3705 fpkt);
3706 3706 }
3707 3707
3708 3708 }
3709 3709 mutex_exit(&EMLXS_FCTAB_LOCK);
3710 3710
3711 3711 } /* for */
3712 3712
3713 3713 /* Now put the iocb's on the tx queue */
3714 3714 iocbq = (IOCBQ *)abort.q_first;
3715 3715 while (iocbq) {
3716 3716 /* Save the next iocbq for now */
3717 3717 next = (IOCBQ *)iocbq->next;
3718 3718
3719 3719 /* Unlink this iocbq */
3720 3720 iocbq->next = NULL;
3721 3721
3722 3722 /* Send this iocbq */
3723 3723 emlxs_tx_put(iocbq, 1);
3724 3724
3725 3725 iocbq = next;
3726 3726 }
3727 3727
3728 3728 /* Now trigger channel service */
3729 3729 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3730 3730 if (!flag[channelno]) {
3731 3731 continue;
3732 3732 }
3733 3733
3734 3734 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3735 3735 }
3736 3736
3737 3737 return (abort.q_cnt);
3738 3738
3739 3739 } /* emlxs_chipq_node_flush() */
3740 3740
3741 3741
/*
 * Flush all IO's left on all iotag lists.
 *
 * For every channel, each valid entry in hba->fc_table is removed from
 * the table, marked IOSTAT_LOCAL_REJECT/IOERR_LINK_DOWN, stripped of its
 * hardware resources (SLI4 XRI, or SLI3 TXQ/CHIPQ flags and BPL buffer)
 * and chained onto the channel's response queue for deferred completion
 * by the channel's interrupt thread. Returns the total number of IOs
 * flushed across all channels.
 */
extern uint32_t
emlxs_iotag_flush(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCB *iocb;
	Q abort;
	CHANNEL *cp;
	uint32_t channelno;
	uint32_t iotag;
	uint32_t count;

	count = 0;
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		bzero((void *)&abort, sizeof (Q));

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			/* Check if the slot is empty */
			if (!sbp || (sbp == STALE_PACKET)) {
				continue;
			}

			/* We are building an abort list per channel */
			if (sbp->channel != cp) {
				continue;
			}

			/* Claim the slot; the IO is now ours to flush */
			hba->fc_table[iotag] = STALE_PACKET;
			hba->io_count--;

			/* Check if IO is valid */
			if (!(sbp->pkt_flags & PACKET_VALID) ||
			    (sbp->pkt_flags & (PACKET_ULP_OWNED|
			    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "iotag_flush: Invalid IO found. iotag=%d",
				    iotag);

				continue;
			}

			sbp->iotag = 0;

			/* Set IOCB status */
			iocbq = &sbp->iocbq;
			iocb = &iocbq->iocb;

			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
			iocb->ULPLE = 1;
			iocbq->next = NULL;

			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				/* Log the XRI state before releasing it */
				if (sbp->xrip) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=%p state=%x flag=%x",
					    iotag, sbp, sbp->xrip,
					    sbp->xrip->state, sbp->xrip->flag);
				} else {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=NULL", iotag, sbp);
				}

				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
			} else {
				/* Clean up the sbp */
				mutex_enter(&sbp->mtx);

				if (sbp->pkt_flags & PACKET_IN_TXQ) {
					sbp->pkt_flags &= ~PACKET_IN_TXQ;
					hba->channel_tx_count --;
				}

				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
				}

				if (sbp->bmp) {
					emlxs_mem_put(hba, MEM_BPL,
					    (void *)sbp->bmp);
					sbp->bmp = 0;
				}

				mutex_exit(&sbp->mtx);
			}

			/* At this point all nodes are assumed destroyed */
			mutex_enter(&sbp->mtx);
			sbp->node = 0;
			mutex_exit(&sbp->mtx);

			/* Add this iocb to our local abort Q */
			if (abort.q_first) {
				((IOCBQ *)abort.q_last)->next = iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt++;
			} else {
				abort.q_first = (uint8_t *)iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt = 1;
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Trigger deferred completion */
		if (abort.q_first) {
			/* Append the abort list to the response queue */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			} else {
				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			}
			mutex_exit(&cp->rsp_lock);

			emlxs_thread_trigger2(&cp->intr_thread,
			    emlxs_proc_channel, cp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "iotag_flush: channel=%d count=%d",
			    channelno, abort.q_cnt);

			count += abort.q_cnt;
		}
	}

	return (count);

} /* emlxs_iotag_flush() */
3885 3885
3886 3886
3887 3887
3888 3888 /* Checks for IO's on all or a given channel for a given node */
3889 3889 extern uint32_t
3890 3890 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3891 3891 {
3892 3892 emlxs_hba_t *hba = HBA;
3893 3893 emlxs_buf_t *sbp;
3894 3894 CHANNEL *cp;
3895 3895 uint32_t channelno;
3896 3896 uint32_t count;
3897 3897 uint32_t iotag;
3898 3898
3899 3899 count = 0;
3900 3900
3901 3901 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3902 3902 cp = &hba->chan[channelno];
3903 3903
3904 3904 if (chan && cp != chan) {
3905 3905 continue;
3906 3906 }
3907 3907
3908 3908 mutex_enter(&EMLXS_FCTAB_LOCK);
3909 3909
3910 3910 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3911 3911 sbp = hba->fc_table[iotag];
3912 3912
3913 3913 if (sbp && (sbp != STALE_PACKET) &&
3914 3914 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3915 3915 (sbp->node == ndlp) &&
3916 3916 (sbp->channel == cp) &&
3917 3917 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3918 3918 count++;
3919 3919 }
3920 3920
3921 3921 }
3922 3922 mutex_exit(&EMLXS_FCTAB_LOCK);
3923 3923
3924 3924 } /* for */
3925 3925
3926 3926 return (count);
3927 3927
3928 3928 } /* emlxs_chipq_node_check() */
3929 3929
3930 3930
3931 3931
3932 3932 /* Flush all IO's for a given node's lun (on any channel) */
3933 3933 extern uint32_t
3934 3934 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3935 3935 uint32_t lun, emlxs_buf_t *fpkt)
3936 3936 {
3937 3937 emlxs_hba_t *hba = HBA;
3938 3938 emlxs_buf_t *sbp;
3939 3939 IOCBQ *iocbq;
3940 3940 IOCBQ *next;
3941 3941 Q abort;
3942 3942 uint32_t iotag;
3943 3943 uint8_t flag[MAX_CHANNEL];
3944 3944 uint32_t channelno;
3945 3945
3946 3946 if (lun == EMLXS_LUN_NONE) {
3947 3947 return (0);
3948 3948 }
3949 3949
3950 3950 bzero((void *)flag, sizeof (flag));
3951 3951 bzero((void *)&abort, sizeof (Q));
3952 3952
3953 3953 mutex_enter(&EMLXS_FCTAB_LOCK);
3954 3954 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3955 3955 sbp = hba->fc_table[iotag];
3956 3956
3957 3957 if (sbp && (sbp != STALE_PACKET) &&
3958 3958 sbp->pkt_flags & PACKET_IN_CHIPQ &&
3959 3959 sbp->node == ndlp &&
3960 3960 sbp->lun == lun &&
3961 3961 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3962 3962 emlxs_sbp_abort_add(port, sbp,
3963 3963 &abort, flag, fpkt);
3964 3964 }
3965 3965 }
3966 3966 mutex_exit(&EMLXS_FCTAB_LOCK);
3967 3967
3968 3968 /* Now put the iocb's on the tx queue */
3969 3969 iocbq = (IOCBQ *)abort.q_first;
3970 3970 while (iocbq) {
3971 3971 /* Save the next iocbq for now */
3972 3972 next = (IOCBQ *)iocbq->next;
3973 3973
3974 3974 /* Unlink this iocbq */
3975 3975 iocbq->next = NULL;
3976 3976
3977 3977 /* Send this iocbq */
3978 3978 emlxs_tx_put(iocbq, 1);
3979 3979
3980 3980 iocbq = next;
3981 3981 }
3982 3982
3983 3983 /* Now trigger channel service */
3984 3984 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3985 3985 if (!flag[channelno]) {
3986 3986 continue;
3987 3987 }
3988 3988
3989 3989 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3990 3990 }
3991 3991
3992 3992 return (abort.q_cnt);
3993 3993
3994 3994 } /* emlxs_chipq_lun_flush() */
3995 3995
3996 3996
3997 3997
3998 3998 /*
3999 3999 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4000 4000 * This must be called while holding the EMLXS_FCTAB_LOCK
4001 4001 */
4002 4002 extern IOCBQ *
4003 4003 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4004 4004 uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4005 4005 {
4006 4006 emlxs_hba_t *hba = HBA;
4007 4007 IOCBQ *iocbq;
4008 4008 IOCB *iocb;
4009 4009 emlxs_wqe_t *wqe;
4010 4010 emlxs_buf_t *sbp;
4011 4011 uint16_t abort_iotag;
4012 4012
4013 4013 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4014 4014 return (NULL);
4015 4015 }
4016 4016
4017 4017 iocbq->channel = (void *)cp;
4018 4018 iocbq->port = (void *)port;
4019 4019 iocbq->node = (void *)ndlp;
4020 4020 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4021 4021
4022 4022 /*
4023 4023 * set up an iotag using special Abort iotags
4024 4024 */
4025 4025 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4026 4026 hba->fc_oor_iotag = hba->max_iotag;
4027 4027 }
4028 4028 abort_iotag = hba->fc_oor_iotag++;
4029 4029
4030 4030
4031 4031 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4032 4032 wqe = &iocbq->wqe;
4033 4033 sbp = hba->fc_table[iotag];
4034 4034
4035 4035 /* Try to issue abort by XRI if possible */
4036 4036 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4037 4037 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4038 4038 wqe->AbortTag = iotag;
4039 4039 } else {
4040 4040 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4041 4041 wqe->AbortTag = sbp->xrip->XRI;
4042 4042 }
4043 4043 wqe->un.Abort.IA = 0;
4044 4044 wqe->RequestTag = abort_iotag;
4045 4045 wqe->Command = CMD_ABORT_XRI_CX;
4046 4046 wqe->Class = CLASS3;
4047 4047 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4048 4048 wqe->CmdType = WQE_TYPE_ABORT;
4049 4049 } else {
4050 4050 iocb = &iocbq->iocb;
4051 4051 iocb->ULPIOTAG = abort_iotag;
4052 4052 iocb->un.acxri.abortType = flag;
4053 4053 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4054 4054 iocb->un.acxri.abortIoTag = iotag;
4055 4055 iocb->ULPLE = 1;
4056 4056 iocb->ULPCLASS = class;
4057 4057 iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4058 4058 iocb->ULPOWNER = OWN_CHIP;
4059 4059 }
4060 4060
4061 4061 return (iocbq);
4062 4062
4063 4063 } /* emlxs_create_abort_xri_cn() */
4064 4064
4065 4065
4066 4066 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4067 4067 extern IOCBQ *
4068 4068 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4069 4069 CHANNEL *cp, uint8_t class, int32_t flag)
4070 4070 {
4071 4071 emlxs_hba_t *hba = HBA;
4072 4072 IOCBQ *iocbq;
4073 4073 IOCB *iocb;
4074 4074 emlxs_wqe_t *wqe;
4075 4075 uint16_t abort_iotag;
4076 4076
4077 4077 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4078 4078 return (NULL);
4079 4079 }
4080 4080
4081 4081 iocbq->channel = (void *)cp;
4082 4082 iocbq->port = (void *)port;
4083 4083 iocbq->node = (void *)ndlp;
4084 4084 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4085 4085
4086 4086 /*
4087 4087 * set up an iotag using special Abort iotags
4088 4088 */
4089 4089 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4090 4090 hba->fc_oor_iotag = hba->max_iotag;
4091 4091 }
4092 4092 abort_iotag = hba->fc_oor_iotag++;
4093 4093
4094 4094 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4095 4095 wqe = &iocbq->wqe;
4096 4096 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4097 4097 wqe->un.Abort.IA = 0;
4098 4098 wqe->RequestTag = abort_iotag;
4099 4099 wqe->AbortTag = xid;
4100 4100 wqe->Command = CMD_ABORT_XRI_CX;
4101 4101 wqe->Class = CLASS3;
4102 4102 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4103 4103 wqe->CmdType = WQE_TYPE_ABORT;
4104 4104 } else {
4105 4105 iocb = &iocbq->iocb;
4106 4106 iocb->ULPCONTEXT = xid;
4107 4107 iocb->ULPIOTAG = abort_iotag;
4108 4108 iocb->un.acxri.abortType = flag;
4109 4109 iocb->ULPLE = 1;
4110 4110 iocb->ULPCLASS = class;
4111 4111 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4112 4112 iocb->ULPOWNER = OWN_CHIP;
4113 4113 }
4114 4114
4115 4115 return (iocbq);
4116 4116
4117 4117 } /* emlxs_create_abort_xri_cx() */
4118 4118
4119 4119
4120 4120
4121 4121 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4122 4122 extern IOCBQ *
4123 4123 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4124 4124 uint16_t iotag, CHANNEL *cp)
4125 4125 {
4126 4126 emlxs_hba_t *hba = HBA;
4127 4127 IOCBQ *iocbq;
4128 4128 IOCB *iocb;
4129 4129 emlxs_wqe_t *wqe;
4130 4130 emlxs_buf_t *sbp;
4131 4131 uint16_t abort_iotag;
4132 4132
4133 4133 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4134 4134 return (NULL);
4135 4135 }
4136 4136
4137 4137 iocbq->channel = (void *)cp;
4138 4138 iocbq->port = (void *)port;
4139 4139 iocbq->node = (void *)ndlp;
4140 4140 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4141 4141
4142 4142 /*
4143 4143 * set up an iotag using special Abort iotags
4144 4144 */
4145 4145 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4146 4146 hba->fc_oor_iotag = hba->max_iotag;
4147 4147 }
4148 4148 abort_iotag = hba->fc_oor_iotag++;
4149 4149
4150 4150 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4151 4151 wqe = &iocbq->wqe;
4152 4152 sbp = hba->fc_table[iotag];
4153 4153
4154 4154 /* Try to issue close by XRI if possible */
4155 4155 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4156 4156 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4157 4157 wqe->AbortTag = iotag;
4158 4158 } else {
4159 4159 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4160 4160 wqe->AbortTag = sbp->xrip->XRI;
4161 4161 }
4162 4162 wqe->un.Abort.IA = 1;
4163 4163 wqe->RequestTag = abort_iotag;
4164 4164 wqe->Command = CMD_ABORT_XRI_CX;
4165 4165 wqe->Class = CLASS3;
4166 4166 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4167 4167 wqe->CmdType = WQE_TYPE_ABORT;
4168 4168 } else {
4169 4169 iocb = &iocbq->iocb;
4170 4170 iocb->ULPIOTAG = abort_iotag;
4171 4171 iocb->un.acxri.abortType = 0;
4172 4172 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4173 4173 iocb->un.acxri.abortIoTag = iotag;
4174 4174 iocb->ULPLE = 1;
4175 4175 iocb->ULPCLASS = 0;
4176 4176 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4177 4177 iocb->ULPOWNER = OWN_CHIP;
4178 4178 }
4179 4179
4180 4180 return (iocbq);
4181 4181
4182 4182 } /* emlxs_create_close_xri_cn() */
4183 4183
4184 4184
4185 4185 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4186 4186 extern IOCBQ *
4187 4187 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4188 4188 CHANNEL *cp)
4189 4189 {
4190 4190 emlxs_hba_t *hba = HBA;
4191 4191 IOCBQ *iocbq;
4192 4192 IOCB *iocb;
4193 4193 emlxs_wqe_t *wqe;
4194 4194 uint16_t abort_iotag;
4195 4195
4196 4196 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4197 4197 return (NULL);
4198 4198 }
4199 4199
4200 4200 iocbq->channel = (void *)cp;
4201 4201 iocbq->port = (void *)port;
4202 4202 iocbq->node = (void *)ndlp;
4203 4203 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4204 4204
4205 4205 /*
4206 4206 * set up an iotag using special Abort iotags
4207 4207 */
4208 4208 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4209 4209 hba->fc_oor_iotag = hba->max_iotag;
4210 4210 }
4211 4211 abort_iotag = hba->fc_oor_iotag++;
4212 4212
4213 4213 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4214 4214 wqe = &iocbq->wqe;
4215 4215 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4216 4216 wqe->un.Abort.IA = 1;
4217 4217 wqe->RequestTag = abort_iotag;
4218 4218 wqe->AbortTag = xid;
4219 4219 wqe->Command = CMD_ABORT_XRI_CX;
4220 4220 wqe->Class = CLASS3;
4221 4221 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4222 4222 wqe->CmdType = WQE_TYPE_ABORT;
4223 4223 } else {
4224 4224 iocb = &iocbq->iocb;
4225 4225 iocb->ULPCONTEXT = xid;
4226 4226 iocb->ULPIOTAG = abort_iotag;
4227 4227 iocb->ULPLE = 1;
4228 4228 iocb->ULPCLASS = 0;
4229 4229 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4230 4230 iocb->ULPOWNER = OWN_CHIP;
4231 4231 }
4232 4232
4233 4233 return (iocbq);
4234 4234
4235 4235 } /* emlxs_create_close_xri_cx() */
4236 4236
4237 4237
4238 4238 void
4239 4239 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4240 4240 {
4241 4241 CHANNEL *cp;
4242 4242 IOCBQ *iocbq;
4243 4243 IOCB *iocb;
4244 4244
4245 4245 if (rxid == 0 || rxid == 0xFFFF) {
4246 4246 return;
4247 4247 }
4248 4248
4249 4249 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4250 4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4251 4251 "Closing ELS exchange: xid=%x", rxid);
4252 4252
4253 4253 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4254 4254 return;
4255 4255 }
4256 4256 }
4257 4257
4258 4258 cp = &hba->chan[hba->channel_els];
4259 4259
4260 4260 mutex_enter(&EMLXS_FCTAB_LOCK);
4261 4261
4262 4262 /* Create the abort IOCB */
4263 4263 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4264 4264
4265 4265 mutex_exit(&EMLXS_FCTAB_LOCK);
4266 4266
4267 4267 if (iocbq) {
4268 4268 iocb = &iocbq->iocb;
4269 4269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4270 4270 "Closing ELS exchange: xid=%x iotag=%d", rxid,
4271 4271 iocb->ULPIOTAG);
4272 4272
4273 4273 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4274 4274 }
4275 4275
4276 4276 } /* emlxs_close_els_exchange() */
4277 4277
4278 4278
4279 4279 void
4280 4280 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4281 4281 {
4282 4282 CHANNEL *cp;
4283 4283 IOCBQ *iocbq;
4284 4284 IOCB *iocb;
4285 4285
4286 4286 if (rxid == 0 || rxid == 0xFFFF) {
4287 4287 return;
4288 4288 }
4289 4289
4290 4290 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4291 4291
4292 4292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4293 4293 "Aborting ELS exchange: xid=%x", rxid);
4294 4294
4295 4295 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4296 4296 /* We have no way to abort unsolicited exchanges */
4297 4297 /* that we have not responded to at this time */
4298 4298 /* So we will return for now */
4299 4299 return;
4300 4300 }
4301 4301 }
4302 4302
4303 4303 cp = &hba->chan[hba->channel_els];
4304 4304
4305 4305 mutex_enter(&EMLXS_FCTAB_LOCK);
4306 4306
4307 4307 /* Create the abort IOCB */
4308 4308 if (hba->state >= FC_LINK_UP) {
4309 4309 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4310 4310 CLASS3, ABORT_TYPE_ABTS);
4311 4311 } else {
4312 4312 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4313 4313 }
4314 4314
4315 4315 mutex_exit(&EMLXS_FCTAB_LOCK);
4316 4316
4317 4317 if (iocbq) {
4318 4318 iocb = &iocbq->iocb;
4319 4319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4320 4320 "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4321 4321 iocb->ULPIOTAG);
4322 4322
4323 4323 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4324 4324 }
4325 4325
4326 4326 } /* emlxs_abort_els_exchange() */
4327 4327
4328 4328
4329 4329 void
4330 4330 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4331 4331 {
4332 4332 CHANNEL *cp;
4333 4333 IOCBQ *iocbq;
4334 4334 IOCB *iocb;
4335 4335
4336 4336 if (rxid == 0 || rxid == 0xFFFF) {
4337 4337 return;
4338 4338 }
4339 4339
4340 4340 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4341 4341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4342 4342 "Aborting CT exchange: xid=%x", rxid);
4343 4343
4344 4344 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4345 4345 /* We have no way to abort unsolicited exchanges */
4346 4346 /* that we have not responded to at this time */
4347 4347 /* So we will return for now */
4348 4348 return;
4349 4349 }
4350 4350 }
4351 4351
4352 4352 cp = &hba->chan[hba->channel_ct];
4353 4353
4354 4354 mutex_enter(&EMLXS_FCTAB_LOCK);
4355 4355
4356 4356 /* Create the abort IOCB */
4357 4357 if (hba->state >= FC_LINK_UP) {
4358 4358 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4359 4359 CLASS3, ABORT_TYPE_ABTS);
4360 4360 } else {
4361 4361 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4362 4362 }
4363 4363
4364 4364 mutex_exit(&EMLXS_FCTAB_LOCK);
4365 4365
4366 4366 if (iocbq) {
4367 4367 iocb = &iocbq->iocb;
4368 4368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4369 4369 "Aborting CT exchange: xid=%x iotag=%d", rxid,
4370 4370 iocb->ULPIOTAG);
4371 4371
4372 4372 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4373 4373 }
4374 4374
4375 4375 } /* emlxs_abort_ct_exchange() */
4376 4376
4377 4377
4378 4378 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4379 4379 static void
4380 4380 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4381 4381 uint8_t *flag, emlxs_buf_t *fpkt)
4382 4382 {
4383 4383 emlxs_hba_t *hba = HBA;
4384 4384 IOCBQ *iocbq;
4385 4385 CHANNEL *cp;
4386 4386 NODELIST *ndlp;
4387 4387
4388 4388 cp = (CHANNEL *)sbp->channel;
4389 4389 ndlp = sbp->node;
4390 4390
4391 4391 /* Create the close XRI IOCB */
4392 4392 if (hba->state >= FC_LINK_UP) {
4393 4393 iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4394 4394 CLASS3, ABORT_TYPE_ABTS);
4395 4395 } else {
4396 4396 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4397 4397 }
4398 4398 /*
4399 4399 * Add this iocb to our local abort Q
4400 4400 * This way we don't hold the CHIPQ lock too long
4401 4401 */
4402 4402 if (iocbq) {
4403 4403 if (abort->q_first) {
4404 4404 ((IOCBQ *)abort->q_last)->next = iocbq;
4405 4405 abort->q_last = (uint8_t *)iocbq;
4406 4406 abort->q_cnt++;
4407 4407 } else {
4408 4408 abort->q_first = (uint8_t *)iocbq;
4409 4409 abort->q_last = (uint8_t *)iocbq;
4410 4410 abort->q_cnt = 1;
4411 4411 }
4412 4412 iocbq->next = NULL;
4413 4413 }
4414 4414
4415 4415 /* set the flags */
4416 4416 mutex_enter(&sbp->mtx);
4417 4417
4418 4418 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4419 4419
4420 4420 sbp->ticks = hba->timer_tics + 10;
4421 4421 sbp->abort_attempts++;
4422 4422
4423 4423 flag[cp->channelno] = 1;
4424 4424
4425 4425 /*
4426 4426 * If the fpkt is already set, then we will leave it alone
4427 4427 * This ensures that this pkt is only accounted for on one
4428 4428 * fpkt->flush_count
4429 4429 */
4430 4430 if (!sbp->fpkt && fpkt) {
4431 4431 mutex_enter(&fpkt->mtx);
4432 4432 sbp->fpkt = fpkt;
4433 4433 fpkt->flush_count++;
4434 4434 mutex_exit(&fpkt->mtx);
4435 4435 }
4436 4436
4437 4437 mutex_exit(&sbp->mtx);
4438 4438
4439 4439 return;
4440 4440
4441 4441 } /* emlxs_sbp_abort_add() */
↓ open down ↓ |
2445 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX