1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * ibdm.c 27 * 28 * This file contains the InifiniBand Device Manager (IBDM) support functions. 29 * IB nexus driver will only be the client for the IBDM module. 30 * 31 * IBDM registers with IBTF for HCA arrival/removal notification. 32 * IBDM registers with SA access to send DM MADs to discover the IOC's behind 33 * the IOU's. 34 * 35 * IB nexus driver registers with IBDM to find the information about the 36 * HCA's and IOC's (behind the IOU) present on the IB fabric. 
37 */ 38 39 #include <sys/sysmacros.h> 40 #include <sys/systm.h> 41 #include <sys/taskq.h> 42 #include <sys/ib/mgt/ibdm/ibdm_impl.h> 43 #include <sys/ib/mgt/ibmf/ibmf_impl.h> 44 #include <sys/ib/ibtl/impl/ibtl_ibnex.h> 45 #include <sys/modctl.h> 46 47 /* Function Prototype declarations */ 48 static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **); 49 static int ibdm_fini(void); 50 static int ibdm_init(void); 51 static int ibdm_get_reachable_ports(ibdm_port_attr_t *, 52 ibdm_hca_list_t *); 53 static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t); 54 static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *); 55 static boolean_t ibdm_is_cisco(ib_guid_t); 56 static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *); 57 static void ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *); 58 static int ibdm_set_classportinfo(ibdm_dp_gidinfo_t *); 59 static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *); 60 static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *); 61 static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *); 62 static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t, 63 ib_guid_t *, ib_guid_t *); 64 static int ibdm_retry_command(ibdm_timeout_cb_args_t *); 65 static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int); 66 static int ibdm_verify_mad_status(ib_mad_hdr_t *); 67 static int ibdm_handle_redirection(ibmf_msg_t *, 68 ibdm_dp_gidinfo_t *, int *); 69 static void ibdm_wait_probe_completion(void); 70 static void ibdm_sweep_fabric(int); 71 static void ibdm_probe_gid_thread(void *); 72 static void ibdm_wakeup_probe_gid_cv(void); 73 static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int); 74 static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int); 75 static void ibdm_update_port_attr(ibdm_port_attr_t *); 76 static void ibdm_handle_hca_attach(ib_guid_t); 77 static void ibdm_handle_srventry_mad(ibmf_msg_t *, 78 ibdm_dp_gidinfo_t *, int *); 79 static void ibdm_ibmf_recv_cb(ibmf_handle_t, 
ibmf_msg_t *, void *); 80 static void ibdm_recv_incoming_mad(void *); 81 static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *); 82 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 83 static void ibdm_pkt_timeout_hdlr(void *arg); 84 static void ibdm_initialize_port(ibdm_port_attr_t *); 85 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port); 86 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 87 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 88 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 89 static void ibdm_free_send_buffers(ibmf_msg_t *); 90 static void ibdm_handle_hca_detach(ib_guid_t); 91 static void ibdm_handle_port_change_event(ibt_async_event_t *); 92 static int ibdm_fini_port(ibdm_port_attr_t *); 93 static int ibdm_uninit_hca(ibdm_hca_list_t *); 94 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 95 ibdm_dp_gidinfo_t *, int *); 96 static void ibdm_handle_iounitinfo(ibmf_handle_t, 97 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 98 static void ibdm_handle_ioc_profile(ibmf_handle_t, 99 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 100 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 101 ibt_async_code_t, ibt_async_event_t *); 102 static void ibdm_handle_classportinfo(ibmf_handle_t, 103 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 104 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 105 ibdm_dp_gidinfo_t *); 106 107 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 108 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 109 ibdm_dp_gidinfo_t *gid_list); 110 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 111 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 112 ibdm_dp_gidinfo_t *, int *); 113 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 114 ibdm_hca_list_t **); 115 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 116 size_t *, ib_guid_t); 117 static int 
ibdm_get_node_record_by_port(ibmf_saa_handle_t, 118 ib_guid_t, sa_node_record_t **, size_t *); 119 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 120 ib_lid_t); 121 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 122 ib_gid_t, ib_gid_t); 123 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 124 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 125 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 126 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 127 ibmf_saa_event_details_t *, void *); 128 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 129 ibdm_dp_gidinfo_t *); 130 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 131 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 132 ibdm_dp_gidinfo_t *); 133 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 134 static void ibdm_free_gid_list(ibdm_gid_t *); 135 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 136 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 137 static void ibdm_saa_event_taskq(void *); 138 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 139 static void ibdm_get_next_port(ibdm_hca_list_t **, 140 ibdm_port_attr_t **, int); 141 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 142 ibdm_dp_gidinfo_t *); 143 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 144 ibdm_hca_list_t *); 145 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 146 static void ibdm_saa_handle_new_gid(void *); 147 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 148 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 149 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 150 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 151 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 152 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 153 static int ibdm_serv_cmp(ibdm_srvents_info_t *, 
    ibdm_srvents_info_t *, int);
static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t,
    ibdm_dp_gidinfo_t **);

/* Tunables: DM MAD response timeout and retry count (values in ibdm_impl.h) */
int ibdm_dft_timeout	= IBDM_DFT_TIMEOUT;
int ibdm_dft_retry_cnt	= IBDM_DFT_NRETRIES;
#ifdef DEBUG
int ibdm_ignore_saa_event = 0;	/* debug knob: ignore SA subnet events */
#endif
/*
 * When zero, ibdm_initialize_port() skips IBMF/SA registration and
 * ibdm_port_attr_ibmf_init() skips per-pkey QP allocation entirely.
 */
int ibdm_enumerate_iocs = 0;

/* Modload support */
static struct modlmisc ibdm_modlmisc = {
	&mod_miscops,
	"InfiniBand Device Manager"
};

struct modlinkage ibdm_modlinkage = {
	MODREV_1,
	(void *)&ibdm_modlmisc,
	NULL
};

/*
 * IBTF client registration info: all async events (HCA attach/detach,
 * port up/down/change) are delivered to ibdm_event_hdlr().
 */
static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
	IBTI_V_CURR,
	IBT_DM,
	ibdm_event_hdlr,
	NULL,
	"ibdm"
};

/* Global variables */
ibdm_t	ibdm;		/* module-wide soft state (lists, locks, cv's) */
int	ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
char	*ibdm_string = "ibdm";

_NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
    ibdm.ibdm_dp_gidlist_head))

/*
 * _init
 *	Loadable module init, called before any other module.
 *	Initializes locks and registers with IBTF via ibdm_init(); on any
 *	failure the partially-built state is torn down with ibdm_fini().
 */
int
_init(void)
{
	int	err;

	IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);

	if ((err = ibdm_init()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
		(void) ibdm_fini();
		return (DDI_FAILURE);
	}

	if ((err = mod_install(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
		/* mod_install failed: undo ibdm_init() before returning err */
		(void) ibdm_fini();
	}
	return (err);
}


/*
 * _fini
 *	Loadable module fini.  If ibdm_fini() or mod_remove() fails, the
 *	module state is re-established with ibdm_init() so the module
 *	remains usable, and the failure is propagated to modunload.
 */
int
_fini(void)
{
	int	err;

	if ((err = ibdm_fini()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
		(void) ibdm_init();
		return (EBUSY);
	}

	if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
		(void) ibdm_init();
	}
	return (err);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ibdm_modlinkage, modinfop));
}


/*
 * ibdm_init():
 *	Idempotent module initialization, driven by ibdm.ibdm_state flag
 *	bits so a partially-failed init can be safely retried:
 *	- allocate global locks/cv's		(IBDM_LOCKS_ALLOCED)
 *	- register with IBTF			(IBDM_IBT_ATTACHED)
 *	- discover and attach existing HCAs	(IBDM_HCA_ATTACHED)
 *	- allocate probe/busy cv's		(IBDM_CVS_ALLOCED)
 *	Returns IBDM_SUCCESS or IBDM_FAILURE.
 *
 *	NOTE(review): ibdm_mutex is entered in the LOCKS_ALLOCED step but
 *	only released in the IBT_ATTACHED step; this pairing assumes the
 *	two state bits are always set/cleared together — verify for
 *	partial-init retry paths.
 */
static int
ibdm_init(void)
{
	int		i, hca_count;
	ib_guid_t	*hca_guids;
	ibt_status_t	status;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
	if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
		mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL);
		/* held across the ibt_attach step below */
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
	}

	if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
		if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
		    (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
			    "failed %x", status);
			mutex_exit(&ibdm.ibdm_mutex);
			return (IBDM_FAILURE);
		}

		ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}


	if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
		hca_count = ibt_get_hca_list(&hca_guids);
		IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
		for (i = 0; i < hca_count; i++)
			(void) ibdm_handle_hca_attach(hca_guids[i]);
		if (hca_count)
			ibt_free_hca_list(hca_guids, hca_count);

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}

	if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
		cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
		cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
		mutex_exit(&ibdm.ibdm_mutex);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_free_iou_info():
 *	Tear down the IO Unit info hanging off *ioup for gid_info:
 *	cancel any outstanding per-IOC and per-service-entry timeouts,
 *	free each IOC's GID list and service-entry array, then free the
 *	IOU structure itself and NULL out *ioup.
 *
 *	Called and returns with gid_info->gl_mutex held; the mutex is
 *	dropped around each untimeout() call (untimeout of a running
 *	handler must not be done while holding a lock the handler takes).
 *	Returns 0 on success, -1 if any untimeout() fails.
 */
static int
ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
{
	int			ii, k, niocs;
	size_t			size;
	ibdm_gid_t		*delete, *head;
	timeout_id_t		timeout_id;
	ibdm_ioc_info_t		*ioc;
	ibdm_iou_info_t		*gl_iou = *ioup;

	ASSERT(mutex_owned(&gid_info->gl_mutex));
	if (gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
		return (0);
	}

	niocs = gl_iou->iou_info.iou_num_ctrl_slots;
	IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
	    gid_info, niocs);

	for (ii = 0; ii < niocs; ii++) {
		ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];

		/* handle the case where an ioc_timeout_id is scheduled */
		if (ioc->ioc_timeout_id) {
			timeout_id = ioc->ioc_timeout_id;
			/* clear before dropping the lock to avoid a re-cancel */
			ioc->ioc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where an ioc_dc_timeout_id is scheduled */
		if (ioc->ioc_dc_timeout_id) {
			timeout_id = ioc->ioc_dc_timeout_id;
			ioc->ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_dc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_dc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where serv[k].se_timeout_id is scheduled */
		for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
			if (ioc->ioc_serv[k].se_timeout_id) {
				timeout_id = ioc->ioc_serv[k].se_timeout_id;
				ioc->ioc_serv[k].se_timeout_id = 0;
				mutex_exit(&gid_info->gl_mutex);
				IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
				    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
				    k, timeout_id);
				if (untimeout(timeout_id) == -1) {
					IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
					    " untimeout se_timeout_id failed");
					mutex_enter(&gid_info->gl_mutex);
					return (-1);
				}
				mutex_enter(&gid_info->gl_mutex);
			}
		}

		/* delete GID list in IOC */
		head = ioc->ioc_gid_list;
		while (head) {
			IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
			    "Deleting gid_list struct %p", head);
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		ioc->ioc_gid_list = NULL;

		/* delete ioc_serv */
		size = ioc->ioc_profile.ioc_service_entries *
		    sizeof (ibdm_srvents_info_t);
		if (ioc->ioc_serv && size) {
			kmem_free(ioc->ioc_serv, size);
			ioc->ioc_serv = NULL;
		}
	}
	/*
	 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
	 * via the switch during the probe process.
	 */
	gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
	kmem_free(gl_iou, size);
	*ioup = NULL;
	return (0);
}


/*
 * ibdm_fini():
 *	Un-register with IBTF, uninitialize every HCA on the list, and
 *	free all GID info (IOU info, per-GID HCA lists, GID structs).
 *	Finally destroy the global locks/cv's.  Returns IBDM_FAILURE if
 *	ibt_detach() or any per-HCA uninit fails (caller re-inits).
 */
static int
ibdm_fini()
{
	int			ii;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	ibdm_gid_t		*head, *delete;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
		ibdm.ibdm_ibt_clnt_hdl = NULL;
	}

	hca_list = ibdm.ibdm_hca_list_head;
	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		temp = hca_list;
		hca_list = hca_list->hl_next;
		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	/* free the whole discovered-GID list and everything hanging off it */
	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
		cv_destroy(&ibdm.ibdm_port_settle_cv);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach.  IBDM supports the following async events; all other
 *	events are silently ignored.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls IB nexus callback routine with
 *		the port attributes structure as an input argument.
 *	IBT_HCA_DETACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA and calls IB nexus callback with
 *		port guid as an argument
 *	IBT_EVENT_PORT_UP:
 *		Register with IBMF and SA access
 *		Setup IBMF receive callback routine
 *	IBT_ERROR_PORT_DOWN:
 *		Un-Register with IBMF and SA access
 *		Teardown IBMF receive callback routine
 */
/*ARGSUSED*/
static void
ibdm_event_hdlr(void *clnt_hdl,
    ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port;
	ibmf_saa_handle_t	port_sa_hdl;

	IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);

	switch (code) {
	case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
		ibdm_handle_hca_attach(event->ev_hca_guid);
		break;

	case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
		ibdm_handle_hca_detach(event->ev_hca_guid);
		/* notify IB nexus that the HCA is gone */
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_EVENT_PORT_UP:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		ibdm_initialize_port(port);
		hca_list->hl_nports_active++;
		/* wake up any thread waiting for ports to settle */
		cv_broadcast(&ibdm.ibdm_port_settle_cv);
		mutex_exit(&ibdm.ibdm_hl_mutex);

		/* Inform IB nexus driver */
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_PORT_UP);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_ERROR_PORT_DOWN:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		hca_list->hl_nports_active--;
		/* save SA handle before fini so dgids can be reset below */
		port_sa_hdl = port->pa_sa_hdl;
		(void) ibdm_fini_port(port);
		port->pa_state = IBT_PORT_DOWN;
		cv_broadcast(&ibdm.ibdm_port_settle_cv);
		mutex_exit(&ibdm.ibdm_hl_mutex);
		ibdm_reset_all_dgids(port_sa_hdl);
		break;

	case IBT_PORT_CHANGE_EVENT:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE");
		/* only P_Key table changes are of interest to IBDM */
		if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY)
			ibdm_handle_port_change_event(event);
		break;

	default:		/* Ignore all other events/errors */
		break;
	}
}

/*
 * ibdm_handle_port_change_event()
 *	Refresh the port's P_Key table on an IBT_PORT_CHANGE_PKEY event
 *	and notify the IB nexus driver via its registered callback.
 */
static void
ibdm_handle_port_change_event(ibt_async_event_t *event)
{
	ibdm_port_attr_t	*port;
	ibdm_hca_list_t		*hca_list;

	IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:"
	    " HCA guid %llx", event->ev_hca_guid);
	mutex_enter(&ibdm.ibdm_hl_mutex);
	port = ibdm_get_port_attr(event, &hca_list);
	if (port == NULL) {
		IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present");
		mutex_exit(&ibdm.ibdm_hl_mutex);
		return;
	}
	ibdm_update_port_pkeys(port);
	cv_broadcast(&ibdm.ibdm_port_settle_cv);
	mutex_exit(&ibdm.ibdm_hl_mutex);

	/* Inform IB nexus driver */
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);
}

/*
 *
 * ibdm_update_port_pkeys()
 *	Update the pkey table
 *	Update the port attributes
 *
 *	Rebuilds port->pa_pkey_tbl from a fresh ibt_query_hca_ports()
 *	snapshot.  QP handles for pkeys that survive the change are
 *	migrated from the old table to the new one; pkeys that are new
 *	get a fresh QP via ibdm_port_attr_ibmf_init(); QP handles left
 *	behind in the old table (pkeys that disappeared) are torn down
 *	with ibdm_port_attr_ibmf_fini().  Called with ibdm_hl_mutex held.
 */
static void
ibdm_update_port_pkeys(ibdm_port_attr_t *port)
{
	uint_t			nports, size;
	uint_t			pkey_idx, opkey_idx;
	uint16_t		npkeys;
	ibt_hca_portinfo_t	*pinfop;
	ib_pkey_t		pkey;
	ibdm_pkey_tbl_t		*pkey_tbl;
	ibdm_port_attr_t	newport;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}

	npkeys = pinfop->p_pkey_tbl_sz;
	pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
	/*
	 * newport is a scratch port_attr: it shares the real port's IBMF
	 * handle so ibdm_port_attr_ibmf_init() fills QP handles directly
	 * into the new table.
	 */
	newport.pa_pkey_tbl = pkey_tbl;
	newport.pa_ibmf_hdl = port->pa_ibmf_hdl;

	for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) {
		pkey = pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];
		/*
		 * Is this pkey present in the current table ?
		 * If so, move its QP handle over and NULL the old slot so
		 * the cleanup loop below won't free a migrated handle.
		 */
		for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
			if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) {
				pkey_tbl[pkey_idx].pt_qp_hdl =
				    port->pa_pkey_tbl[opkey_idx].pt_qp_hdl;
				port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL;
				break;
			}
		}

		if (opkey_idx == port->pa_npkeys) {
			/* brand new pkey: allocate a QP unless it's invalid */
			pkey = pkey_tbl[pkey_idx].pt_pkey;
			if (IBDM_INVALID_PKEY(pkey)) {
				pkey_tbl[pkey_idx].pt_qp_hdl = NULL;
				continue;
			}
			ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx);
		}
	}

	/* tear down QPs for pkeys that vanished from the new table */
	for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
		if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) {
			if (ibdm_port_attr_ibmf_fini(port, opkey_idx) !=
			    IBDM_SUCCESS) {
				IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: "
				    "ibdm_port_attr_ibmf_fini failed for "
				    "port pkey 0x%x",
				    port->pa_pkey_tbl[opkey_idx].pt_pkey);
			}
		}
	}

	if (port->pa_pkey_tbl != NULL) {
		kmem_free(port->pa_pkey_tbl,
		    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
	}

	/* install the new table and refresh cached port attributes */
	port->pa_npkeys = npkeys;
	port->pa_pkey_tbl = pkey_tbl;
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
	port->pa_state = pinfop->p_linkstate;
	ibt_free_portinfo(pinfop, size);
}

/*
 * ibdm_initialize_port()
 *	Register with IBMF
 *	Register with SA access
 *	Register a receive callback routine with IBMF. IBMF invokes
 *	this routine whenever a MAD arrives at this port.
 *	Update the port attributes
 *
 *	No-op if the port is not active or is already initialized
 *	(pa_sa_hdl or pa_pkey_tbl already set).  When ibdm_enumerate_iocs
 *	is zero, the IBMF/SA registration steps are skipped entirely.
 *	On any registration failure the port is torn back down with
 *	ibdm_fini_port().  Called with ibdm_hl_mutex held.
 */
static void
ibdm_initialize_port(ibdm_port_attr_t *port)
{
	int				ii;
	uint_t				nports, size;
	uint_t				pkey_idx;
	ib_pkey_t			pkey;
	ibt_hca_portinfo_t		*pinfop;
	ibmf_register_info_t		ibmf_reg;
	ibmf_saa_subnet_event_args_t	event_args;

	IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	/* already initialized: SA handle or pkey table present */
	if (port->pa_sa_hdl != NULL || port->pa_pkey_tbl != NULL)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = pinfop->p_linkstate;
	port->pa_npkeys = pinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];

	ibt_free_portinfo(pinfop, size);

	if (ibdm_enumerate_iocs) {
		/* open an SA session; subnet events go to ibdm_saa_event_cb */
		event_args.is_event_callback = ibdm_saa_event_cb;
		event_args.is_event_callback_arg = port;
		if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
		    IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
			    "sa access registration failed");
			(void) ibdm_fini_port(port);
			return;
		}

		/* register as a Device Management class manager with IBMF */
		ibmf_reg.ir_ci_guid = port->pa_hca_guid;
		ibmf_reg.ir_port_num = port->pa_port_num;
		ibmf_reg.ir_client_class = DEV_MGT_MANAGER;

		if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
		    &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
			    "IBMF registration failed");
			(void) ibdm_fini_port(port);
			return;
		}

		/* incoming DM MADs on the default QP go to ibdm_ibmf_recv_cb */
		if (ibmf_setup_async_cb(port->pa_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT,
		    ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
			    "IBMF setup recv cb failed");
			(void) ibdm_fini_port(port);
			return;
		}
	} else {
		port->pa_sa_hdl = NULL;
		port->pa_ibmf_hdl = NULL;
	}

	/* allocate a per-pkey alternate QP for each valid pkey */
	for (ii = 0; ii < port->pa_npkeys; ii++) {
		pkey = port->pa_pkey_tbl[ii].pt_pkey;
		if (IBDM_INVALID_PKEY(pkey)) {
			port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
			continue;
		}
		ibdm_port_attr_ibmf_init(port, pkey, ii);
	}
}


/*
 * ibdm_port_attr_ibmf_init:
 *	With IBMF - Alloc QP Handle and Setup Async callback
 *
 *	Allocates an alternate IBMF QP for the pkey at table index "ii"
 *	and attaches ibdm_ibmf_recv_cb to it; on callback-setup failure
 *	the QP is freed and the slot is NULLed.  No-op when
 *	ibdm_enumerate_iocs is zero.
 */
static void
ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
{
	int ret;

	if (ibdm_enumerate_iocs == 0) {
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
		return;
	}

	if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
	    IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF failed to alloc qp %d", ret);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
		return;
	}

	/*
	 * NOTE(review): message says "QP handle" but prints the IBMF
	 * handle (pa_ibmf_hdl), not pt_qp_hdl — debug output only.
	 */
	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
	    port->pa_ibmf_hdl);

	if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
	    port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF setup recv cb failed %d", ret);
		(void) ibmf_free_qp(port->pa_ibmf_hdl,
		    &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
}


/*
 * ibdm_get_port_attr()
 *	Get port attributes from HCA guid and port number
 *	Return pointer to ibdm_port_attr_t on Success
 *	and
NULL on failure 853 */ 854 static ibdm_port_attr_t * 855 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 856 { 857 ibdm_hca_list_t *hca_list; 858 ibdm_port_attr_t *port_attr; 859 int ii; 860 861 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 862 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 863 hca_list = ibdm.ibdm_hca_list_head; 864 while (hca_list) { 865 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 866 for (ii = 0; ii < hca_list->hl_nports; ii++) { 867 port_attr = &hca_list->hl_port_attr[ii]; 868 if (port_attr->pa_port_num == event->ev_port) { 869 *retval = hca_list; 870 return (port_attr); 871 } 872 } 873 } 874 hca_list = hca_list->hl_next; 875 } 876 return (NULL); 877 } 878 879 880 /* 881 * ibdm_update_port_attr() 882 * Update the port attributes 883 */ 884 static void 885 ibdm_update_port_attr(ibdm_port_attr_t *port) 886 { 887 uint_t nports, size; 888 uint_t pkey_idx; 889 ibt_hca_portinfo_t *portinfop; 890 891 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 892 if (ibt_query_hca_ports(port->pa_hca_hdl, 893 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 894 /* This should not occur */ 895 port->pa_npkeys = 0; 896 port->pa_pkey_tbl = NULL; 897 return; 898 } 899 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 900 901 port->pa_state = portinfop->p_linkstate; 902 903 /* 904 * PKey information in portinfo valid only if port is 905 * ACTIVE. Bail out if not. 
 */
	if (port->pa_state != IBT_PORT_ACTIVE) {
		/* Inactive port: nothing to cache, just free the portinfo */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		ibt_free_portinfo(portinfop, size);
		return;
	}

	/* Cache a private copy of the port's P_Key table */
	port->pa_npkeys = portinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    portinfop->p_pkey_tbl[pkey_idx];
	}
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_attach()
 *	Handle arrival of a new HCA:
 *	1) Open the HCA and query its attributes and per-port info
 *	2) Build an ibdm_hca_list_t entry (including a dummy port_attr
 *	   representing the HCA node itself)
 *	3) Initialize IBMF/SA sessions for each port in ACTIVE state
 *	4) Link the entry onto the global HCA list (dropping it again if
 *	   the GUID was already known)
 *	5) Notify IB nexus via the registered callback
 *	   (IBDM_EVENT_HCA_ADDED)
 */
static void
ibdm_handle_hca_attach(ib_guid_t hca_guid)
{
	uint_t size;
	uint_t ii, nports;
	ibt_status_t status;
	ibt_hca_hdl_t hca_hdl;
	ibt_hca_attr_t *hca_attr;
	ibdm_hca_list_t *hca_list, *temp;
	ibdm_port_attr_t *port_attr;
	ibt_hca_portinfo_t *portinfop;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);

	/* open the HCA first */
	if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
	    &hca_hdl)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "open_hca failed, status 0x%x", status);
		return;
	}

	hca_attr = (ibt_hca_attr_t *)
	    kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
	/* ibt_query_hca always returns IBT_SUCCESS */
	(void) ibt_query_hca(hca_hdl, hca_attr);

	IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
	    " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
	    hca_attr->hca_version_id, hca_attr->hca_nports);

	if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
	    &size)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "ibt_query_hca_ports failed, status 0x%x", status);
		kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
		(void) ibt_close_hca(hca_hdl);
		return;
	}
	hca_list = (ibdm_hca_list_t *)
	    kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
	hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
	hca_list->hl_hca_guid = hca_attr->hca_node_guid;
	hca_list->hl_nports = hca_attr->hca_nports;
	hca_list->hl_attach_time = gethrtime();
	hca_list->hl_hca_hdl = hca_hdl;

	/*
	 * Init a dummy port attribute for the HCA node
	 * This is for Per-HCA Node. Initialize port_attr :
	 *	hca_guid & port_guid -> hca_guid
	 *	npkeys, pkey_tbl is NULL
	 *	port_num, sn_prefix is 0
	 *	vendorid, product_id, dev_version from HCA
	 *	pa_state is IBT_PORT_ACTIVE
	 */
	hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    sizeof (ibdm_port_attr_t), KM_SLEEP);
	port_attr = hca_list->hl_hca_port_attr;
	port_attr->pa_vendorid = hca_attr->hca_vendor_id;
	port_attr->pa_productid = hca_attr->hca_device_id;
	port_attr->pa_dev_version = hca_attr->hca_version_id;
	port_attr->pa_hca_guid = hca_attr->hca_node_guid;
	port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
	port_attr->pa_port_guid = hca_attr->hca_node_guid;
	port_attr->pa_state = IBT_PORT_ACTIVE;


	/* Fill in the per-port attributes from the queried portinfo */
	for (ii = 0; ii < nports; ii++) {
		port_attr = &hca_list->hl_port_attr[ii];
		port_attr->pa_vendorid = hca_attr->hca_vendor_id;
		port_attr->pa_productid = hca_attr->hca_device_id;
		port_attr->pa_dev_version = hca_attr->hca_version_id;
		port_attr->pa_hca_guid = hca_attr->hca_node_guid;
		port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
		port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
		port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
		port_attr->pa_port_num = portinfop[ii].p_port_num;
		port_attr->pa_state = portinfop[ii].p_linkstate;

		/*
		 * Register with IBMF, SA access when the port is in
		 * ACTIVE state. Also register a callback routine
		 * with IBMF to receive incoming DM MAD's.
		 * The IBDM event handler takes care of registration of
		 * port which are not active.
		 */
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_hca_attach: port guid %llx Port state 0x%x",
		    port_attr->pa_port_guid, portinfop[ii].p_linkstate);

		if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
			mutex_enter(&ibdm.ibdm_hl_mutex);
			hca_list->hl_nports_active++;
			ibdm_initialize_port(port_attr);
			/* waiters in ibdm_ibnex_port_settle_wait et al. */
			cv_broadcast(&ibdm.ibdm_port_settle_cv);
			mutex_exit(&ibdm.ibdm_hl_mutex);
		}
	}
	/* Drop duplicate arrivals: GUID may already be on the list */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
		if (temp->hl_hca_guid == hca_guid) {
			IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
			    "already seen by IBDM", hca_guid);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			(void) ibdm_uninit_hca(hca_list);
			return;
		}
	}
	ibdm.ibdm_hca_count++;
	if (ibdm.ibdm_hca_list_head == NULL) {
		ibdm.ibdm_hca_list_head = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	} else {
		ibdm.ibdm_hca_list_tail->hl_next = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	/* Tell IB nexus (if registered) about the new HCA */
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &hca_guid, IBDM_EVENT_HCA_ADDED);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);

	kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_detach()
 *	Handle removal of an HCA: wait for any fabric sweep to finish,
 *	unlink the matching entry from the global HCA list, tear it down
 *	via ibdm_uninit_hca(), and purge references to this HCA from the
 *	per-GID hca lists.
 */
static void
ibdm_handle_hca_detach(ib_guid_t hca_guid)
{
	ibdm_hca_list_t *head, *prev = NULL;
	size_t len;
	ibdm_dp_gidinfo_t *gidinfo;
	ibdm_port_attr_t *port_attr;
	int i;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);

	/* Make sure no probes are running */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	/* Unlink the matching HCA entry from the global list */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	head = ibdm.ibdm_hca_list_head;
	while (head) {
		if (head->hl_hca_guid == hca_guid) {
			if (prev == NULL)
				ibdm.ibdm_hca_list_head = head->hl_next;
			else
				prev->hl_next = head->hl_next;
			if (ibdm.ibdm_hca_list_tail == head)
				ibdm.ibdm_hca_list_tail = prev;
			ibdm.ibdm_hca_count--;
			break;
		}
		prev = head;
		head = head->hl_next;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	/*
	 * NOTE(review): if the GUID was not found, head is NULL here and
	 * ibdm_uninit_hca() dereferences it -- presumably detach is only
	 * delivered for HCAs previously attached; confirm with IBTF.
	 * On teardown failure, re-attach to restore a consistent state.
	 */
	if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
		(void) ibdm_handle_hca_attach(hca_guid);

#ifdef DEBUG
	if (ibdm_enumerate_iocs == 0) {
		ASSERT(ibdm.ibdm_dp_gidlist_head == NULL);
	}
#endif

	/*
	 * Now clean up the HCA lists in the gidlist.
	 */
	for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
	    gidinfo->gl_next) {
		prev = NULL;
		head = gidinfo->gl_hca_list;
		while (head) {
			if (head->hl_hca_guid == hca_guid) {
				if (prev == NULL)
					gidinfo->gl_hca_list =
					    head->hl_next;
				else
					prev->hl_next = head->hl_next;
				/* free each port's cached pkey table */
				for (i = 0; i < head->hl_nports; i++) {
					port_attr = &head->hl_port_attr[i];
					if (port_attr->pa_pkey_tbl != NULL)
						kmem_free(
						    port_attr->pa_pkey_tbl,
						    port_attr->pa_npkeys *
						    sizeof (ibdm_pkey_tbl_t));
				}
				len = sizeof (ibdm_hca_list_t) +
				    (head->hl_nports *
				    sizeof (ibdm_port_attr_t));
				kmem_free(head, len);

				break;
			}
			prev = head;
			head = head->hl_next;
		}
	}

	/* Release the sweep lock taken at entry */
	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_uninit_hca()
 *	Tear down one HCA list entry: finalize each port (IBMF/SA
 *	sessions), close the HCA handle and free the entry's memory.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE; on failure the entry is left
 *	partially initialized for the caller to recover.
 */
static int
ibdm_uninit_hca(ibdm_hca_list_t *head)
{
	int ii;
	ibdm_port_attr_t *port_attr;

	for (ii = 0; ii < head->hl_nports; ii++) {
		port_attr = &head->hl_port_attr[ii];
		if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
			    "ibdm_fini_port() failed", head, ii);
			return (IBDM_FAILURE);
		}
	}
	if (head->hl_hca_hdl)
		if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "uninit_hca: "
			    "ibt_close_hca() failed");
			return (IBDM_FAILURE);
		}
	kmem_free(head->hl_port_attr,
	    head->hl_nports * sizeof (ibdm_port_attr_t));
	kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
	kmem_free(head, sizeof (ibdm_hca_list_t));
	return (IBDM_SUCCESS);
}


/*
 * ibdm_fini_port()
 *	For each port on the HCA,
 *	1) Teardown IBMF receive callback function
 *	2) Unregister with IBMF
 *	3) Unregister with SA access
 *	Also frees the cached pkey table. Returns
 *	IBDM_SUCCESS/IBDM_FAILURE.
 */
static int
ibdm_fini_port(ibdm_port_attr_t *port_attr)
{
	int ii, ibmf_status;

	/* Tear down the per-pkey QPs first */
	for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
		if (port_attr->pa_pkey_tbl == NULL)
			break;
		if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
			continue;
		if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibdm_port_attr_ibmf_fini failed for "
			    "port pkey 0x%x", ii);
			return (IBDM_FAILURE);
		}
	}

	if (port_attr->pa_ibmf_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
			    "ibmf_unregister failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		port_attr->pa_ibmf_hdl = NULL;
	}

	if (port_attr->pa_sa_hdl) {
		ibmf_status =
		    ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
			    "ibmf_sa_session_close failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_sa_hdl = NULL;
	}

	/* Finally release the cached pkey table */
	if (port_attr->pa_pkey_tbl != NULL) {
		kmem_free(port_attr->pa_pkey_tbl,
		    port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
		port_attr->pa_pkey_tbl = NULL;
		port_attr->pa_npkeys = 0;
	}

	return (IBDM_SUCCESS);
}


/*
 * ibdm_port_attr_ibmf_fini:
 *	With IBMF - Tear down Async callback and free QP Handle
 *	for the pkey table entry at index ii.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE.
 */
static int
ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
{
	int ibmf_status;

	IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");

	/* When IOC enumeration is disabled no per-pkey QPs were created */
	if (ibdm_enumerate_iocs == 0) {
		ASSERT(port_attr->pa_pkey_tbl[ii].pt_qp_hdl == NULL);
		return (IBDM_SUCCESS);
	}

	if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
		    &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_free_qp failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_gid_decr_pending:
 *	decrement gl_pending_cmds. If zero wakeup sleeping threads
 *	Lock order is ibdm.ibdm_mutex before gidinfo->gl_mutex
 *	throughout; both are dropped before calling into
 *	ibdm_delete_gidinfo()/ibdm_notify_newgid_iocs() and re-taken
 *	afterwards.
 */
static void
ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
{
	mutex_enter(&ibdm.ibdm_mutex);
	mutex_enter(&gidinfo->gl_mutex);
	if (--gidinfo->gl_pending_cmds == 0) {
		/*
		 * Handle DGID getting removed.
		 */
		if (gidinfo->gl_disconnected) {
			mutex_exit(&gidinfo->gl_mutex);
			mutex_exit(&ibdm.ibdm_mutex);

			IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
			    "gidinfo %p hot removal", gidinfo);
			ibdm_delete_gidinfo(gidinfo);

			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_ngid_probes_in_progress--;
			ibdm_wait_probe_completion();
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		/* Drop locks while notifying IOCs of the new GID */
		mutex_exit(&gidinfo->gl_mutex);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_notify_newgid_iocs(gidinfo);
		mutex_enter(&ibdm.ibdm_mutex);
		mutex_enter(&gidinfo->gl_mutex);

		ibdm.ibdm_ngid_probes_in_progress--;
		ibdm_wait_probe_completion();
	}
	mutex_exit(&gidinfo->gl_mutex);
	mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_wait_probe_completion:
 *	wait for probing to complete
 *	(sleeps until the last prober clears IBDM_PROBE_IN_PROGRESS
 *	via ibdm_wakeup_probe_gid_cv())
 */
static void
ibdm_wait_probe_completion(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
		ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
		while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
			cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
	}
}


/*
 * ibdm_wait_cisco_probe_completion:
 *	wait for the reply from the Cisco FC GW switch after a setclassportinfo
 *	request is sent. This wait can be achieved on each gid.
1341 */ 1342 static void 1343 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo) 1344 { 1345 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex)); 1346 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete"); 1347 gidinfo->gl_flag |= IBDM_CISCO_PROBE; 1348 while (gidinfo->gl_flag & IBDM_CISCO_PROBE) 1349 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex); 1350 } 1351 1352 1353 /* 1354 * ibdm_wakeup_probe_gid_cv: 1355 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress) 1356 */ 1357 static void 1358 ibdm_wakeup_probe_gid_cv(void) 1359 { 1360 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1361 if (!ibdm.ibdm_ngid_probes_in_progress) { 1362 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup"); 1363 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 1364 cv_broadcast(&ibdm.ibdm_probe_cv); 1365 } 1366 1367 } 1368 1369 1370 /* 1371 * ibdm_sweep_fabric(reprobe_flag) 1372 * Find all possible Managed IOU's and their IOC's that are visible 1373 * to the host. The algorithm used is as follows 1374 * 1375 * Send a "bus walk" request for each port on the host HCA to SA access 1376 * SA returns complete set of GID's that are reachable from 1377 * source port. This is done in parallel. 1378 * 1379 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE 1380 * 1381 * Sort the GID list and eliminate duplicate GID's 1382 * 1) Use DGID for sorting 1383 * 2) use PortGuid for sorting 1384 * Send SA query to retrieve NodeRecord and 1385 * extract PortGuid from that. 1386 * 1387 * Set GID state to IBDM_GID_PROBE_FAILED to all the ports that dont 1388 * support DM MAD's 1389 * Send a "Portinfo" query to get the port capabilities and 1390 * then check for DM MAD's support 1391 * 1392 * Send "ClassPortInfo" request for all the GID's in parallel, 1393 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the 1394 * cv_signal to complete. 1395 * 1396 * When DM agent on the remote GID sends back the response, IBMF 1397 * invokes DM callback routine. 
 *
 *	If the response is proper, send "IOUnitInfo" request and set
 *	GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send "IocProfileInfo" request to
 *	all the IOC simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
 *
 *	Send request to get Service entries simultaneously
 *
 *	Signal the waiting thread when received response for all the commands.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when received a error
 *	response during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of number commands in progress at any point of time.
 *	MAD transaction ID is used to identify a particular GID
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re probe the failure GID (for certain failures) when requested
 *	for fabric sweep next time
 *
 *	Parameters : If reprobe_flag is set, All IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int ii;
	int new_paths = 0;
	uint8_t niocs;
	taskqid_t tid;
	ibdm_ioc_info_t *ioc;
	ibdm_hca_list_t *hca_list = NULL;
	ibdm_port_attr_t *port = NULL;
	ibdm_dp_gidinfo_t *gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep already in progress. If so, just
	 * wait for the fabric sweep to complete
	 */
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	ibdm_dump_sweep_fabric_timestamp(0);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(NULL);

	/*
	 * Get list of all the ports reachable from the local known HCA
	 * ports which are active
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		/*
		 * Get PATHS to all the reachable ports from
		 * SGID and update the global ibdm structure.
		 */
		new_paths = ibdm_get_reachable_ports(port, hca_list);
		ibdm.ibdm_ngids += new_paths;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
	mutex_exit(&ibdm.ibdm_mutex);

	/* Send a request to probe GIDs asynchronously. */
	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
	    gid_info = gid_info->gl_next) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_reprobe_flag = reprobe_flag;
		mutex_exit(&gid_info->gl_mutex);

		/* process newly encountered GIDs */
		tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
		    (void *)gid_info, TQ_NOSLEEP);
		IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
		    " taskq_id = %x", gid_info, tid);
		/* taskq failed to dispatch call it directly */
		if (tid == NULL)
			ibdm_probe_gid_thread((void *)gid_info);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();

	/*
	 * Update the properties, if reprobe_flag is set
	 * Skip if gl_reprobe_flag is set, this will be
	 * a re-inserted / new GID, for which notifications
	 * have already been send.
	 */
	if (reprobe_flag) {
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
		    gid_info = gid_info->gl_next) {
			if (gid_info->gl_iou == NULL)
				continue;
			if (gid_info->gl_reprobe_flag) {
				gid_info->gl_reprobe_flag = 0;
				continue;
			}

			niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
				if (ioc)
					ibdm_reprobe_update_port_srv(ioc,
					    gid_info);
			}
		}
	} else if (ibdm.ibdm_prev_iou) {
		ibdm_ioc_info_t *ioc_list;

		/*
		 * Get the list of IOCs which have changed.
		 * If any IOCs have changed, Notify IBNexus
		 */
		ibdm.ibdm_prev_iou = 0;
		ioc_list = ibdm_handle_prev_iou();
		if (ioc_list) {
			if (ibdm.ibdm_ibnex_callback != NULL) {
				(*ibdm.ibdm_ibnex_callback)(
				    (void *)ioc_list,
				    IBDM_EVENT_IOC_PROP_UPDATE);
			}
		}
	}

	ibdm_dump_sweep_fabric_timestamp(1);

	/* Release the sweep lock; note ibdm_mutex is still held here */
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
}


/*
 * ibdm_is_cisco:
 *	Check if this is a Cisco device or not.
 *	(compares the OUI part of the GUID with the Cisco company id)
 */
static boolean_t
ibdm_is_cisco(ib_guid_t guid)
{
	if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID)
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * ibdm_is_cisco_switch:
 *	Check if this switch is a CISCO switch or not.
 *	Note that if this switch is already activated, ibdm_is_cisco_switch()
 *	returns B_FALSE not to re-activate it again.
 */
static boolean_t
ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info)
{
	int company_id, device_id;
	ASSERT(gid_info != 0);
	ASSERT(MUTEX_HELD(&gid_info->gl_mutex));

	/*
	 * If this switch is already activated, don't re-activate it.
	 */
	if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE)
		return (B_FALSE);

	/*
	 * Check if this switch is a Cisco FC GW or not.
	 * Use the node guid (the OUI part) instead of the vendor id
	 * since the vendor id is zero in practice.
	 */
	company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT;
	device_id = gid_info->gl_devid;

	if (company_id == IBDM_CISCO_COMPANY_ID &&
	    device_id == IBDM_CISCO_DEVICE_ID)
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * ibdm_probe_gid_thread:
 *	thread that does the actual work for sweeping the fabric
 *	for a given GID
 *	Dispatched from ibdm_sweep_fabric() (taskq or direct call).
 *	Every early-return path decrements ibdm_ngid_probes_in_progress
 *	and calls ibdm_wakeup_probe_gid_cv() so the sweeper is not left
 *	waiting.
 */
static void
ibdm_probe_gid_thread(void *args)
{
	int reprobe_flag;
	ib_guid_t node_guid;
	ib_guid_t port_guid;
	ibdm_dp_gidinfo_t *gid_info;

	gid_info = (ibdm_dp_gidinfo_t *)args;
	reprobe_flag = gid_info->gl_reprobe_flag;
	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
	    gid_info, reprobe_flag);
	ASSERT(gid_info != NULL);
	ASSERT(gid_info->gl_pending_cmds == 0);

	if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
	    reprobe_flag == 0) {
		/*
		 * This GID may have been already probed. Send
		 * in a CLP to check if IOUnitInfo changed?
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
			IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
			    "get new IOCs information");
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_pending_cmds++;
			gid_info->gl_state = IBDM_GET_IOUNITINFO;
			gid_info->gl_reprobe_flag = 0;
			mutex_exit(&gid_info->gl_mutex);
			if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
				mutex_enter(&gid_info->gl_mutex);
				--gid_info->gl_pending_cmds;
				mutex_exit(&gid_info->gl_mutex);
				mutex_enter(&ibdm.ibdm_mutex);
				--ibdm.ibdm_ngid_probes_in_progress;
				ibdm_wakeup_probe_gid_cv();
				mutex_exit(&ibdm.ibdm_mutex);
			}
		} else {
			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);
		}
		return;
	} else if (reprobe_flag && gid_info->gl_state ==
	    IBDM_GID_PROBING_COMPLETE) {
		/*
		 * Reprobe all IOCs for the GID which has completed
		 * probe. Skip other port GIDs to same IOU.
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		ibdm_ioc_info_t *ioc_info;
		uint8_t niocs, ii;

		ASSERT(gid_info->gl_iou);
		mutex_enter(&gid_info->gl_mutex);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		gid_info->gl_state = IBDM_GET_IOC_DETAILS;
		gid_info->gl_pending_cmds += niocs;
		gid_info->gl_reprobe_flag = 0;
		mutex_exit(&gid_info->gl_mutex);
		for (ii = 0; ii < niocs; ii++) {
			uchar_t slot_info;
			ib_dm_io_unitinfo_t *giou_info;

			/*
			 * Check whether IOC is present in the slot
			 * Series of nibbles (in the field
			 * iou_ctrl_list) represents a slot in the
			 * IOU.
			 * Byte format: 76543210
			 * Bits 0-3 of first byte represent Slot 2
			 * bits 4-7 of first byte represent slot 1,
			 * bits 0-3 of second byte represent slot 4
			 * and so on
			 * Each 4-bit nibble has the following meaning
			 * 0x0 : IOC not installed
			 * 0x1 : IOC is present
			 * 0xf : Slot does not exist
			 * and all other values are reserved.
			 */
			ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
			giou_info = &gid_info->gl_iou->iou_info;
			slot_info = giou_info->iou_ctrl_list[(ii/2)];
			if ((ii % 2) == 0)
				slot_info = (slot_info >> 4);

			if ((slot_info & 0xf) != 1) {
				ioc_info->ioc_state =
				    IBDM_IOC_STATE_PROBE_FAILED;
				ibdm_gid_decr_pending(gid_info);
				continue;
			}

			if (ibdm_send_ioc_profile(gid_info, ii) !=
			    IBDM_SUCCESS) {
				ibdm_gid_decr_pending(gid_info);
			}
		}

		return;
	} else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
		/* Nothing to do for this GID; just account for it */
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether the destination GID supports DM agents. If
	 * not, stop probing the GID and continue with the next GID
	 * in the list.
	 */
	if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		gid_info->gl_is_dm_capable = B_FALSE;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * This GID is Device management capable
	 */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_is_dm_capable = B_TRUE;
	mutex_exit(&gid_info->gl_mutex);

	/* Get the nodeguid and portguid of the port */
	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
	    &node_guid, &port_guid) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether we already knew about this NodeGuid
	 * If so, do not probe the GID and continue with the
	 * next GID in the gid list. Set the GID state to
	 * probing done.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info->gl_nodeguid = node_guid;
	gid_info->gl_portguid = port_guid;
	if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
		mutex_exit(&ibdm.ibdm_mutex);
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}
	ibdm_add_to_gl_gid(gid_info, gid_info);
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * New or reinserted GID : Enable notification to IBnex
	 */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 1;

	/*
	 * A Cisco FC GW needs the special handling to get IOUnitInfo.
	 */
	if (ibdm_is_cisco_switch(gid_info)) {
		gid_info->gl_pending_cmds++;
		gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
		mutex_exit(&gid_info->gl_mutex);

		if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_state = IBDM_GID_PROBING_FAILED;
			--gid_info->gl_pending_cmds;
			mutex_exit(&gid_info->gl_mutex);

			/* free the hca_list on this gid_info */
			ibdm_delete_glhca_list(gid_info);

			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);

			return;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_wait_cisco_probe_completion(gid_info);

		IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
		    "CISCO Wakeup signal received");
	}

	/* move on to the 'GET_CLASSPORTINFO' stage */
	gid_info->gl_pending_cmds++;
	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
	    "%d: gid_info %p gl_state %d pending_cmds %d",
	    __LINE__, gid_info, gid_info->gl_state,
	    gid_info->gl_pending_cmds);

	/*
	 * Send ClassPortInfo request to the GID asynchronously.
	 */
	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		--gid_info->gl_pending_cmds;
		mutex_exit(&gid_info->gl_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);

		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);

		return;
	}
}


/*
 * ibdm_check_dest_nodeguid
 *	Searches for the NodeGuid in the GID list
 *	Returns matching gid_info if found and otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during device sweep / probe or for GID_AVAILABLE event.
 *
 *	Parameter :
 *		gid_info	GID to check
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
{
	ibdm_dp_gidinfo_t *gid_list;
	ibdm_gid_t *tmp;

	IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");

	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		if ((gid_list != gid_info) &&
		    (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tcheck_dest_nodeguid: NodeGuid is present");

			/* Add to gid_list */
			tmp = kmem_zalloc(sizeof (ibdm_gid_t),
			    KM_SLEEP);
			tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
			tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
			tmp->gid_next = gid_list->gl_gid;
			gid_list->gl_gid = tmp;
			gid_list->gl_ngids++;
			return (gid_list);
		}

		gid_list = gid_list->gl_next;
	}

	return (NULL);
}


/*
 * ibdm_is_dev_mgt_supported
 *	Get the PortInfo attribute (SA Query)
 *
Check "CompatabilityMask" field in the Portinfo. 1891 * Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set) 1892 * by the port, otherwise IBDM_FAILURE 1893 */ 1894 static int 1895 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info) 1896 { 1897 int ret; 1898 size_t length = 0; 1899 sa_portinfo_record_t req, *resp = NULL; 1900 ibmf_saa_access_args_t qargs; 1901 1902 bzero(&req, sizeof (sa_portinfo_record_t)); 1903 req.EndportLID = gid_info->gl_dlid; 1904 1905 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID; 1906 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1907 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 1908 qargs.sq_template = &req; 1909 qargs.sq_callback = NULL; 1910 qargs.sq_callback_arg = NULL; 1911 1912 ret = ibmf_sa_access(gid_info->gl_sa_hdl, 1913 &qargs, 0, &length, (void **)&resp); 1914 1915 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1916 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:" 1917 "failed to get PORTINFO attribute %d", ret); 1918 return (IBDM_FAILURE); 1919 } 1920 1921 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) { 1922 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!"); 1923 ret = IBDM_SUCCESS; 1924 } else { 1925 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: " 1926 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask); 1927 ret = IBDM_FAILURE; 1928 } 1929 kmem_free(resp, length); 1930 return (ret); 1931 } 1932 1933 1934 /* 1935 * ibdm_get_node_port_guids() 1936 * Get the NodeInfoRecord of the port 1937 * Save NodeGuid and PortGUID values in the GID list structure. 
1938 * Return IBDM_SUCCESS/IBDM_FAILURE 1939 */ 1940 static int 1941 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid, 1942 ib_guid_t *node_guid, ib_guid_t *port_guid) 1943 { 1944 int ret; 1945 size_t length = 0; 1946 sa_node_record_t req, *resp = NULL; 1947 ibmf_saa_access_args_t qargs; 1948 1949 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids"); 1950 1951 bzero(&req, sizeof (sa_node_record_t)); 1952 req.LID = dlid; 1953 1954 qargs.sq_attr_id = SA_NODERECORD_ATTRID; 1955 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1956 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID; 1957 qargs.sq_template = &req; 1958 qargs.sq_callback = NULL; 1959 qargs.sq_callback_arg = NULL; 1960 1961 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp); 1962 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1963 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:" 1964 " SA Retrieve Failed: %d", ret); 1965 return (IBDM_FAILURE); 1966 } 1967 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port" 1968 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.NodeGUID); 1969 1970 *node_guid = resp->NodeInfo.NodeGUID; 1971 *port_guid = resp->NodeInfo.PortGUID; 1972 kmem_free(resp, length); 1973 return (IBDM_SUCCESS); 1974 } 1975 1976 1977 /* 1978 * ibdm_get_reachable_ports() 1979 * Get list of the destination GID (and its path records) by 1980 * querying the SA access. 
 *
 *	Returns Number paths
 */
static int
ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
{
	uint_t ii, jj, nrecs;
	uint_t npaths = 0;
	size_t length;
	ib_gid_t sgid;
	ibdm_pkey_tbl_t *pkey_tbl;
	sa_path_record_t *result;
	sa_path_record_t *precp;
	ibdm_dp_gidinfo_t *gid_info;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);

	sgid.gid_prefix = portinfo->pa_sn_prefix;
	sgid.gid_guid = portinfo->pa_port_guid;

	/* get reversible paths */
	if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
	    sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
	    != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tget_reachable_ports: Getting path records failed");
		return (0);
	}

	for (ii = 0; ii < nrecs; ii++) {
		sa_node_record_t *nrec;
		/*
		 * NOTE: this inner `length` intentionally shadows the
		 * outer one (which holds the size of `result` and is
		 * used to free it below).
		 */
		size_t length;

		precp = &result[ii];
		if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
		    precp->DGID.gid_prefix)) != NULL) {
			IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
			    "Already exists nrecs %d, ii %d", nrecs, ii);
			ibdm_addto_glhcalist(gid_info, hca);
			continue;
		}
		/*
		 * This is a new GID. Allocate a GID structure and
		 * initialize the structure
		 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0)
		 * by kmem_zalloc call
		 */
		gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
		mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
		gid_info->gl_dgid_hi = precp->DGID.gid_prefix;
		gid_info->gl_dgid_lo = precp->DGID.gid_guid;
		gid_info->gl_sgid_hi = precp->SGID.gid_prefix;
		gid_info->gl_sgid_lo = precp->SGID.gid_guid;
		gid_info->gl_p_key = precp->P_Key;
		gid_info->gl_sa_hdl = portinfo->pa_sa_hdl;
		gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl;
		gid_info->gl_slid = precp->SLID;
		gid_info->gl_dlid = precp->DLID;
		/* reserve a range of MAD transaction IDs for this GID */
		gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		gid_info->gl_min_transactionID = gid_info->gl_transactionID;
		gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		gid_info->gl_SL = precp->SL;

		/*
		 * get the node record with this guid if the destination
		 * device is a Cisco one.
		 */
		if (ibdm_is_cisco(precp->DGID.gid_guid) &&
		    (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) &&
		    ibdm_get_node_record_by_port(portinfo->pa_sa_hdl,
		    precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) {
			gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
			gid_info->gl_devid = nrec->NodeInfo.DeviceID;
			kmem_free(nrec, length);
		}

		ibdm_addto_glhcalist(gid_info, hca);

		ibdm_dump_path_info(precp);

		gid_info->gl_qp_hdl = NULL;
		ASSERT(portinfo->pa_pkey_tbl != NULL &&
		    portinfo->pa_npkeys != 0);

		/* pick the QP handle matching this path's P_Key */
		for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
			pkey_tbl = &portinfo->pa_pkey_tbl[jj];
			if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
			    (pkey_tbl->pt_qp_hdl != NULL)) {
				gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
				break;
			}
		}

		/*
		 * QP handle for GID not initialized. No matching Pkey
		 * was found!! ibdm should *not* hit this case. Flag an
		 * error and drop the GID if ibdm does encounter this.
		 */
		if (gid_info->gl_qp_hdl == NULL) {
			IBTF_DPRINTF_L2(ibdm_string,
			    "\tget_reachable_ports: No matching Pkey");
			ibdm_delete_gidinfo(gid_info);
			continue;
		}
		if (ibdm.ibdm_dp_gidlist_head == NULL) {
			ibdm.ibdm_dp_gidlist_head = gid_info;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		} else {
			ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
			gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		}
		npaths++;
	}
	kmem_free(result, length);
	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
	return (npaths);
}


/*
 * ibdm_check_dgid()
 *	Look in the global list to check whether we know this DGID already
 *	Returns the matching ibdm_dp_gidinfo_t entry, or NULL if the
 *	DGID is not yet known.
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
{
	ibdm_dp_gidinfo_t *gid_list;

	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
	    gid_list = gid_list->gl_next) {
		if ((guid == gid_list->gl_dgid_lo) &&
		    (prefix == gid_list->gl_dgid_hi)) {
			break;
		}
	}
	return (gid_list);
}


/*
 * ibdm_find_gid()
 *	Look in the global list to find a GID entry with matching
 *	port & node GUID.
2130 * Return pointer to gidinfo if found, else return NULL 2131 */ 2132 static ibdm_dp_gidinfo_t * 2133 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 2134 { 2135 ibdm_dp_gidinfo_t *gid_list; 2136 2137 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 2138 nodeguid, portguid); 2139 2140 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2141 gid_list = gid_list->gl_next) { 2142 if ((portguid == gid_list->gl_portguid) && 2143 (nodeguid == gid_list->gl_nodeguid)) { 2144 break; 2145 } 2146 } 2147 2148 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 2149 gid_list); 2150 return (gid_list); 2151 } 2152 2153 2154 /* 2155 * ibdm_set_classportinfo() 2156 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 2157 * by sending the setClassPortInfo request with the trapLID, trapGID 2158 * and etc. to the gateway since the gateway doesn't provide the IO 2159 * Unit Information othewise. This behavior is the Cisco specific one, 2160 * and this function is called to a Cisco FC GW only. 2161 * Returns IBDM_SUCCESS/IBDM_FAILURE 2162 */ 2163 static int 2164 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2165 { 2166 ibmf_msg_t *msg; 2167 ib_mad_hdr_t *hdr; 2168 ibdm_timeout_cb_args_t *cb_args; 2169 void *data; 2170 ib_mad_classportinfo_t *cpi; 2171 2172 IBTF_DPRINTF_L4("ibdm", 2173 "\tset_classportinfo: gid info 0x%p", gid_info); 2174 2175 /* 2176 * Send command to set classportinfo attribute. Allocate a IBMF 2177 * packet and initialize the packet. 
2178 */ 2179 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2180 &msg) != IBMF_SUCCESS) { 2181 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 2182 return (IBDM_FAILURE); 2183 } 2184 2185 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2186 ibdm_alloc_send_buffers(msg); 2187 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2188 2189 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2190 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2191 msg->im_local_addr.ia_remote_qno = 1; 2192 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2193 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2194 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2195 2196 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2197 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2198 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2199 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2200 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2201 hdr->Status = 0; 2202 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2203 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2204 hdr->AttributeModifier = 0; 2205 2206 data = msg->im_msgbufs_send.im_bufs_cl_data; 2207 cpi = (ib_mad_classportinfo_t *)data; 2208 2209 /* 2210 * Set the classportinfo values to activate this Cisco FC GW. 
2211 */ 2212 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2213 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2214 cpi->TrapLID = h2b16(gid_info->gl_slid); 2215 cpi->TrapSL = gid_info->gl_SL; 2216 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2217 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2218 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2219 gid_info->gl_qp_hdl)->isq_qkey)); 2220 2221 cb_args = &gid_info->gl_cpi_cb_args; 2222 cb_args->cb_gid_info = gid_info; 2223 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2224 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2225 2226 mutex_enter(&gid_info->gl_mutex); 2227 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2228 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2229 mutex_exit(&gid_info->gl_mutex); 2230 2231 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2232 "timeout id %x", gid_info->gl_timeout_id); 2233 2234 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2235 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2236 IBTF_DPRINTF_L2("ibdm", 2237 "\tset_classportinfo: ibmf send failed"); 2238 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2239 } 2240 2241 return (IBDM_SUCCESS); 2242 } 2243 2244 2245 /* 2246 * ibdm_send_classportinfo() 2247 * Send classportinfo request. When the request is completed 2248 * IBMF calls ibdm_classportinfo_cb routine to inform about 2249 * the completion. 2250 * Returns IBDM_SUCCESS/IBDM_FAILURE 2251 */ 2252 static int 2253 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2254 { 2255 ibmf_msg_t *msg; 2256 ib_mad_hdr_t *hdr; 2257 ibdm_timeout_cb_args_t *cb_args; 2258 2259 IBTF_DPRINTF_L4("ibdm", 2260 "\tsend_classportinfo: gid info 0x%p", gid_info); 2261 2262 /* 2263 * Send command to get classportinfo attribute. Allocate a IBMF 2264 * packet and initialize the packet. 
2265 */ 2266 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2267 &msg) != IBMF_SUCCESS) { 2268 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2269 return (IBDM_FAILURE); 2270 } 2271 2272 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2273 ibdm_alloc_send_buffers(msg); 2274 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2275 2276 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2277 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2278 msg->im_local_addr.ia_remote_qno = 1; 2279 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2280 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2281 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2282 2283 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2284 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2285 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2286 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2287 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2288 hdr->Status = 0; 2289 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2290 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2291 hdr->AttributeModifier = 0; 2292 2293 cb_args = &gid_info->gl_cpi_cb_args; 2294 cb_args->cb_gid_info = gid_info; 2295 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2296 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2297 2298 mutex_enter(&gid_info->gl_mutex); 2299 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2300 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2301 mutex_exit(&gid_info->gl_mutex); 2302 2303 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2304 "timeout id %x", gid_info->gl_timeout_id); 2305 2306 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2307 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2308 IBTF_DPRINTF_L2("ibdm", 2309 "\tsend_classportinfo: ibmf send failed"); 2310 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2311 } 2312 2313 return (IBDM_SUCCESS); 2314 } 2315 2316 2317 /* 2318 * ibdm_handle_setclassportinfo() 2319 * Invoked by the IBMF when 
setClassPortInfo request is completed. 2320 */ 2321 static void 2322 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2323 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2324 { 2325 void *data; 2326 timeout_id_t timeout_id; 2327 ib_mad_classportinfo_t *cpi; 2328 2329 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2330 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2331 2332 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2333 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2334 "Not a ClassPortInfo resp"); 2335 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2336 return; 2337 } 2338 2339 /* 2340 * Verify whether timeout handler is created/active. 2341 * If created/ active, cancel the timeout handler 2342 */ 2343 mutex_enter(&gid_info->gl_mutex); 2344 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2345 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2346 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2347 mutex_exit(&gid_info->gl_mutex); 2348 return; 2349 } 2350 ibdm_bump_transactionID(gid_info); 2351 2352 gid_info->gl_iou_cb_args.cb_req_type = 0; 2353 if (gid_info->gl_timeout_id) { 2354 timeout_id = gid_info->gl_timeout_id; 2355 mutex_exit(&gid_info->gl_mutex); 2356 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2357 "gl_timeout_id = 0x%x", timeout_id); 2358 if (untimeout(timeout_id) == -1) { 2359 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2360 "untimeout gl_timeout_id failed"); 2361 } 2362 mutex_enter(&gid_info->gl_mutex); 2363 gid_info->gl_timeout_id = 0; 2364 } 2365 mutex_exit(&gid_info->gl_mutex); 2366 2367 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2368 cpi = (ib_mad_classportinfo_t *)data; 2369 2370 ibdm_dump_classportinfo(cpi); 2371 } 2372 2373 2374 /* 2375 * ibdm_handle_classportinfo() 2376 * Invoked by the IBMF when the classportinfo request is completed. 
 */
static void
ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	void			*data;
	timeout_id_t		timeout_id;
	ib_mad_hdr_t		*hdr;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl "
	    "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);

	/* Reject responses for any attribute other than ClassPortInfo */
	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: "
		    "Not a ClassPortInfo resp");
		*flag |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}

	/*
	 * Verify whether timeout handler is created/active.
	 * If created/ active, cancel the timeout handler
	 */
	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp");
		*flag |= IBDM_IBMF_PKT_DUP_RESP;
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	gid_info->gl_iou_cb_args.cb_req_type = 0;
	if (gid_info->gl_timeout_id) {
		/* Drop gl_mutex across untimeout() to avoid deadlock */
		timeout_id = gid_info->gl_timeout_id;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: "
		    "gl_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: "
			    "untimeout gl_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_timeout_id = 0;
	}
	/* Advance the probe state machine to the IOUnitInfo step */
	gid_info->gl_state = IBDM_GET_IOUNITINFO;
	gid_info->gl_pending_cmds++;
	mutex_exit(&gid_info->gl_mutex);

	data = msg->im_msgbufs_recv.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	/*
	 * Cache the "RespTimeValue" and redirection information in the
	 * global gid list data structure. This cached information will
	 * be used to send any further requests to the GID.
	 */
	gid_info->gl_resp_timeout	=
	    (b2h32(cpi->RespTimeValue) & 0x1F);

	gid_info->gl_redirected		= ((IBDM_IN_IBMFMSG_STATUS(msg) &
	    MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE);
	gid_info->gl_redirect_dlid	= b2h16(cpi->RedirectLID);
	gid_info->gl_redirect_QP	= (b2h32(cpi->RedirectQP) & 0xffffff);
	gid_info->gl_redirect_pkey	= b2h16(cpi->RedirectP_Key);
	gid_info->gl_redirect_qkey	= b2h32(cpi->RedirectQ_Key);
	gid_info->gl_redirectGID_hi	= b2h64(cpi->RedirectGID_hi);
	gid_info->gl_redirectGID_lo	= b2h64(cpi->RedirectGID_lo);
	gid_info->gl_redirectSL		= cpi->RedirectSL;

	ibdm_dump_classportinfo(cpi);

	/*
	 * Send IOUnitInfo request
	 * Reuse previously allocated IBMF packet for sending ClassPortInfo
	 * Check whether DM agent on the remote node requested redirection
	 * If so, send the request to the redirect DGID/DLID/PKEY/QP.
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
	msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;

	if (gid_info->gl_redirected == B_TRUE) {
		if (gid_info->gl_redirect_dlid != 0) {
			msg->im_local_addr.ia_remote_lid =
			    gid_info->gl_redirect_dlid;
		}
		msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
		msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
	} else {
		msg->im_local_addr.ia_remote_qno = 1;
		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
	}

	/* Device Management class, Get(IOUnitInfo) */
	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
	hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
	hdr->Status = 0;
	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
	hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
	hdr->AttributeModifier = 0;

	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;

	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:"
	    "timeout %x", gid_info->gl_timeout_id);

	if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL,
	    ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\thandle_classportinfo: msg transport failed");
		ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args);
	}
	/* Tell the recv path the incoming msg was reused; don't free it */
	(*flag) |= IBDM_IBMF_PKT_REUSED;
}


/*
 * ibdm_send_iounitinfo:
 *	Sends a DM request to get IOU unitinfo.
 */
static int
ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info)
{
	ibmf_msg_t	*msg;
	ib_mad_hdr_t	*hdr;

	IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info);

	/*
	 * Send command to get iounitinfo attribute. Allocate a IBMF
	 * packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);


	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
	msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
	msg->im_local_addr.ia_remote_qno = 1;
	msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
	msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
	msg->im_local_addr.ia_service_level = gid_info->gl_SL;

	/* Device Management class, Get(IOUnitInfo) */
	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
	hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
	hdr->Status = 0;
	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
	hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
	hdr->AttributeModifier = 0;

	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;

	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:"
	    "timeout %x", gid_info->gl_timeout_id);

	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
	    NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed");
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl,
		    msg, &gid_info->gl_iou_cb_args);
	}
	return (IBDM_SUCCESS);
}

/*
 * ibdm_handle_iounitinfo()
 *	Invoked by the IBMF when IO Unitinfo request is completed.
 */
static void
ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			ii, first = B_TRUE;
	int			num_iocs;
	size_t			size;
	uchar_t			slot_info;
	timeout_id_t		timeout_id;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc_info;
	ib_dm_io_unitinfo_t	*iou_info;
	ib_dm_io_unitinfo_t	*giou_info;
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:"
	    " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);

	/* Reject responses for any attribute other than IOUnitInfo */
	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
		    "Unexpected response");
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}

	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_iounitinfo: DUP resp");
		mutex_exit(&gid_info->gl_mutex);
		(*flag) = IBDM_IBMF_PKT_DUP_RESP;
		return;
	}
	gid_info->gl_iou_cb_args.cb_req_type = 0;
	if (gid_info->gl_timeout_id) {
		/* Drop gl_mutex across untimeout() to avoid deadlock */
		timeout_id = gid_info->gl_timeout_id;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
		    "gl_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
			    "untimeout gl_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_timeout_id = 0;
	}
	gid_info->gl_state = IBDM_GET_IOC_DETAILS;

	iou_info = IBDM_IN_IBMFMSG2IOU(msg);
	ibdm_dump_iounitinfo(iou_info);
	num_iocs = iou_info->iou_num_ctrl_slots;
	/*
	 * check if number of IOCs reported is zero? if yes, return.
	 * when num_iocs are reported zero internal IOC database needs
	 * to be updated. To ensure that save the number of IOCs in
	 * the new field "gl_num_iocs". Use a new field instead of
	 * "giou_info->iou_num_ctrl_slots" as that would prevent
	 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0.
	 */
	if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's");
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);

	/*
	 * if there is an existing gl_iou (IOU has been probed before)
	 * check if the "iou_changeid" is same as saved entry in
	 * "giou_info->iou_changeid".
	 * (note: this logic can prevent IOC enumeration if a given
	 * vendor doesn't support setting iou_changeid field for its IOU)
	 *
	 * if there is an existing gl_iou and iou_changeid has changed :
	 * free up existing gl_iou info and its related structures.
	 * reallocate gl_iou info all over again.
	 * if we donot free this up; then this leads to memory leaks
	 */
	if (gid_info->gl_iou) {
		giou_info = &gid_info->gl_iou->iou_info;
		if (b2h16(iou_info->iou_changeid) ==
		    giou_info->iou_changeid) {
			IBTF_DPRINTF_L3("ibdm",
			    "\thandle_iounitinfo: no IOCs changed");
			gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
			mutex_exit(&gid_info->gl_mutex);
			return;
		}

		/*
		 * Store the iou info as prev_iou to be used after
		 * sweep is done.
		 */
		ASSERT(gid_info->gl_prev_iou == NULL);
		IBTF_DPRINTF_L4(ibdm_string,
		    "\thandle_iounitinfo: setting gl_prev_iou %p",
		    gid_info->gl_prev_iou);
		gid_info->gl_prev_iou = gid_info->gl_iou;
		ibdm.ibdm_prev_iou = 1;
		gid_info->gl_iou = NULL;
	}

	/* IOU header and per-IOC array are carved from one allocation */
	size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t);
	gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP);
	giou_info = &gid_info->gl_iou->iou_info;
	gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
	    ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));

	giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs;
	giou_info->iou_flag = iou_info->iou_flag;
	bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128);
	giou_info->iou_changeid = b2h16(iou_info->iou_changeid);
	gid_info->gl_pending_cmds++; /* for diag code */
	mutex_exit(&gid_info->gl_mutex);

	if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) {
		/* Request never went out; undo the pending-command count */
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds--;
		mutex_exit(&gid_info->gl_mutex);
	}
	/*
	 * Parallelize getting IOC controller profiles from here.
	 * Allocate IBMF packets and send commands to get IOC profile for
	 * each IOC present on the IOU.
	 */
	for (ii = 0; ii < num_iocs; ii++) {
		/*
		 * Check whether IOC is present in the slot
		 * Series of nibbles (in the field iou_ctrl_list) represents
		 * a slot in the IOU.
		 * Byte format: 76543210
		 * Bits 0-3 of first byte represent Slot 2
		 * bits 4-7 of first byte represent slot 1,
		 * bits 0-3 of second byte represent slot 4 and so on
		 * Each 4-bit nibble has the following meaning
		 * 0x0 : IOC not installed
		 * 0x1 : IOC is present
		 * 0xf : Slot does not exist
		 * and all other values are reserved.
		 */
		ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
		slot_info = giou_info->iou_ctrl_list[(ii/2)];
		if ((ii % 2) == 0)
			slot_info = (slot_info >> 4);

		if ((slot_info & 0xf) != 1) {
			IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: "
			    "No IOC is present in the slot = %d", ii);
			ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
			continue;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_bump_transactionID(gid_info);
		mutex_exit(&gid_info->gl_mutex);

		/*
		 * Re use the already allocated packet (for IOUnitinfo) to
		 * send the first IOC controller attribute. Allocate new
		 * IBMF packets for the rest of the IOC's
		 */
		if (first != B_TRUE) {
			msg = NULL;
			if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
			    &msg) != IBMF_SUCCESS) {
				IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: "
				    "IBMF packet allocation failed");
				continue;
			}

		}

		/* allocate send buffers for all messages */
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
		ibdm_alloc_send_buffers(msg);
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

		msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
		msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
		if (gid_info->gl_redirected == B_TRUE) {
			if (gid_info->gl_redirect_dlid != 0) {
				msg->im_local_addr.ia_remote_lid =
				    gid_info->gl_redirect_dlid;
			}
			msg->im_local_addr.ia_remote_qno =
			    gid_info->gl_redirect_QP;
			msg->im_local_addr.ia_p_key =
			    gid_info->gl_redirect_pkey;
			msg->im_local_addr.ia_q_key =
			    gid_info->gl_redirect_qkey;
			msg->im_local_addr.ia_service_level =
			    gid_info->gl_redirectSL;
		} else {
			msg->im_local_addr.ia_remote_qno = 1;
			msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
			msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
			msg->im_local_addr.ia_service_level = gid_info->gl_SL;
		}

		/* Get(IOCControllerProfile); slot index is 1-based */
		hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
		hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
		hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
		hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
		hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
		hdr->Status = 0;
		hdr->TransactionID = h2b64(gid_info->gl_transactionID);
		hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
		hdr->AttributeModifier = h2b32(ii + 1);

		ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID;
		cb_args = &ioc_info->ioc_cb_args;
		cb_args->cb_gid_info = gid_info;
		cb_args->cb_retry_count = ibdm_dft_retry_cnt;
		cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
		cb_args->cb_ioc_num = ii;

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds++; /* for diag code */

		ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
		    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
		mutex_exit(&gid_info->gl_mutex);

		IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:"
		    "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii);

		if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
		    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm",
			    "\thandle_iounitinfo: msg transport failed");
			ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
		}
		(*flag) |= IBDM_IBMF_PKT_REUSED;
		first = B_FALSE;
		gid_info->gl_iou->iou_niocs_probe_in_progress++;
	}
}


/*
 * ibdm_handle_ioc_profile()
 *	Invoked by the IBMF when the IOCControllerProfile request
 *	gets completed
 */
static void
ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			first = B_TRUE, reprobe = 0;
	uint_t			ii, ioc_no, srv_start;
	uint_t			nserv_entries;
	timeout_id_t		timeout_id;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc_info;
	ibdm_timeout_cb_args_t	*cb_args;
	ib_dm_ioc_ctrl_profile_t *ioc, *gioc;

	IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
	    " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info);

	ioc = IBDM_IN_IBMFMSG2IOC(msg);
	/*
	 * Check whether we know this IOC already
	 * This will return NULL if reprobe is in progress
	 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set.
	 * Do not hold mutexes here.
	 */
	if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
		    "IOC guid %llx is present", ioc->ioc_guid);
		return;
	}
	ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
	IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);

	/* Make sure that IOC index is with the valid range */
	if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
		/*
		 * NOTE(review): %d is given the pointer 'ioc' here; this
		 * looks like it was meant to be 'ioc_no' — confirm.
		 */
		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
		    "IOC index Out of range, index %d", ioc);
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		return;
	}
	ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
	ioc_info->ioc_iou_info = gid_info->gl_iou;

	mutex_enter(&gid_info->gl_mutex);
	if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
		/* Reprobe: keep the old service entries until sweep ends */
		reprobe = 1;
		ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
		ioc_info->ioc_serv = NULL;
		ioc_info->ioc_prev_serv_cnt =
		    ioc_info->ioc_profile.ioc_service_entries;
	} else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response"
		    "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
		mutex_exit(&gid_info->gl_mutex);
		(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
		return;
	}
	ioc_info->ioc_cb_args.cb_req_type = 0;
	if (ioc_info->ioc_timeout_id) {
		/* Drop gl_mutex across untimeout() to avoid deadlock */
		timeout_id = ioc_info->ioc_timeout_id;
		ioc_info->ioc_timeout_id = 0;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
		    "ioc_timeout_id = 0x%x", timeout_id);
		if (untimeout(timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
			    "untimeout ioc_timeout_id failed");
		}
		mutex_enter(&gid_info->gl_mutex);
	}

	ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
	if (reprobe == 0) {
		ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
		ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
	}

	/*
	 * Save all the IOC information in the global structures.
	 * Note the wire format is Big Endian and the Sparc process also
	 * big endian. So, there is no need to convert the data fields
	 * The conversion routines used below are ineffective on Sparc
	 * machines where as they will be effective on little endian
	 * machines such as Intel processors.
	 */
	gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile;

	/*
	 * Restrict updates to onlyport GIDs and service entries during reprobe
	 */
	if (reprobe == 0) {
		gioc->ioc_guid = b2h64(ioc->ioc_guid);
		gioc->ioc_vendorid =
		    ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK)
		    >> IB_DM_VENDORID_SHIFT);
		gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid);
		gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver);
		gioc->ioc_subsys_vendorid =
		    ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK)
		    >> IB_DM_VENDORID_SHIFT);
		gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id);
		gioc->ioc_io_class = b2h16(ioc->ioc_io_class);
		gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass);
		gioc->ioc_protocol = b2h16(ioc->ioc_protocol);
		gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver);
		gioc->ioc_send_msg_qdepth =
		    b2h16(ioc->ioc_send_msg_qdepth);
		gioc->ioc_rdma_read_qdepth =
		    b2h16(ioc->ioc_rdma_read_qdepth);
		gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz);
		gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz);
		gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask;
		bcopy(ioc->ioc_id_string, gioc->ioc_id_string,
		    IB_DM_IOC_ID_STRING_LEN);

		ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode;
		ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid;
		ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK &
		    gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE;

		if (ioc_info->ioc_diagdeviceid == B_TRUE) {
			/* A per-IOC diag code request will be issued below */
			gid_info->gl_pending_cmds++;
			IBTF_DPRINTF_L3(ibdm_string,
			    "\tibdm_handle_ioc_profile: "
			    "%d: gid_info %p gl_state %d pending_cmds %d",
			    __LINE__, gid_info, gid_info->gl_state,
			    gid_info->gl_pending_cmds);
		}
	}
	gioc->ioc_service_entries = ioc->ioc_service_entries;
	mutex_exit(&gid_info->gl_mutex);

	ibdm_dump_ioc_profile(gioc);

	if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) {
		if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) {
			/* Request never went out; undo pending count */
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_pending_cmds--;
			mutex_exit(&gid_info->gl_mutex);
		}
	}
	ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc(
	    (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)),
	    KM_SLEEP);

	/*
	 * In one single request, maximum number of requests that can be
	 * obtained is 4. If number of service entries are more than four,
	 * calculate number requests needed and send them parallelly.
	 */
	nserv_entries = ioc->ioc_service_entries;
	ii = 0;
	while (nserv_entries) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_pending_cmds++;
		ibdm_bump_transactionID(gid_info);
		mutex_exit(&gid_info->gl_mutex);

		if (first != B_TRUE) {
			/*
			 * NOTE(review): on alloc failure this 'continue'
			 * neither consumes nserv_entries nor undoes the
			 * gl_pending_cmds++ above, so the loop can spin —
			 * confirm whether IBMF_ALLOC_SLEEP makes failure
			 * impossible here.
			 */
			if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
			    &msg) != IBMF_SUCCESS) {
				continue;
			}

		}
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
		ibdm_alloc_send_buffers(msg);
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
		msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
		msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
		if (gid_info->gl_redirected == B_TRUE) {
			if (gid_info->gl_redirect_dlid != 0) {
				msg->im_local_addr.ia_remote_lid =
				    gid_info->gl_redirect_dlid;
			}
			msg->im_local_addr.ia_remote_qno =
			    gid_info->gl_redirect_QP;
			msg->im_local_addr.ia_p_key =
			    gid_info->gl_redirect_pkey;
			msg->im_local_addr.ia_q_key =
			    gid_info->gl_redirect_qkey;
			msg->im_local_addr.ia_service_level =
			    gid_info->gl_redirectSL;
		} else {
			msg->im_local_addr.ia_remote_qno = 1;
			msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
			msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
			msg->im_local_addr.ia_service_level = gid_info->gl_SL;
		}

		/* Get(ServiceEntries); range is encoded in AttributeModifier */
		hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
		hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
		hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
		hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
		hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
		hdr->Status = 0;
		hdr->TransactionID = h2b64(gid_info->gl_transactionID);
		hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES);

		srv_start = ii * 4;
		cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args;
		cb_args->cb_gid_info = gid_info;
		cb_args->cb_retry_count = ibdm_dft_retry_cnt;
		cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS;
		cb_args->cb_srvents_start = srv_start;
		cb_args->cb_ioc_num = ioc_no - 1;

		if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) {
			nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ;
			cb_args->cb_srvents_end = (cb_args->cb_srvents_start +
			    IBDM_MAX_SERV_ENTRIES_PER_REQ - 1);
		} else {
			cb_args->cb_srvents_end =
			    (cb_args->cb_srvents_start + nserv_entries - 1);
			nserv_entries = 0;
		}
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
		ibdm_fill_srv_attr_mod(hdr, cb_args);
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))

		mutex_enter(&gid_info->gl_mutex);
		ioc_info->ioc_serv[srv_start].se_timeout_id = timeout(
		    ibdm_pkt_timeout_hdlr, cb_args,
		    IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
		mutex_exit(&gid_info->gl_mutex);

		IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:"
		    "timeout %x, ioc %d srv %d",
		    ioc_info->ioc_serv[srv_start].se_timeout_id,
		    ioc_no - 1, srv_start);

		if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
		    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm",
			    "\thandle_ioc_profile: msg send failed");
			ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
		}
		(*flag) |= IBDM_IBMF_PKT_REUSED;
		first = B_FALSE;
		ii++;
	}
}


/*
 * ibdm_handle_srventry_mad()
 */
static void
ibdm_handle_srventry_mad(ibmf_msg_t *msg,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	uint_t			ii, ioc_no, attrmod;
	uint_t			nentries, start, end;
	timeout_id_t		timeout_id;
	ib_dm_srv_t		*srv_ents;
	ibdm_ioc_info_t		*ioc_info;
	ibdm_srvents_info_t	*gsrv_ents;

	IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:"
	    " IBMF msg %p gid info %p", msg, gid_info);

	srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg);
	/*
	 * Get the start and end index of the service entries
	 * Upper 16 bits identify the IOC
	 * Lower 16 bits specify the range of service entries
	 * LSB specifies (Big endian) end of the range
	 * MSB
specifies (Big endian) start of the range 3092 */ 3093 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3094 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3095 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 3096 start = (attrmod & IBDM_8_BIT_MASK); 3097 3098 /* Make sure that IOC index is with the valid range */ 3099 if ((ioc_no < 1) | 3100 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 3101 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3102 "IOC index Out of range, index %d", ioc_no); 3103 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3104 return; 3105 } 3106 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3107 3108 /* 3109 * Make sure that the "start" and "end" service indexes are 3110 * with in the valid range 3111 */ 3112 nentries = ioc_info->ioc_profile.ioc_service_entries; 3113 if ((start > end) | (start >= nentries) | (end >= nentries)) { 3114 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3115 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 3116 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3117 return; 3118 } 3119 gsrv_ents = &ioc_info->ioc_serv[start]; 3120 mutex_enter(&gid_info->gl_mutex); 3121 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 3122 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3123 "already known, ioc %d, srv %d, se_state %x", 3124 ioc_no - 1, start, gsrv_ents->se_state); 3125 mutex_exit(&gid_info->gl_mutex); 3126 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3127 return; 3128 } 3129 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 3130 if (ioc_info->ioc_serv[start].se_timeout_id) { 3131 IBTF_DPRINTF_L2("ibdm", 3132 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 3133 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 3134 ioc_info->ioc_serv[start].se_timeout_id = 0; 3135 mutex_exit(&gid_info->gl_mutex); 3136 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 3137 "se_timeout_id = 0x%x", timeout_id); 3138 if (untimeout(timeout_id) == -1) { 3139 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 3140 "untimeout se_timeout_id 
failed"); 3141 } 3142 mutex_enter(&gid_info->gl_mutex); 3143 } 3144 3145 gsrv_ents->se_state = IBDM_SE_VALID; 3146 mutex_exit(&gid_info->gl_mutex); 3147 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 3148 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 3149 bcopy(srv_ents->srv_name, 3150 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 3151 ibdm_dump_service_entries(&gsrv_ents->se_attr); 3152 } 3153 } 3154 3155 3156 /* 3157 * ibdm_get_diagcode: 3158 * Send request to get IOU/IOC diag code 3159 * Returns IBDM_SUCCESS/IBDM_FAILURE 3160 */ 3161 static int 3162 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 3163 { 3164 ibmf_msg_t *msg; 3165 ib_mad_hdr_t *hdr; 3166 ibdm_ioc_info_t *ioc; 3167 ibdm_timeout_cb_args_t *cb_args; 3168 timeout_id_t *timeout_id; 3169 3170 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 3171 gid_info, attr); 3172 3173 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 3174 &msg) != IBMF_SUCCESS) { 3175 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 3176 return (IBDM_FAILURE); 3177 } 3178 3179 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3180 ibdm_alloc_send_buffers(msg); 3181 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3182 3183 mutex_enter(&gid_info->gl_mutex); 3184 ibdm_bump_transactionID(gid_info); 3185 mutex_exit(&gid_info->gl_mutex); 3186 3187 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3188 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3189 if (gid_info->gl_redirected == B_TRUE) { 3190 if (gid_info->gl_redirect_dlid != 0) { 3191 msg->im_local_addr.ia_remote_lid = 3192 gid_info->gl_redirect_dlid; 3193 } 3194 3195 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3196 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3197 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3198 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3199 } else { 3200 msg->im_local_addr.ia_remote_qno = 1; 3201 msg->im_local_addr.ia_p_key = 
gid_info->gl_p_key; 3202 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3203 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3204 } 3205 3206 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3207 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3208 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3209 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3210 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3211 hdr->Status = 0; 3212 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3213 3214 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3215 hdr->AttributeModifier = h2b32(attr); 3216 3217 if (attr == 0) { 3218 cb_args = &gid_info->gl_iou_cb_args; 3219 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3220 cb_args->cb_ioc_num = 0; 3221 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3222 timeout_id = &gid_info->gl_timeout_id; 3223 } else { 3224 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3225 ioc->ioc_dc_valid = B_FALSE; 3226 cb_args = &ioc->ioc_dc_cb_args; 3227 cb_args->cb_ioc_num = attr - 1; 3228 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3229 timeout_id = &ioc->ioc_dc_timeout_id; 3230 } 3231 cb_args->cb_gid_info = gid_info; 3232 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3233 cb_args->cb_srvents_start = 0; 3234 3235 mutex_enter(&gid_info->gl_mutex); 3236 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3237 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3238 mutex_exit(&gid_info->gl_mutex); 3239 3240 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3241 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3242 3243 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3244 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3245 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3246 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3247 } 3248 return (IBDM_SUCCESS); 3249 } 3250 3251 /* 3252 * ibdm_handle_diagcode: 3253 * Process the DiagCode MAD response and update local DM 3254 * data structure. 
/*
 * ibdm_handle_diagcode:
 *	Process the DiagCode MAD response and update local DM
 *	data structure.  Attribute modifier 0 means the IOU diag code;
 *	a non-zero modifier selects IOC slot (attrmod - 1).  Duplicate
 *	responses (dc already valid) are flagged and dropped.
 */
static void
ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	uint16_t		attrmod, *diagcode;
	ibdm_iou_info_t		*iou;
	ibdm_ioc_info_t		*ioc;
	timeout_id_t		timeout_id;
	ibdm_timeout_cb_args_t	*cb_args;

	/* DiagCode payload is a single big-endian 16-bit value */
	diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data;

	mutex_enter(&gid_info->gl_mutex);
	attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg);
	iou = gid_info->gl_iou;
	if (attrmod == 0) {
		/* IOU diag code */
		if (iou->iou_dc_valid != B_FALSE) {
			/* Already have it: treat as a duplicate response */
			(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
			IBTF_DPRINTF_L4("ibdm",
			    "\thandle_diagcode: Duplicate IOU DiagCode");
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args = &gid_info->gl_iou_cb_args;
		cb_args->cb_req_type = 0;
		iou->iou_diagcode = b2h16(*diagcode);
		iou->iou_dc_valid = B_TRUE;
		if (gid_info->gl_timeout_id) {
			timeout_id = gid_info->gl_timeout_id;
			/* untimeout() may block; must not hold gl_mutex */
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: "
			    "gl_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "handle_diagcode: "
				    "untimeout gl_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_timeout_id = 0;
		}
	} else {
		/* IOC diag code for slot (attrmod - 1) */
		ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1));
		if (ioc->ioc_dc_valid != B_FALSE) {
			(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
			IBTF_DPRINTF_L4("ibdm",
			    "\thandle_diagcode: Duplicate IOC DiagCode");
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args = &ioc->ioc_dc_cb_args;
		cb_args->cb_req_type = 0;
		ioc->ioc_diagcode = b2h16(*diagcode);
		ioc->ioc_dc_valid = B_TRUE;
		timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id;
		if (timeout_id) {
			/* Clear the id before dropping the lock, then cancel */
			iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "handle_diagcode: "
			    "timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: "
				    "untimeout ioc_dc_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
		}
	}
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x"
	    "attrmod : 0x%x", b2h16(*diagcode), attrmod);
}
timeout_id); 3314 if (untimeout(timeout_id) == -1) { 3315 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3316 "untimeout ioc_dc_timeout_id failed"); 3317 } 3318 mutex_enter(&gid_info->gl_mutex); 3319 } 3320 } 3321 mutex_exit(&gid_info->gl_mutex); 3322 3323 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3324 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3325 } 3326 3327 3328 /* 3329 * ibdm_is_ioc_present() 3330 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3331 */ 3332 static ibdm_ioc_info_t * 3333 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3334 ibdm_dp_gidinfo_t *gid_info, int *flag) 3335 { 3336 int ii; 3337 ibdm_ioc_info_t *ioc; 3338 ibdm_dp_gidinfo_t *head; 3339 ib_dm_io_unitinfo_t *iou; 3340 3341 mutex_enter(&ibdm.ibdm_mutex); 3342 head = ibdm.ibdm_dp_gidlist_head; 3343 while (head) { 3344 mutex_enter(&head->gl_mutex); 3345 if (head->gl_iou == NULL) { 3346 mutex_exit(&head->gl_mutex); 3347 head = head->gl_next; 3348 continue; 3349 } 3350 iou = &head->gl_iou->iou_info; 3351 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3352 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3353 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3354 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3355 if (gid_info == head) { 3356 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3357 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3358 head->gl_dgid_hi) != NULL) { 3359 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3360 "present: gid not present"); 3361 ibdm_add_to_gl_gid(gid_info, head); 3362 } 3363 mutex_exit(&head->gl_mutex); 3364 mutex_exit(&ibdm.ibdm_mutex); 3365 return (ioc); 3366 } 3367 } 3368 mutex_exit(&head->gl_mutex); 3369 head = head->gl_next; 3370 } 3371 mutex_exit(&ibdm.ibdm_mutex); 3372 return (NULL); 3373 } 3374 3375 3376 /* 3377 * ibdm_ibmf_send_cb() 3378 * IBMF invokes this callback routine after posting the DM MAD to 3379 * the HCA. 
3380 */ 3381 /*ARGSUSED*/ 3382 static void 3383 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3384 { 3385 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3386 ibdm_free_send_buffers(ibmf_msg); 3387 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3388 IBTF_DPRINTF_L4("ibdm", 3389 "\tibmf_send_cb: IBMF free msg failed"); 3390 } 3391 } 3392 3393 3394 /* 3395 * ibdm_ibmf_recv_cb() 3396 * Invoked by the IBMF when a response to the one of the DM requests 3397 * is received. 3398 */ 3399 /*ARGSUSED*/ 3400 static void 3401 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3402 { 3403 ibdm_taskq_args_t *taskq_args; 3404 3405 /* 3406 * If the taskq enable is set then dispatch a taskq to process 3407 * the MAD, otherwise just process it on this thread 3408 */ 3409 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3410 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3411 return; 3412 } 3413 3414 /* 3415 * create a taskq and dispatch it to process the incoming MAD 3416 */ 3417 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3418 if (taskq_args == NULL) { 3419 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3420 "taskq_args"); 3421 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3422 IBTF_DPRINTF_L4("ibmf_recv_cb", 3423 "\tibmf_recv_cb: IBMF free msg failed"); 3424 } 3425 return; 3426 } 3427 taskq_args->tq_ibmf_handle = ibmf_hdl; 3428 taskq_args->tq_ibmf_msg = msg; 3429 taskq_args->tq_args = arg; 3430 3431 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3432 TQ_NOSLEEP) == 0) { 3433 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3434 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3435 IBTF_DPRINTF_L4("ibmf_recv_cb", 3436 "\tibmf_recv_cb: IBMF free msg failed"); 3437 } 3438 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3439 return; 3440 } 3441 3442 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3443 } 3444 3445 3446 void 3447 
/*
 * ibdm_recv_incoming_mad()
 *	Taskq entry point: unpacks the ibdm_taskq_args_t queued by
 *	ibdm_ibmf_recv_cb(), processes the MAD, and frees the args.
 */
void
ibdm_recv_incoming_mad(void *args)
{
	ibdm_taskq_args_t	*taskq_args;

	taskq_args = (ibdm_taskq_args_t *)args;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: "
	    "Processing incoming MAD via taskq");

	ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle,
	    taskq_args->tq_ibmf_msg, taskq_args->tq_args);

	/* taskq_args were allocated by ibdm_ibmf_recv_cb(); free them here */
	kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
}


/*
 * Calls ibdm_process_incoming_mad with all function arguments extracted
 * from args
 *
 * Central dispatcher for DM responses: matches the transaction ID to a
 * GID, handles redirection, routes the MAD to the per-attribute handler
 * based on the GID probe state, and maintains gl_pending_cmds / probe
 * completion bookkeeping.
 */
/*ARGSUSED*/
static void
ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
{
	int			flag = 0;
	int			ret;
	uint64_t		transaction_id;
	ib_mad_hdr_t		*hdr;
	ibdm_dp_gidinfo_t	*gid_info = NULL;

	IBTF_DPRINTF_L4("ibdm",
	    "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg);
	ibdm_dump_ibmf_msg(msg, 0);

	/*
	 * IBMF calls this routine for every DM MAD that arrives at this port.
	 * But we handle only the responses for requests we sent. We drop all
	 * the DM packets that does not have response bit set in the MAD
	 * header(this eliminates all the requests sent to this port).
	 * We handle only DM class version 1 MAD's
	 */
	hdr = IBDM_IN_IBMFMSG_MADHDR(msg);
	if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) {
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	transaction_id = b2h64(hdr->TransactionID);

	/*
	 * Locate the GID this response belongs to by matching the
	 * masked transaction ID against each GID's last-issued ID.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		if ((gid_info->gl_transactionID &
		    IBDM_GID_TRANSACTIONID_MASK) ==
		    (transaction_id & IBDM_GID_TRANSACTIONID_MASK))
			break;
		gid_info = gid_info->gl_next;
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (gid_info == NULL) {
		/* Drop the packet */
		IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID"
		    " does not match: 0x%llx", transaction_id);
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	/* Handle redirection for all the MAD's, except ClassPortInfo */
	if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) &&
	    (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) {
		ret = ibdm_handle_redirection(msg, gid_info, &flag);
		if (ret == IBDM_SUCCESS) {
			/* Request re-posted to the redirected address */
			return;
		}
	} else {
		uint_t gl_state;

		mutex_enter(&gid_info->gl_mutex);
		gl_state = gid_info->gl_state;
		mutex_exit(&gid_info->gl_mutex);

		/* Dispatch on the GID's current probe state */
		switch (gl_state) {

		case IBDM_SET_CLASSPORTINFO:
			ibdm_handle_setclassportinfo(
			    ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_CLASSPORTINFO:
			ibdm_handle_classportinfo(
			    ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_IOUNITINFO:
			ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag);
			break;

		case IBDM_GET_IOC_DETAILS:
			/* Several attributes are in flight in this state */
			switch (IBDM_IN_IBMFMSG_ATTR(msg)) {

			case IB_DM_ATTR_SERVICE_ENTRIES:
				ibdm_handle_srventry_mad(msg, gid_info, &flag);
				break;

			case IB_DM_ATTR_IOC_CTRL_PROFILE:
				ibdm_handle_ioc_profile(
				    ibmf_hdl, msg, gid_info, &flag);
				break;

			case IB_DM_ATTR_DIAG_CODE:
				ibdm_handle_diagcode(msg, gid_info, &flag);
				break;

			default:
				IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
				    "Error state, wrong attribute :-(");
				(void) ibmf_free_msg(ibmf_hdl, &msg);
				return;
			}
			break;
		default:
			IBTF_DPRINTF_L2("ibdm",
			    "process_incoming_mad: Dropping the packet"
			    " gl_state %x", gl_state);
			if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
				IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
				    "IBMF free msg failed DM request drop it");
			}
			return;
		}
	}

	/* Duplicate/unexpected responses do not count against pending cmds */
	if ((flag & IBDM_IBMF_PKT_DUP_RESP) ||
	    (flag & IBDM_IBMF_PKT_UNEXP_RESP)) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag);
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
		return;
	}

	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_pending_cmds < 1) {
		/*
		 * NOTE(review): the counter is still decremented below even
		 * after this warning, so it can go negative here; the timeout
		 * handler contains the compensating reset.
		 */
		IBTF_DPRINTF_L2("ibdm",
		    "\tprocess_incoming_mad: pending commands negative");
	}
	if (--gid_info->gl_pending_cmds) {
		IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: "
		    "gid_info %p pending cmds %d",
		    gid_info, gid_info->gl_pending_cmds);
		mutex_exit(&gid_info->gl_mutex);
	} else {
		/* Last outstanding response for this GID: probe complete */
		uint_t prev_state;
		IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE");
		prev_state = gid_info->gl_state;
		gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
		if (prev_state == IBDM_SET_CLASSPORTINFO) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tprocess_incoming_mad: "
			    "Setclassportinfo for Cisco FC GW is done.");
			gid_info->gl_flag &= ~IBDM_CISCO_PROBE;
			gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE;
			mutex_exit(&gid_info->gl_mutex);
			/* Wake threads waiting in the Cisco probe path */
			cv_broadcast(&gid_info->gl_probe_cv);
		} else {
			mutex_exit(&gid_info->gl_mutex);
			ibdm_notify_newgid_iocs(gid_info);
			mutex_enter(&ibdm.ibdm_mutex);
			if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
				/* Whole fabric sweep finished: wake waiters */
				IBTF_DPRINTF_L4("ibdm",
				    "\tprocess_incoming_mad: Wakeup");
				ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
				cv_broadcast(&ibdm.ibdm_probe_cv);
			}
			mutex_exit(&ibdm.ibdm_mutex);
		}
	}

	/*
	 * Do not deallocate the IBMF packet if atleast one request
	 * is posted. IBMF packet is reused.
	 */
	if (!(flag & IBDM_IBMF_PKT_REUSED)) {
		if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
			    "IBMF free msg failed DM request drop it");
		}
	}
}
gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3617 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3618 mutex_exit(&gid_info->gl_mutex); 3619 cv_broadcast(&gid_info->gl_probe_cv); 3620 } else { 3621 mutex_exit(&gid_info->gl_mutex); 3622 ibdm_notify_newgid_iocs(gid_info); 3623 mutex_enter(&ibdm.ibdm_mutex); 3624 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3625 IBTF_DPRINTF_L4("ibdm", 3626 "\tprocess_incoming_mad: Wakeup"); 3627 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3628 cv_broadcast(&ibdm.ibdm_probe_cv); 3629 } 3630 mutex_exit(&ibdm.ibdm_mutex); 3631 } 3632 } 3633 3634 /* 3635 * Do not deallocate the IBMF packet if atleast one request 3636 * is posted. IBMF packet is reused. 3637 */ 3638 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3639 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3640 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3641 "IBMF free msg failed DM request drop it"); 3642 } 3643 } 3644 } 3645 3646 3647 /* 3648 * ibdm_verify_mad_status() 3649 * Verifies the MAD status 3650 * Returns IBDM_SUCCESS if status is correct 3651 * Returns IBDM_FAILURE for bogus MAD status 3652 */ 3653 static int 3654 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3655 { 3656 int ret = 0; 3657 3658 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3659 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3660 return (IBDM_FAILURE); 3661 } 3662 3663 if (b2h16(hdr->Status) == 0) 3664 ret = IBDM_SUCCESS; 3665 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3666 ret = IBDM_SUCCESS; 3667 else { 3668 IBTF_DPRINTF_L2("ibdm", 3669 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3670 ret = IBDM_FAILURE; 3671 } 3672 return (ret); 3673 } 3674 3675 3676 3677 /* 3678 * ibdm_handle_redirection() 3679 * Returns IBDM_SUCCESS/IBDM_FAILURE 3680 */ 3681 static int 3682 ibdm_handle_redirection(ibmf_msg_t *msg, 3683 ibdm_dp_gidinfo_t *gid_info, int *flag) 3684 { 3685 int attrmod, ioc_no, start; 3686 void *data; 3687 timeout_id_t *timeout_id; 3688 ib_mad_hdr_t *hdr; 
/*
 * ibdm_handle_redirection()
 *	Handles a "redirection required" response: picks the cb args and
 *	timeout slot that correspond to the outstanding request, cancels
 *	the old timeout, records the redirected address (LID/QP/keys/SL)
 *	from the returned ClassPortInfo, and re-posts the same request to
 *	the redirected destination, reusing the IBMF message.
 *	Returns IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_handle_redirection(ibmf_msg_t *msg,
    ibdm_dp_gidinfo_t *gid_info, int *flag)
{
	int			attrmod, ioc_no, start;
	void			*data;
	timeout_id_t		*timeout_id;
	ib_mad_hdr_t		*hdr;
	ibdm_ioc_info_t		*ioc = NULL;
	ibdm_timeout_cb_args_t	*cb_args;
	ib_mad_classportinfo_t	*cpi;

	IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter");
	mutex_enter(&gid_info->gl_mutex);
	/*
	 * Select the cb_args/timeout_id pair matching the request that
	 * was redirected, based on probe state and MAD attribute.
	 */
	switch (gid_info->gl_state) {
	case IBDM_GET_IOUNITINFO:
		cb_args = &gid_info->gl_iou_cb_args;
		timeout_id = &gid_info->gl_timeout_id;
		break;

	case IBDM_GET_IOC_DETAILS:
		attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
		switch (IBDM_IN_IBMFMSG_ATTR(msg)) {

		case IB_DM_ATTR_DIAG_CODE:
			/* attrmod 0 = IOU diag code, else per-IOC diag code */
			if (attrmod == 0) {
				cb_args = &gid_info->gl_iou_cb_args;
				timeout_id = &gid_info->gl_timeout_id;
				break;
			}
			if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", attrmod);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
			cb_args = &ioc->ioc_dc_cb_args;
			timeout_id = &ioc->ioc_dc_timeout_id;
			break;

		case IB_DM_ATTR_IOC_CTRL_PROFILE:
			if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", attrmod);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
			cb_args = &ioc->ioc_cb_args;
			timeout_id = &ioc->ioc_timeout_id;
			break;

		case IB_DM_ATTR_SERVICE_ENTRIES:
			/* attrmod: IOC slot in the high 16 bits, start index
			 * of the service-entry range in the low byte */
			ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
			if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    "IOC# Out of range %d", ioc_no);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			start = (attrmod & IBDM_8_BIT_MASK);
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
			if (start > ioc->ioc_profile.ioc_service_entries) {
				IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
				    " SE index Out of range %d", start);
				(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
				mutex_exit(&gid_info->gl_mutex);
				return (IBDM_FAILURE);
			}
			cb_args = &ioc->ioc_serv[start].se_cb_args;
			timeout_id = &ioc->ioc_serv[start].se_timeout_id;
			break;

		default:
			/* ERROR State */
			IBTF_DPRINTF_L2("ibdm",
			    "\thandle_redirection: wrong attribute :-(");
			(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
			mutex_exit(&gid_info->gl_mutex);
			return (IBDM_FAILURE);
		}
		break;
	default:
		/* ERROR State */
		IBTF_DPRINTF_L2("ibdm",
		    "\thandle_redirection: Error state :-(");
		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
		mutex_exit(&gid_info->gl_mutex);
		return (IBDM_FAILURE);
	}
	if ((*timeout_id) != 0) {
		/* untimeout() may block on a running handler: drop the lock */
		mutex_exit(&gid_info->gl_mutex);
		if (untimeout(*timeout_id) == -1) {
			IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: "
			    "untimeout failed %x", *timeout_id);
		} else {
			IBTF_DPRINTF_L5("ibdm",
			    "\thandle_redirection: timeout %x", *timeout_id);
		}
		mutex_enter(&gid_info->gl_mutex);
		*timeout_id = 0;
	}

	/* The response payload is the agent's ClassPortInfo */
	data = msg->im_msgbufs_recv.im_bufs_cl_data;
	cpi = (ib_mad_classportinfo_t *)data;

	gid_info->gl_resp_timeout =
	    (b2h32(cpi->RespTimeValue) & 0x1F);

	/* Remember the redirected address for all subsequent requests */
	gid_info->gl_redirected = B_TRUE;
	gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
	gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
	gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
	gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
	gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
	gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
	gid_info->gl_redirectSL = cpi->RedirectSL;

	if (gid_info->gl_redirect_dlid != 0) {
		msg->im_local_addr.ia_remote_lid =
		    gid_info->gl_redirect_dlid;
	}
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr))
	ibdm_alloc_send_buffers(msg);

	/* Rebuild the original Get request, copying attribute/modifier
	 * from the received response so the same query is re-issued */
	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
	hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
	hdr->Status = 0;
	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
	hdr->AttributeID =
	    msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID;
	hdr->AttributeModifier =
	    msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr))

	msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
	msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
	msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
	msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;

	/* Re-arm the no-response timer for the re-posted request */
	mutex_enter(&gid_info->gl_mutex);
	*timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:"
	    "timeout %x", *timeout_id);

	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:"
		    "message transport failed");
		/* Free the message ourselves; IBMF will not call back */
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}
	(*flag) |= IBDM_IBMF_PKT_REUSED;
	IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit");
	return (IBDM_SUCCESS);
}
/*
 * ibdm_pkt_timeout_hdlr
 *	This timeout handler is registed for every IBMF packet that is
 *	sent through the IBMF. It gets called when no response is received
 *	within the specified time for the packet.  Retries the command
 *	cb_retry_count times; once retries are exhausted it marks the
 *	affected IOU/IOC state as failed, clears the stale timeout id,
 *	and updates the pending-command accounting (waking sweep waiters
 *	when the last outstanding command for the fabric completes).
 */
static void
ibdm_pkt_timeout_hdlr(void *arg)
{
	ibdm_iou_info_t		*iou;
	ibdm_ioc_info_t		*ioc;
	ibdm_timeout_cb_args_t	*cb_args = arg;
	ibdm_dp_gidinfo_t	*gid_info;
	int			srv_ent;
	uint_t			new_gl_state;

	IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p "
	    "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
	    cb_args->cb_req_type, cb_args->cb_ioc_num,
	    cb_args->cb_srvents_start);

	gid_info = cb_args->cb_gid_info;
	mutex_enter(&gid_info->gl_mutex);

	/*
	 * If probing already finished, or the response raced in and
	 * cleared cb_req_type, there is nothing left to do.
	 */
	if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) ||
	    (cb_args->cb_req_type == 0)) {

		IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed"
		    "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type,
		    cb_args->cb_ioc_num, cb_args->cb_srvents_start);

		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		mutex_exit(&gid_info->gl_mutex);
		return;
	}
	if (cb_args->cb_retry_count) {
		cb_args->cb_retry_count--;
		/*
		 * A new timeout_id is set inside ibdm_retry_command().
		 * When the function returns an error, the timeout_id
		 * is reset (to zero) in the switch statement below.
		 */
		if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) {
			mutex_exit(&gid_info->gl_mutex);
			return;
		}
		cb_args->cb_retry_count = 0;
	}

	IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p"
	    " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
	    cb_args->cb_req_type, cb_args->cb_ioc_num,
	    cb_args->cb_srvents_start);

	/*
	 * Retries exhausted: record the failure for whichever object
	 * this request targeted and clear its stale timeout id.
	 */
	switch (cb_args->cb_req_type) {

	case IBDM_REQ_TYPE_CLASSPORTINFO:
	case IBDM_REQ_TYPE_IOUINFO:
		/* Without ClassPortInfo/IOUnitInfo the whole GID fails */
		new_gl_state = IBDM_GID_PROBING_FAILED;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOCINFO:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
		if (ioc->ioc_timeout_id)
			ioc->ioc_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_SRVENTS:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
		srv_ent = cb_args->cb_srvents_start;
		if (ioc->ioc_serv[srv_ent].se_timeout_id)
			ioc->ioc_serv[srv_ent].se_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOU_DIAGCODE:
		/* Diag code is optional: GID still completes */
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		iou->iou_dc_valid = B_FALSE;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		break;

	case IBDM_REQ_TYPE_IOC_DIAGCODE:
		new_gl_state = IBDM_GID_PROBING_COMPLETE;
		iou = gid_info->gl_iou;
		ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
		ioc->ioc_dc_valid = B_FALSE;
		if (ioc->ioc_dc_timeout_id)
			ioc->ioc_dc_timeout_id = 0;
		break;

	default: /* ERROR State */
		new_gl_state = IBDM_GID_PROBING_FAILED;
		if (gid_info->gl_timeout_id)
			gid_info->gl_timeout_id = 0;
		IBTF_DPRINTF_L2("ibdm",
		    "\tpkt_timeout_hdlr: wrong request type.");
		break;
	}

	--gid_info->gl_pending_cmds; /* decrease the counter */

	if (gid_info->gl_pending_cmds == 0) {
		/* Last outstanding command for this GID: finalize state */
		gid_info->gl_state = new_gl_state;
		mutex_exit(&gid_info->gl_mutex);
		/*
		 * Delete this gid_info if the gid probe fails.
		 */
		if (new_gl_state == IBDM_GID_PROBING_FAILED) {
			ibdm_delete_glhca_list(gid_info);
		}
		ibdm_notify_newgid_iocs(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
			/* Fabric sweep finished: wake waiting threads */
			IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup");
			ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
			cv_broadcast(&ibdm.ibdm_probe_cv);
		}
		mutex_exit(&ibdm.ibdm_mutex);
	} else {
		/*
		 * Reset gl_pending_cmd if the extra timeout happens since
		 * gl_pending_cmd becomes negative as a result.
		 */
		if (gid_info->gl_pending_cmds < 0) {
			gid_info->gl_pending_cmds = 0;
			IBTF_DPRINTF_L2("ibdm",
			    "\tpkt_timeout_hdlr: extra timeout request."
			    " reset gl_pending_cmds");
		}
		mutex_exit(&gid_info->gl_mutex);
		/*
		 * Delete this gid_info if the gid probe fails.
		 */
		if (new_gl_state == IBDM_GID_PROBING_FAILED) {
			ibdm_delete_glhca_list(gid_info);
		}
	}
}
4006 * Returns IBDM_FAILURE/IBDM_SUCCESS 4007 */ 4008 static int 4009 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 4010 { 4011 int ret; 4012 ibmf_msg_t *msg; 4013 ib_mad_hdr_t *hdr; 4014 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 4015 timeout_id_t *timeout_id; 4016 ibdm_ioc_info_t *ioc; 4017 int ioc_no; 4018 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 4019 4020 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 4021 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4022 cb_args->cb_req_type, cb_args->cb_ioc_num, 4023 cb_args->cb_srvents_start); 4024 4025 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 4026 4027 4028 /* 4029 * Reset the gid if alloc_msg failed with BAD_HANDLE 4030 * ibdm_reset_gidinfo reinits the gid_info 4031 */ 4032 if (ret == IBMF_BAD_HANDLE) { 4033 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 4034 gid_info); 4035 4036 mutex_exit(&gid_info->gl_mutex); 4037 ibdm_reset_gidinfo(gid_info); 4038 mutex_enter(&gid_info->gl_mutex); 4039 4040 /* Retry alloc */ 4041 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 4042 &msg); 4043 } 4044 4045 if (ret != IBDM_SUCCESS) { 4046 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 4047 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4048 cb_args->cb_req_type, cb_args->cb_ioc_num, 4049 cb_args->cb_srvents_start); 4050 return (IBDM_FAILURE); 4051 } 4052 4053 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 4054 ibdm_alloc_send_buffers(msg); 4055 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 4056 4057 ibdm_bump_transactionID(gid_info); 4058 4059 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4060 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4061 if (gid_info->gl_redirected == B_TRUE) { 4062 if (gid_info->gl_redirect_dlid != 0) { 4063 msg->im_local_addr.ia_remote_lid = 4064 gid_info->gl_redirect_dlid; 4065 } 4066 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4067 msg->im_local_addr.ia_p_key = 
gid_info->gl_redirect_pkey; 4068 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4069 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 4070 } else { 4071 msg->im_local_addr.ia_remote_qno = 1; 4072 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4073 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4074 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 4075 } 4076 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4077 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 4078 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4079 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4080 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4081 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4082 hdr->Status = 0; 4083 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4084 4085 switch (cb_args->cb_req_type) { 4086 case IBDM_REQ_TYPE_CLASSPORTINFO: 4087 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 4088 hdr->AttributeModifier = 0; 4089 timeout_id = &gid_info->gl_timeout_id; 4090 break; 4091 case IBDM_REQ_TYPE_IOUINFO: 4092 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 4093 hdr->AttributeModifier = 0; 4094 timeout_id = &gid_info->gl_timeout_id; 4095 break; 4096 case IBDM_REQ_TYPE_IOCINFO: 4097 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4098 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4099 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4100 timeout_id = &ioc->ioc_timeout_id; 4101 break; 4102 case IBDM_REQ_TYPE_SRVENTS: 4103 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 4104 ibdm_fill_srv_attr_mod(hdr, cb_args); 4105 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4106 timeout_id = 4107 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 4108 break; 4109 case IBDM_REQ_TYPE_IOU_DIAGCODE: 4110 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4111 hdr->AttributeModifier = 0; 4112 timeout_id = &gid_info->gl_timeout_id; 4113 break; 4114 case IBDM_REQ_TYPE_IOC_DIAGCODE: 4115 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4116 
hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4117 ioc_no = cb_args->cb_ioc_num; 4118 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 4119 timeout_id = &ioc->ioc_dc_timeout_id; 4120 break; 4121 } 4122 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 4123 4124 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4125 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4126 4127 mutex_exit(&gid_info->gl_mutex); 4128 4129 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 4130 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 4131 cb_args->cb_srvents_start, *timeout_id); 4132 4133 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 4134 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 4135 cb_args, 0) != IBMF_SUCCESS) { 4136 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 4137 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4138 cb_args->cb_req_type, cb_args->cb_ioc_num, 4139 cb_args->cb_srvents_start); 4140 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4141 } 4142 mutex_enter(&gid_info->gl_mutex); 4143 return (IBDM_SUCCESS); 4144 } 4145 4146 4147 /* 4148 * ibdm_update_ioc_port_gidlist() 4149 */ 4150 static void 4151 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 4152 ibdm_dp_gidinfo_t *gid_info) 4153 { 4154 int ii, ngid_ents; 4155 ibdm_gid_t *tmp; 4156 ibdm_hca_list_t *gid_hca_head, *temp; 4157 ibdm_hca_list_t *ioc_head = NULL; 4158 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 4159 4160 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 4161 4162 ngid_ents = gid_info->gl_ngids; 4163 dest->ioc_nportgids = ngid_ents; 4164 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 4165 ngid_ents, KM_SLEEP); 4166 tmp = gid_info->gl_gid; 4167 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 4168 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 4169 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 4170 tmp = tmp->gid_next; 4171 } 4172 4173 gid_hca_head = gid_info->gl_hca_list; 4174 while (gid_hca_head) { 4175 temp = 
ibdm_dup_hca_attr(gid_hca_head); 4176 temp->hl_next = ioc_head; 4177 ioc_head = temp; 4178 gid_hca_head = gid_hca_head->hl_next; 4179 } 4180 dest->ioc_hca_list = ioc_head; 4181 } 4182 4183 4184 /* 4185 * ibdm_alloc_send_buffers() 4186 * Allocates memory for the IBMF send buffer to send and/or receive 4187 * the Device Management MAD packet. 4188 */ 4189 static void 4190 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4191 { 4192 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4193 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4194 4195 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4196 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4197 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4198 4199 msgp->im_msgbufs_send.im_bufs_cl_data = 4200 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4201 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4202 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4203 } 4204 4205 4206 /* 4207 * ibdm_alloc_send_buffers() 4208 * De-allocates memory for the IBMF send buffer 4209 */ 4210 static void 4211 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4212 { 4213 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4214 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4215 } 4216 4217 /* 4218 * ibdm_probe_ioc() 4219 * 1. Gets the node records for the port GUID. This detects all the port 4220 * to the IOU. 4221 * 2. Selectively probes all the IOC, given it's node GUID 4222 * 3. 
In case of reprobe, only the IOC to be reprobed is send the IOC
 *	Controller Profile asynchronously
 */
/*ARGSUSED*/
static void
ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
{
	int			ii, nrecords;
	size_t			nr_len = 0, pi_len = 0;
	ib_gid_t		sgid, dgid;
	ibdm_hca_list_t		*hca_list = NULL;
	sa_node_record_t	*nr, *tmp;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*reprobe_gid, *new_gid, *node_gid;
	ibdm_dp_gidinfo_t	*temp_gidinfo;
	ibdm_gid_t		*temp_gid;
	sa_portinfo_record_t	*pi;

	IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin",
	    nodeguid, ioc_guid, reprobe_flag);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(&ioc_guid);

	mutex_enter(&ibdm.ibdm_hl_mutex);
	/* Iterate every active local HCA port as a potential source */
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		reprobe_gid = new_gid = node_gid = NULL;

		/* All fabric ports belonging to the target node GUID */
		nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
		if (nr == NULL) {
			IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
			continue;
		}
		nrecords = (nr_len / sizeof (sa_node_record_t));
		for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
			if ((pi = ibdm_get_portinfo(
			    port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) {
				IBTF_DPRINTF_L4("ibdm",
				    "\tibdm_get_portinfo: no portinfo recs");
				continue;
			}

			/*
			 * If Device Management is not supported on
			 * this port, skip the rest.
			 */
			if (!(pi->PortInfo.CapabilityMask &
			    SM_CAP_MASK_IS_DM_SUPPD)) {
				kmem_free(pi, pi_len);
				continue;
			}

			/*
			 * For reprobes: Check if GID, already in
			 * the list. If so, set the state to SKIPPED
			 */
			if (((temp_gidinfo = ibdm_find_gid(nodeguid,
			    tmp->NodeInfo.PortGUID)) != NULL) &&
			    temp_gidinfo->gl_state ==
			    IBDM_GID_PROBING_COMPLETE) {
				ASSERT(reprobe_gid == NULL);
				ibdm_addto_glhcalist(temp_gidinfo,
				    hca_list);
				reprobe_gid = temp_gidinfo;
				kmem_free(pi, pi_len);
				continue;
			} else if (temp_gidinfo != NULL) {
				/* Known GID, probe not yet complete */
				kmem_free(pi, pi_len);
				ibdm_addto_glhcalist(temp_gidinfo,
				    hca_list);
				continue;
			}

			IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : "
			    "create_gid : prefix %llx, guid %llx\n",
			    pi->PortInfo.GidPrefix,
			    tmp->NodeInfo.PortGUID);

			/* Brand-new GID: create and link a gid_info for it */
			sgid.gid_prefix = port->pa_sn_prefix;
			sgid.gid_guid = port->pa_port_guid;
			dgid.gid_prefix = pi->PortInfo.GidPrefix;
			dgid.gid_guid = tmp->NodeInfo.PortGUID;
			new_gid = ibdm_create_gid_info(port, sgid,
			    dgid);
			if (new_gid == NULL) {
				IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
				    "create_gid_info failed\n");
				kmem_free(pi, pi_len);
				continue;
			}
			/* First GID of this node becomes the anchor */
			if (node_gid == NULL) {
				node_gid = new_gid;
				ibdm_add_to_gl_gid(node_gid, node_gid);
			} else {
				IBTF_DPRINTF_L4("ibdm",
				    "\tprobe_ioc: new gid");
				temp_gid = kmem_zalloc(
				    sizeof (ibdm_gid_t), KM_SLEEP);
				temp_gid->gid_dgid_hi =
				    new_gid->gl_dgid_hi;
				temp_gid->gid_dgid_lo =
				    new_gid->gl_dgid_lo;
				temp_gid->gid_next = node_gid->gl_gid;
				node_gid->gl_gid = temp_gid;
				node_gid->gl_ngids++;
			}
			new_gid->gl_is_dm_capable = B_TRUE;
			new_gid->gl_nodeguid = nodeguid;
			new_gid->gl_portguid = dgid.gid_guid;
			ibdm_addto_glhcalist(new_gid, hca_list);

			/*
			 * Set the state to skipped as all these
			 * gids point to the same node.
			 * We (re)probe only one GID below and reset
			 * state appropriately
			 */
			new_gid->gl_state = IBDM_GID_PROBING_SKIPPED;
			new_gid->gl_devid = (*tmp).NodeInfo.DeviceID;
			kmem_free(pi, pi_len);
		}
		kmem_free(nr, nr_len);

		IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d "
		    "reprobe_gid %p new_gid %p node_gid %p",
		    reprobe_flag, reprobe_gid, new_gid, node_gid);

		if (reprobe_flag != 0 && reprobe_gid != NULL) {
			int niocs, jj;
			ibdm_ioc_info_t *tmp_ioc;
			int ioc_matched = 0;

			/* Drop hl_mutex while the reprobe MADs are in flight */
			mutex_exit(&ibdm.ibdm_hl_mutex);
			mutex_enter(&reprobe_gid->gl_mutex);
			reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS;
			niocs =
			    reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots;
			reprobe_gid->gl_pending_cmds++;
			mutex_exit(&reprobe_gid->gl_mutex);

			/* Reprobe only the IOC slot matching ioc_guid */
			for (jj = 0; jj < niocs; jj++) {
				tmp_ioc =
				    IBDM_GIDINFO2IOCINFO(reprobe_gid, jj);
				if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid)
					continue;

				ioc_matched = 1;

				/*
				 * Explicitly set gl_reprobe_flag to 0 so that
				 * IBnex is not notified on completion
				 */
				mutex_enter(&reprobe_gid->gl_mutex);
				reprobe_gid->gl_reprobe_flag = 0;
				mutex_exit(&reprobe_gid->gl_mutex);

				mutex_enter(&ibdm.ibdm_mutex);
				ibdm.ibdm_ngid_probes_in_progress++;
				mutex_exit(&ibdm.ibdm_mutex);
				if (ibdm_send_ioc_profile(reprobe_gid, jj) !=
				    IBDM_SUCCESS) {
					IBTF_DPRINTF_L4("ibdm",
					    "\tprobe_ioc: "
					    "send_ioc_profile failed "
					    "for ioc %d", jj);
					ibdm_gid_decr_pending(reprobe_gid);
					break;
				}
				mutex_enter(&ibdm.ibdm_mutex);
				ibdm_wait_probe_completion();
				mutex_exit(&ibdm.ibdm_mutex);
				break;
			}
			/*
			 * NOTE(review): on the ioc_matched == 0 path the
			 * loop continues to the next port without
			 * re-acquiring ibdm_hl_mutex, yet the loop header
			 * and the final mutex_exit() assume it is held —
			 * looks like a latent lock-balance bug; confirm
			 * against ibdm_get_next_port()'s locking contract.
			 */
			if (ioc_matched == 0)
				ibdm_gid_decr_pending(reprobe_gid);
			else {
				mutex_enter(&ibdm.ibdm_hl_mutex);
				break;
			}
		} else if (new_gid != NULL) {
			mutex_exit(&ibdm.ibdm_hl_mutex);
			node_gid = node_gid ? node_gid : new_gid;

			/*
			 * New or reinserted GID : Enable notification
			 * to IBnex
			 */
			mutex_enter(&node_gid->gl_mutex);
			node_gid->gl_reprobe_flag = 1;
			mutex_exit(&node_gid->gl_mutex);

			ibdm_probe_gid(node_gid);

			mutex_enter(&ibdm.ibdm_hl_mutex);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n");
}


/*
 * ibdm_probe_gid()
 *	Selectively probes the GID
 */
static void
ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info)
{
	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:");

	/*
	 * A Cisco FC GW needs the special handling to get IOUnitInfo.
	 */
	mutex_enter(&gid_info->gl_mutex);
	if (ibdm_is_cisco_switch(gid_info)) {
		gid_info->gl_pending_cmds++;
		gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
		mutex_exit(&gid_info->gl_mutex);

		if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {

			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_state = IBDM_GID_PROBING_FAILED;
			--gid_info->gl_pending_cmds;
			mutex_exit(&gid_info->gl_mutex);

			/* free the hca_list on this gid_info */
			ibdm_delete_glhca_list(gid_info);
			/* NOTE(review): dead store — gid_info not used again */
			gid_info = gid_info->gl_next;
			return;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_wait_cisco_probe_completion(gid_info);

		IBTF_DPRINTF_L4("ibdm",
		    "\tprobe_gid: CISCO Wakeup signal received");
	}

	/* move on to the 'GET_CLASSPORTINFO' stage */
	gid_info->gl_pending_cmds++;
	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
	mutex_exit(&gid_info->gl_mutex);

	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		--gid_info->gl_pending_cmds;
		mutex_exit(&gid_info->gl_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);
		/* NOTE(review): dead store — gid_info not used again */
		gid_info = gid_info->gl_next;
		return;
	}

	/* Block until the async probe completes (or fails) */
	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress++;
	gid_info = gid_info->gl_next;
	ibdm_wait_probe_completion();
	mutex_exit(&ibdm.ibdm_mutex);

	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received");
}


/*
 * ibdm_create_gid_info()
 *	Allocates a gid_info structure and initializes
 *	Returns pointer to the structure on success
 *	and NULL on failure
 */
static ibdm_dp_gidinfo_t *
ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid)
{
	uint8_t			ii, npaths;
	sa_path_record_t	*path;
	size_t			len;
	ibdm_pkey_tbl_t		*pkey_tbl;
	ibdm_dp_gidinfo_t	*gid_info = NULL;
	int			ret;

	IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin");
	npaths = 1;

	/* query for reversible paths */
	if (port->pa_sa_hdl)
		ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl,
		    sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0,
		    &len, &path);
	else
		return (NULL);

	if (ret == IBMF_SUCCESS && path) {
		ibdm_dump_path_info(path);

		gid_info = kmem_zalloc(
		    sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
		mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
		/* Cache the path's addressing info for later DM MADs */
		gid_info->gl_dgid_hi = path->DGID.gid_prefix;
		gid_info->gl_dgid_lo = path->DGID.gid_guid;
		gid_info->gl_sgid_hi = path->SGID.gid_prefix;
		gid_info->gl_sgid_lo = path->SGID.gid_guid;
		gid_info->gl_p_key = path->P_Key;
		gid_info->gl_sa_hdl = port->pa_sa_hdl;
		gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl;
		gid_info->gl_slid = path->SLID;
		gid_info->gl_dlid = path->DLID;
		/* Reserve a transaction-ID window for this GID's requests */
		gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		gid_info->gl_min_transactionID = gid_info->gl_transactionID;
		gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID + 1)
		    << IBDM_GID_TRANSACTIONID_SHIFT;
		gid_info->gl_SL = path->SL;

		/* Find the QP handle matching the path's P_Key */
		gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
		for (ii = 0; ii < port->pa_npkeys; ii++) {
			if (port->pa_pkey_tbl == NULL)
				break;

			pkey_tbl = &port->pa_pkey_tbl[ii];
			if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
			    (pkey_tbl->pt_qp_hdl != NULL)) {
				gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
				break;
			}
		}
		kmem_free(path, len);

		/*
		 * QP handle for GID not initialized. No matching Pkey
		 * was found!! ibdm should *not* hit this case. Flag an
		 * error and drop the GID if ibdm does encounter this.
		 */
		if (gid_info->gl_qp_hdl == NULL) {
			IBTF_DPRINTF_L2(ibdm_string,
			    "\tcreate_gid_info: No matching Pkey");
			ibdm_delete_gidinfo(gid_info);
			return (NULL);
		}

		/* Append to the global GID list */
		ibdm.ibdm_ngids++;
		if (ibdm.ibdm_dp_gidlist_head == NULL) {
			ibdm.ibdm_dp_gidlist_head = gid_info;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		} else {
			ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
			gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
			ibdm.ibdm_dp_gidlist_tail = gid_info;
		}
	}

	return (gid_info);
}


/*
 * ibdm_get_node_records
 *	Sends a SA query to get the NODE record
 *	Returns pointer to the sa_node_record_t on success
 *	and NULL on failure.  Caller frees the result (*length bytes).
 */
static sa_node_record_t *
ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid)
{
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	args;
	int			ret;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin");

	bzero(&req, sizeof (sa_node_record_t));
	req.NodeInfo.NodeGUID = guid;

	args.sq_attr_id = SA_NODERECORD_ATTRID;
	args.sq_access_type = IBMF_SAA_RETRIEVE;
	args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID;
	args.sq_template = &req;
	args.sq_callback = NULL;
	args.sq_callback_arg = NULL;

	ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
	if (ret != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_records:"
		    " SA Retrieve Failed: %d", ret);
		return (NULL);
	}
	if ((resp == NULL) || (*length == 0)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records");
		return (NULL);
	}

	IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx "
	    "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	return (resp);
}


/*
 * ibdm_get_portinfo()
 *	Sends a SA query to get the PortInfo record
 *	Returns pointer to the sa_portinfo_record_t on success
 *	and NULL on failure.  Caller frees the result (*length bytes).
 */
static sa_portinfo_record_t *
ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid)
{
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	args;
	int			ret;

	IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin");

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = lid;

	args.sq_attr_id = SA_PORTINFORECORD_ATTRID;
	args.sq_access_type = IBMF_SAA_RETRIEVE;
	args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	args.sq_template = &req;
	args.sq_callback = NULL;
	args.sq_callback_arg = NULL;

	ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
	if (ret != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:"
		    " SA Retrieve Failed: 0x%X", ret);
		return (NULL);
	}
	if ((*length == 0) || (resp == NULL))
		return (NULL);

	IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x",
	    resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask);
	return (resp);
}


/*
 * ibdm_ibnex_register_callback
 *	IB nexus callback routine for HCA attach and detach notification
 */
void
ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback)
{
	IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks");
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	ibdm.ibdm_ibnex_callback = ibnex_dm_callback;
	mutex_exit(&ibdm.ibdm_ibnex_mutex);
}


/*
 * ibdm_ibnex_unregister_callbacks
 *	Clears the IB nexus notification callback.
 */
void
ibdm_ibnex_unregister_callback()
{
	IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks");
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	ibdm.ibdm_ibnex_callback = NULL;
	mutex_exit(&ibdm.ibdm_ibnex_mutex);
}

/*
 * ibdm_get_waittime()
 *	Calculates the wait time based on the last HCA attach time
 *	(clock ticks remaining of the dft_wait_sec settling window).
 *	Called with ibdm_hl_mutex held.
 */
static clock_t
ibdm_get_waittime(ib_guid_t hca_guid, time_t dft_wait_sec)
{
	const hrtime_t	dft_wait = dft_wait_sec * NANOSEC;
	hrtime_t	temp, wait_time = 0;
	clock_t		usecs;
	int		i;
	ibdm_hca_list_t	*hca;

	/*
	 * NOTE(review): dft_wait is 64-bit (hrtime_t) but printed with
	 * "%d" — harmless on debug output, but worth confirming against
	 * IBTF_DPRINTF's format handling.
	 */
	IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx"
	    "\tport settling time %d", hca_guid, dft_wait);

	ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex));

	hca = ibdm.ibdm_hca_list_head;

	for (i = 0; i < ibdm.ibdm_hca_count; i++, hca = hca->hl_next) {
		/* All ports already active: nothing to wait for on this HCA */
		if (hca->hl_nports == hca->hl_nports_active)
			continue;

		/* When a specific HCA is requested, skip all others */
		if (hca_guid && (hca_guid != hca->hl_hca_guid))
			continue;

		/* Time left in this HCA's settling window (clamped to >= 0) */
		temp = gethrtime() - hca->hl_attach_time;
		temp = MAX(0, (dft_wait - temp));

		if (hca_guid) {
			wait_time = temp;
			break;
		}

		/* No specific HCA: wait for the slowest one */
		wait_time = MAX(temp, wait_time);
	}

	/* convert to microseconds */
	usecs = MIN(wait_time, dft_wait) / (NANOSEC / MICROSEC);

	IBTF_DPRINTF_L2("ibdm", "\tget_waittime: wait_time = %ld usecs",
	    (long) usecs);

	return (drv_usectohz(usecs));
}

/*
 * ibdm_ibnex_port_settle_wait()
 *	Blocks the caller until the port settling window for the given
 *	HCA (or all HCAs when hca_guid == 0) has elapsed.
 */
void
ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, time_t dft_wait)
{
	clock_t	wait_time;

	mutex_enter(&ibdm.ibdm_hl_mutex);

	/* Sleep in chunks; ibdm_port_settle_cv may wake us early */
	while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0)
		(void) cv_reltimedwait(&ibdm.ibdm_port_settle_cv,
		    &ibdm.ibdm_hl_mutex, wait_time, TR_CLOCK_TICK);

	mutex_exit(&ibdm.ibdm_hl_mutex);
}


/*
 * ibdm_ibnex_probe_hcaport
 *	Probes the presence of HCA port (with HCA dip and port number)
 *	Returns port attributes structure on SUCCESS; caller frees it
 *	via ibdm_ibnex_free_port_attr().
 */
ibdm_port_attr_t *
ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num)
{
	int			ii, jj;
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port_attr;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	hca_list = ibdm.ibdm_hca_list_head;
	/* Locate the HCA by GUID, then the port by number */
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		if (hca_list->hl_hca_guid == hca_guid) {
			for (jj = 0; jj < hca_list->hl_nports; jj++) {
				if (hca_list->hl_port_attr[jj].pa_port_num ==
				    port_num) {
					break;
				}
			}
			if (jj != hca_list->hl_nports)
				break;
		}
		hca_list = hca_list->hl_next;
	}
	if (ii == ibdm.ibdm_hca_count) {
		IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found");
		mutex_exit(&ibdm.ibdm_hl_mutex);
		return (NULL);
	}
	/* Hand back a refreshed private copy of the port attributes */
	port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    sizeof (ibdm_port_attr_t), KM_SLEEP);
	bcopy((char *)&hca_list->hl_port_attr[jj],
	    port_attr, sizeof (ibdm_port_attr_t));
	ibdm_update_port_attr(port_attr);

	mutex_exit(&ibdm.ibdm_hl_mutex);
	return (port_attr);
}


/*
 * ibdm_ibnex_get_port_attrs
 *	Scan all HCAs for a matching port_guid.
 *	Returns "port attributes" structure on success.
4802 */ 4803 ibdm_port_attr_t * 4804 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4805 { 4806 int ii, jj; 4807 ibdm_hca_list_t *hca_list; 4808 ibdm_port_attr_t *port_attr; 4809 4810 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4811 4812 mutex_enter(&ibdm.ibdm_hl_mutex); 4813 hca_list = ibdm.ibdm_hca_list_head; 4814 4815 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4816 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4817 if (hca_list->hl_port_attr[jj].pa_port_guid == 4818 port_guid) { 4819 break; 4820 } 4821 } 4822 if (jj != hca_list->hl_nports) 4823 break; 4824 hca_list = hca_list->hl_next; 4825 } 4826 4827 if (ii == ibdm.ibdm_hca_count) { 4828 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4829 mutex_exit(&ibdm.ibdm_hl_mutex); 4830 return (NULL); 4831 } 4832 4833 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4834 KM_SLEEP); 4835 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4836 sizeof (ibdm_port_attr_t)); 4837 ibdm_update_port_attr(port_attr); 4838 4839 mutex_exit(&ibdm.ibdm_hl_mutex); 4840 return (port_attr); 4841 } 4842 4843 4844 /* 4845 * ibdm_ibnex_free_port_attr() 4846 */ 4847 void 4848 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4849 { 4850 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4851 if (port_attr) { 4852 if (port_attr->pa_pkey_tbl != NULL) { 4853 kmem_free(port_attr->pa_pkey_tbl, 4854 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4855 } 4856 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4857 } 4858 } 4859 4860 4861 /* 4862 * ibdm_ibnex_get_hca_list() 4863 * Returns portinfo for all the port for all the HCA's 4864 */ 4865 void 4866 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4867 { 4868 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4869 int ii; 4870 4871 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4872 4873 mutex_enter(&ibdm.ibdm_hl_mutex); 4874 temp = ibdm.ibdm_hca_list_head; 4875 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4876 temp1 = 
ibdm_dup_hca_attr(temp); 4877 temp1->hl_next = head; 4878 head = temp1; 4879 temp = temp->hl_next; 4880 } 4881 *count = ibdm.ibdm_hca_count; 4882 *hca = head; 4883 mutex_exit(&ibdm.ibdm_hl_mutex); 4884 } 4885 4886 4887 /* 4888 * ibdm_ibnex_get_hca_info_by_guid() 4889 */ 4890 ibdm_hca_list_t * 4891 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4892 { 4893 ibdm_hca_list_t *head = NULL, *hca = NULL; 4894 4895 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4896 4897 mutex_enter(&ibdm.ibdm_hl_mutex); 4898 head = ibdm.ibdm_hca_list_head; 4899 while (head) { 4900 if (head->hl_hca_guid == hca_guid) { 4901 hca = ibdm_dup_hca_attr(head); 4902 hca->hl_next = NULL; 4903 break; 4904 } 4905 head = head->hl_next; 4906 } 4907 mutex_exit(&ibdm.ibdm_hl_mutex); 4908 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4909 return (hca); 4910 } 4911 4912 4913 /* 4914 * ibdm_dup_hca_attr() 4915 * Allocate a new HCA attribute strucuture and initialize 4916 * hca attribute structure with the incoming HCA attributes 4917 * returned the allocated hca attributes. 
4918 */ 4919 static ibdm_hca_list_t * 4920 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4921 { 4922 int len; 4923 ibdm_hca_list_t *out_hca; 4924 4925 len = sizeof (ibdm_hca_list_t) + 4926 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4927 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4928 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4929 bcopy((char *)in_hca, 4930 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4931 if (in_hca->hl_nports) { 4932 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4933 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4934 bcopy((char *)in_hca->hl_port_attr, 4935 (char *)out_hca->hl_port_attr, 4936 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4937 for (len = 0; len < out_hca->hl_nports; len++) 4938 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4939 } 4940 return (out_hca); 4941 } 4942 4943 4944 /* 4945 * ibdm_ibnex_free_hca_list() 4946 * Free one/more HCA lists 4947 */ 4948 void 4949 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4950 { 4951 int ii; 4952 size_t len; 4953 ibdm_hca_list_t *temp; 4954 ibdm_port_attr_t *port; 4955 4956 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4957 ASSERT(hca_list); 4958 while (hca_list) { 4959 temp = hca_list; 4960 hca_list = hca_list->hl_next; 4961 for (ii = 0; ii < temp->hl_nports; ii++) { 4962 port = &temp->hl_port_attr[ii]; 4963 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4964 if (len != 0) 4965 kmem_free(port->pa_pkey_tbl, len); 4966 } 4967 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4968 sizeof (ibdm_port_attr_t)); 4969 kmem_free(temp, len); 4970 } 4971 } 4972 4973 4974 /* 4975 * ibdm_ibnex_probe_iocguid() 4976 * Probes the IOC on the fabric and returns the IOC information 4977 * if present. 
Otherwise, NULL is returned
 */
/* ARGSUSED */
ibdm_ioc_info_t *
ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag)
{
	int			k;
	ibdm_ioc_info_t		*ioc_info;
	ibdm_dp_gidinfo_t	*gid_info;	/* used as index and arg */
	/*
	 * FIX: declared as a plain timeout_id_t (the original declared a
	 * timeout_id_t * but stored timeout_id_t values in it and passed
	 * them to untimeout(9F)).
	 */
	timeout_id_t		timeout_id;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin",
	    iou, ioc_guid, reprobe_flag);

	if (ibdm_enumerate_iocs == 0)
		return (NULL);

	/* Check whether we know this already */
	ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
	if (ioc_info == NULL) {
		/* Unknown IOC: take the BUSY token and run a fresh probe */
		mutex_enter(&ibdm.ibdm_mutex);
		while (ibdm.ibdm_busy & IBDM_BUSY)
			cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_probe_ioc(iou, ioc_guid, 0);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);
		ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
	} else if (reprobe_flag) {	/* Handle Reprobe for the IOC */
		ASSERT(gid_info != NULL);
		/* Free the ioc_list before reprobe; and cancel any timers */
		mutex_enter(&ibdm.ibdm_mutex);
		mutex_enter(&gid_info->gl_mutex);
		/*
		 * Each pending timer is cleared under gl_mutex, then the
		 * mutex is dropped around untimeout(9F) to avoid
		 * deadlocking with a concurrently firing handler.
		 */
		if (ioc_info->ioc_timeout_id) {
			timeout_id = ioc_info->ioc_timeout_id;
			ioc_info->ioc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
			    "ioc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
				    "untimeout ioc_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
		}
		if (ioc_info->ioc_dc_timeout_id) {
			timeout_id = ioc_info->ioc_dc_timeout_id;
			ioc_info->ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
			    "ioc_dc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
				    "untimeout ioc_dc_timeout_id failed");
			}
			mutex_enter(&gid_info->gl_mutex);
		}
		for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++)
			if (ioc_info->ioc_serv[k].se_timeout_id) {
				timeout_id = ioc_info->ioc_serv[k].
				    se_timeout_id;
				ioc_info->ioc_serv[k].se_timeout_id = 0;
				mutex_exit(&gid_info->gl_mutex);
				/*
				 * FIX: the format string needs two
				 * specifiers for its two arguments; the
				 * original passed (k, timeout_id) with a
				 * single "%x" (and a literal "[k]").
				 */
				IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
				    "ioc_info->ioc_serv[%d].se_timeout_id ="
				    " %x", k, timeout_id);
				if (untimeout(timeout_id) == -1) {
					IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
					    "untimeout se_timeout_id %d "
					    "failed", k);
				}
				mutex_enter(&gid_info->gl_mutex);
			}
		mutex_exit(&gid_info->gl_mutex);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_ibnex_free_ioc_list(ioc_info);

		/* Take the BUSY token and reprobe just this IOC */
		mutex_enter(&ibdm.ibdm_mutex);
		while (ibdm.ibdm_busy & IBDM_BUSY)
			cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);

		ibdm_probe_ioc(iou, ioc_guid, 1);

		/*
		 * Skip if gl_reprobe_flag is set, this will be
		 * a re-inserted / new GID, for which notifications
		 * have already been send.
		 */
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
		    gid_info = gid_info->gl_next) {
			uint8_t		ii, niocs;
			ibdm_ioc_info_t	*ioc;

			if (gid_info->gl_iou == NULL)
				continue;

			if (gid_info->gl_reprobe_flag) {
				gid_info->gl_reprobe_flag = 0;
				continue;
			}

			/* Refresh port/service info for the reprobed IOC */
			niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
				if (ioc->ioc_profile.ioc_guid == ioc_guid) {
					mutex_enter(&ibdm.ibdm_mutex);
					ibdm_reprobe_update_port_srv(ioc,
					    gid_info);
					mutex_exit(&ibdm.ibdm_mutex);
				}
			}
		}
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);

		ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
	}
	return (ioc_info);
}


/*
 * ibdm_get_ioc_info_with_gid()
 *	Returns pointer to ibdm_ioc_info_t if it finds
 *	matching record for the ioc_guid. Otherwise NULL is returned.
 *	The pointer to gid_info is set to the second argument in case that
 *	the non-NULL value returns (and the second argument is not NULL).
 *
 *	Note. use the same strings as "ibnex_get_ioc_info" in
 *	IBTF_DPRINTF() to keep compatibility.
 */
static ibdm_ioc_info_t *
ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid,
    ibdm_dp_gidinfo_t **gid_info)
{
	int		ii;
	ibdm_ioc_info_t	*ioc = NULL, *tmp = NULL;
	ibdm_dp_gidinfo_t *gid_list;
	ib_dm_io_unitinfo_t *iou;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid);

	/* Serialize against any sweep/probe in progress */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;

	if (gid_info)
		*gid_info = NULL;	/* clear the value of gid_info */

	/* Walk every fully-probed GID; scan its IOU's controller slots */
	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		mutex_enter(&gid_list->gl_mutex);
		if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		if (gid_list->gl_iou == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tget_ioc_info: No IOU info");
			mutex_exit(&gid_list->gl_mutex);
			gid_list = gid_list->gl_next;
			continue;
		}
		iou = &gid_list->gl_iou->iou_info;
		for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
			tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii);
			if ((tmp->ioc_profile.ioc_guid == ioc_guid) &&
			    (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) {
				/*
				 * Found: hand back a private copy (caller
				 * frees via ibdm_ibnex_free_ioc_list) and
				 * release gl_mutex before the global
				 * busy-flag release.
				 */
				ioc = ibdm_dup_ioc_info(tmp, gid_list);
				if (gid_info)
					*gid_info = gid_list; /* set this ptr */
				mutex_exit(&gid_list->gl_mutex);
				ibdm.ibdm_busy &= ~IBDM_BUSY;
				cv_broadcast(&ibdm.ibdm_busy_cv);
				mutex_exit(&ibdm.ibdm_mutex);
				IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End");
				return (ioc);
			}
		}
		/*
		 * NOTE(review): "ioc" can only still be NULL here (it is set
		 * solely on the returning path above), so this reset looks
		 * redundant — kept as-is.
		 */
		if (ii == iou->iou_num_ctrl_slots)
			ioc = NULL;

		mutex_exit(&gid_list->gl_mutex);
		gid_list = gid_list->gl_next;
	}

	/* Not found anywhere: release busy flag and return NULL */
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
	IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End");
	return (ioc);
}

/*
5180 * ibdm_ibnex_get_ioc_info() 5181 * Returns pointer to ibdm_ioc_info_t if it finds 5182 * matching record for the ioc_guid, otherwise NULL 5183 * is returned 5184 * 5185 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 5186 */ 5187 ibdm_ioc_info_t * 5188 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 5189 { 5190 if (ibdm_enumerate_iocs == 0) 5191 return (NULL); 5192 5193 /* will not use the gid_info pointer, so the second arg is NULL */ 5194 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 5195 } 5196 5197 /* 5198 * ibdm_ibnex_get_ioc_count() 5199 * Returns number of ibdm_ioc_info_t it finds 5200 */ 5201 int 5202 ibdm_ibnex_get_ioc_count(void) 5203 { 5204 int count = 0, k; 5205 ibdm_ioc_info_t *ioc; 5206 ibdm_dp_gidinfo_t *gid_list; 5207 5208 if (ibdm_enumerate_iocs == 0) 5209 return (0); 5210 5211 mutex_enter(&ibdm.ibdm_mutex); 5212 ibdm_sweep_fabric(0); 5213 5214 while (ibdm.ibdm_busy & IBDM_BUSY) 5215 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5216 ibdm.ibdm_busy |= IBDM_BUSY; 5217 5218 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5219 gid_list = gid_list->gl_next) { 5220 mutex_enter(&gid_list->gl_mutex); 5221 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5222 (gid_list->gl_iou == NULL)) { 5223 mutex_exit(&gid_list->gl_mutex); 5224 continue; 5225 } 5226 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5227 k++) { 5228 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5229 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5230 ++count; 5231 } 5232 mutex_exit(&gid_list->gl_mutex); 5233 } 5234 ibdm.ibdm_busy &= ~IBDM_BUSY; 5235 cv_broadcast(&ibdm.ibdm_busy_cv); 5236 mutex_exit(&ibdm.ibdm_mutex); 5237 5238 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5239 return (count); 5240 } 5241 5242 5243 /* 5244 * ibdm_ibnex_get_ioc_list() 5245 * Returns information about all the IOCs present on the fabric. 5246 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 
5247 * Does not sweep fabric if DONOT_PROBE is set 5248 */ 5249 ibdm_ioc_info_t * 5250 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5251 { 5252 int ii; 5253 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5254 ibdm_dp_gidinfo_t *gid_list; 5255 ib_dm_io_unitinfo_t *iou; 5256 5257 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5258 5259 if (ibdm_enumerate_iocs == 0) 5260 return (NULL); 5261 5262 mutex_enter(&ibdm.ibdm_mutex); 5263 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5264 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5265 5266 while (ibdm.ibdm_busy & IBDM_BUSY) 5267 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5268 ibdm.ibdm_busy |= IBDM_BUSY; 5269 5270 gid_list = ibdm.ibdm_dp_gidlist_head; 5271 while (gid_list) { 5272 mutex_enter(&gid_list->gl_mutex); 5273 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5274 mutex_exit(&gid_list->gl_mutex); 5275 gid_list = gid_list->gl_next; 5276 continue; 5277 } 5278 if (gid_list->gl_iou == NULL) { 5279 IBTF_DPRINTF_L2("ibdm", 5280 "\tget_ioc_list: No IOU info"); 5281 mutex_exit(&gid_list->gl_mutex); 5282 gid_list = gid_list->gl_next; 5283 continue; 5284 } 5285 iou = &gid_list->gl_iou->iou_info; 5286 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5287 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5288 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5289 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5290 tmp->ioc_next = ioc_list; 5291 ioc_list = tmp; 5292 } 5293 } 5294 mutex_exit(&gid_list->gl_mutex); 5295 gid_list = gid_list->gl_next; 5296 } 5297 ibdm.ibdm_busy &= ~IBDM_BUSY; 5298 cv_broadcast(&ibdm.ibdm_busy_cv); 5299 mutex_exit(&ibdm.ibdm_mutex); 5300 5301 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5302 return (ioc_list); 5303 } 5304 5305 /* 5306 * ibdm_dup_ioc_info() 5307 * Duplicate the IOC information and return the IOC 5308 * information. 
5309 */ 5310 static ibdm_ioc_info_t * 5311 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 5312 { 5313 ibdm_ioc_info_t *out_ioc; 5314 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 5315 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 5316 5317 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 5318 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5319 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5320 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5321 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5322 5323 return (out_ioc); 5324 } 5325 5326 5327 /* 5328 * ibdm_free_ioc_list() 5329 * Deallocate memory for IOC list structure 5330 */ 5331 void 5332 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5333 { 5334 ibdm_ioc_info_t *temp; 5335 5336 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5337 while (ioc) { 5338 temp = ioc; 5339 ioc = ioc->ioc_next; 5340 kmem_free(temp->ioc_gid_list, 5341 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5342 if (temp->ioc_hca_list) 5343 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5344 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5345 } 5346 } 5347 5348 5349 /* 5350 * ibdm_ibnex_update_pkey_tbls 5351 * Updates the DM P_Key database. 5352 * NOTE: Two cases are handled here: P_Key being added or removed. 
 *
 *	Arguments	: NONE
 *	Return Values	: NONE
 */
void
ibdm_ibnex_update_pkey_tbls(void)
{
	int		h, pp, pidx;
	uint_t		nports;
	uint_t		size;
	ib_pkey_t	new_pkey;
	ib_pkey_t	*orig_pkey;
	ibdm_hca_list_t	*hca_list;
	ibdm_port_attr_t *port;
	ibt_hca_portinfo_t *pinfop;

	IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	hca_list = ibdm.ibdm_hca_list_head;

	for (h = 0; h < ibdm.ibdm_hca_count; h++) {

		/* This updates P_Key Tables for all ports of this HCA */
		(void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
		    &nports, &size);

		/* number of ports shouldn't have changed */
		ASSERT(nports == hca_list->hl_nports);

		for (pp = 0; pp < hca_list->hl_nports; pp++) {
			port = &hca_list->hl_port_attr[pp];

			/*
			 * First figure out the P_Keys from IBTL.
			 * Three things could have happened:
			 *	New P_Keys added
			 *	Existing P_Keys removed
			 *	Both of the above two
			 *
			 * Loop through the P_Key Indices and check if a
			 * give P_Key_Ix matches that of the one seen by
			 * IBDM. If they match no action is needed.
			 *
			 * If they don't match:
			 *	1. if orig_pkey is invalid and new_pkey is valid
			 *		---> add new_pkey to DM database
			 *	2. if orig_pkey is valid and new_pkey is invalid
			 *		---> remove orig_pkey from DM database
			 *	3. if orig_pkey and new_pkey are both valid:
			 *		---> remov orig_pkey from DM database
			 *		---> add new_pkey to DM database
			 *	4. if orig_pkey and new_pkey are both invalid:
			 *		---> do nothing. Updated DM database.
			 */

			for (pidx = 0; pidx < port->pa_npkeys; pidx++) {
				/* new value from IBTL, old from DM table */
				new_pkey = pinfop[pp].p_pkey_tbl[pidx];
				orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey;

				/* keys match - do nothing */
				if (*orig_pkey == new_pkey)
					continue;

				if (IBDM_INVALID_PKEY(*orig_pkey) &&
				    !IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key was added */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: new "
					    "P_Key added = 0x%x", new_pkey);
					*orig_pkey = new_pkey;
					/* register ibmf for the new pkey */
					ibdm_port_attr_ibmf_init(port,
					    new_pkey, pp);
				} else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
				    IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key was removed */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: P_Key "
					    "removed = 0x%x", *orig_pkey);
					*orig_pkey = new_pkey;
					/* tear down the ibmf registration */
					(void) ibdm_port_attr_ibmf_fini(port,
					    pidx);
				} else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
				    !IBDM_INVALID_PKEY(new_pkey)) {
					/* P_Key were replaced */
					IBTF_DPRINTF_L5("ibdm",
					    "\tibnex_update_pkey_tbls: P_Key "
					    "replaced 0x%x with 0x%x",
					    *orig_pkey, new_pkey);
					/* fini old before init new */
					(void) ibdm_port_attr_ibmf_fini(port,
					    pidx);
					*orig_pkey = new_pkey;
					ibdm_port_attr_ibmf_init(port,
					    new_pkey, pp);
				} else {
					/*
					 * P_Keys are invalid
					 * set anyway to reflect if
					 * INVALID_FULL was changed to
					 * INVALID_LIMITED or vice-versa.
					 */
					*orig_pkey = new_pkey;
				} /* end of else */

			} /* loop of p_key index */

		} /* loop of #ports of HCA */

		ibt_free_portinfo(pinfop, size);
		hca_list = hca_list->hl_next;

	} /* loop for all HCAs in the system */

	mutex_exit(&ibdm.ibdm_hl_mutex);
}


/*
 * ibdm_send_ioc_profile()
 *	Send IOC Controller Profile request. When the request is completed
 *	IBMF calls ibdm_process_incoming_mad routine to inform about
 *	the completion.
 */
static int
ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no)
{
	ibmf_msg_t	*msg;
	ib_mad_hdr_t	*hdr;
	ibdm_ioc_info_t	*ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]);
	ibdm_timeout_cb_args_t	*cb_args;

	IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: "
	    "gid info 0x%p, ioc_no = %d", gid_info, ioc_no);

	/*
	 * Send command to get IOC profile.
	 * Allocate a IBMF packet and initialize the packet.
	 */
	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
	    &msg) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tsend_ioc_profile: pkt alloc fail");
		return (IBDM_FAILURE);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
	ibdm_alloc_send_buffers(msg);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))

	/* New transaction id for this request */
	mutex_enter(&gid_info->gl_mutex);
	ibdm_bump_transactionID(gid_info);
	mutex_exit(&gid_info->gl_mutex);

	/*
	 * Address the MAD either to the DM agent itself or, if the
	 * agent previously redirected us, to the redirect target.
	 */
	msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
	msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
	if (gid_info->gl_redirected == B_TRUE) {
		if (gid_info->gl_redirect_dlid != 0) {
			msg->im_local_addr.ia_remote_lid =
			    gid_info->gl_redirect_dlid;
		}
		msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
		msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
	} else {
		msg->im_local_addr.ia_remote_qno = 1;
		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
	}

	/* Build the DevMgt GET header for the IOC controller profile */
	hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
	hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
	hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
	hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
	hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
	hdr->Status = 0;
	hdr->TransactionID = h2b64(gid_info->gl_transactionID);
	hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
	/* AttributeModifier selects the (1-based) controller slot */
	hdr->AttributeModifier = h2b32(ioc_no + 1);

	ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
	cb_args = &ioc_info->ioc_cb_args;
	cb_args->cb_gid_info = gid_info;
	cb_args->cb_retry_count = ibdm_dft_retry_cnt;
	cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
	cb_args->cb_ioc_num = ioc_no;

	/* Arm the response timeout before sending */
	mutex_enter(&gid_info->gl_mutex);
	ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:"
	    "timeout %x", ioc_info->ioc_timeout_id);

	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
	    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm",
		    "\tsend_ioc_profile: msg transport failed");
		/* invoke the send completion handler ourselves on failure */
		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
	}
	/*
	 * NOTE(review): ioc_state was already set to REPROBE_PROGRESS above;
	 * this second store re-asserts it after the (possibly failed)
	 * transport — looks redundant, confirm before removing.
	 */
	ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
	return (IBDM_SUCCESS);
}


/*
 * ibdm_port_reachable
 *	Returns B_TRUE if the port GID is reachable by sending
 *	a SA query to get the NODE record for this port GUID.
 */
static boolean_t
ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid)
{
	sa_node_record_t *resp;
	size_t length;

	/*
	 * Verify if it's reachable by getting the node record.
	 */
	if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) ==
	    IBDM_SUCCESS) {
		kmem_free(resp, length);
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ibdm_get_node_record_by_port
 *	Sends a SA query to get the NODE record for port GUID
 *	Returns IBDM_SUCCESS if the port GID is reachable.
5585 * 5586 * Note: the caller must be responsible for freeing the resource 5587 * by calling kmem_free(resp, length) later. 5588 */ 5589 static int 5590 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5591 sa_node_record_t **resp, size_t *length) 5592 { 5593 sa_node_record_t req; 5594 ibmf_saa_access_args_t args; 5595 int ret; 5596 ASSERT(resp != NULL && length != NULL); 5597 5598 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5599 guid); 5600 5601 bzero(&req, sizeof (sa_node_record_t)); 5602 req.NodeInfo.PortGUID = guid; 5603 5604 args.sq_attr_id = SA_NODERECORD_ATTRID; 5605 args.sq_access_type = IBMF_SAA_RETRIEVE; 5606 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5607 args.sq_template = &req; 5608 args.sq_callback = NULL; 5609 args.sq_callback_arg = NULL; 5610 5611 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5612 if (ret != IBMF_SUCCESS) { 5613 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5614 " SA Retrieve Failed: %d", ret); 5615 return (IBDM_FAILURE); 5616 } 5617 if (*resp == NULL || *length == 0) { 5618 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5619 return (IBDM_FAILURE); 5620 } 5621 /* 5622 * There is one NodeRecord on each endport on a subnet. 5623 */ 5624 ASSERT(*length == sizeof (sa_node_record_t)); 5625 5626 return (IBDM_SUCCESS); 5627 } 5628 5629 5630 /* 5631 * Update the gidlist for all affected IOCs when GID becomes 5632 * available/unavailable. 5633 * 5634 * Parameters : 5635 * gidinfo - Incoming / Outgoing GID. 5636 * add_flag - 1 for GID added, 0 for GID removed. 5637 * - (-1) : IOC gid list updated, ioc_list required. 5638 * 5639 * This function gets the GID for the node GUID corresponding to the 5640 * port GID. 
Gets the IOU info 5641 */ 5642 static ibdm_ioc_info_t * 5643 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5644 { 5645 ibdm_dp_gidinfo_t *node_gid = NULL; 5646 uint8_t niocs, ii; 5647 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5648 5649 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5650 5651 switch (avail_flag) { 5652 case 1 : 5653 node_gid = ibdm_check_dest_nodeguid(gid_info); 5654 break; 5655 case 0 : 5656 node_gid = ibdm_handle_gid_rm(gid_info); 5657 break; 5658 case -1 : 5659 node_gid = gid_info; 5660 break; 5661 default : 5662 break; 5663 } 5664 5665 if (node_gid == NULL) { 5666 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5667 "No node GID found, port gid 0x%p, avail_flag %d", 5668 gid_info, avail_flag); 5669 return (NULL); 5670 } 5671 5672 mutex_enter(&node_gid->gl_mutex); 5673 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5674 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5675 node_gid->gl_iou == NULL) { 5676 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5677 "gl_state %x, gl_iou %p", node_gid->gl_state, 5678 node_gid->gl_iou); 5679 mutex_exit(&node_gid->gl_mutex); 5680 return (NULL); 5681 } 5682 5683 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5684 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5685 niocs); 5686 for (ii = 0; ii < niocs; ii++) { 5687 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5688 /* 5689 * Skip IOCs for which probe is not complete or 5690 * reprobe is progress 5691 */ 5692 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5693 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5694 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5695 tmp->ioc_next = ioc_list; 5696 ioc_list = tmp; 5697 } 5698 } 5699 mutex_exit(&node_gid->gl_mutex); 5700 5701 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5702 ioc_list); 5703 return (ioc_list); 5704 } 5705 5706 /* 5707 * ibdm_saa_event_cb : 5708 * Event handling which does *not* require ibdm_hl_mutex to be 5709 * held are executed 
in the same thread. This is to prevent 5710 * deadlocks with HCA port down notifications which hold the 5711 * ibdm_hl_mutex. 5712 * 5713 * GID_AVAILABLE event is handled here. A taskq is spawned to 5714 * handle GID_UNAVAILABLE. 5715 * 5716 * A new mutex ibdm_ibnex_mutex has been introduced to protect 5717 * ibnex_callback. This has been done to prevent any possible 5718 * deadlock (described above) while handling GID_AVAILABLE. 5719 * 5720 * IBMF calls the event callback for a HCA port. The SA handle 5721 * for this port would be valid, till the callback returns. 5722 * IBDM calling IBDM using the above SA handle should be valid. 5723 * 5724 * IBDM will additionally check (SA handle != NULL), before 5725 * calling IBMF. 5726 */ 5727 /*ARGSUSED*/ 5728 static void 5729 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5730 ibmf_saa_subnet_event_t ibmf_saa_event, 5731 ibmf_saa_event_details_t *event_details, void *callback_arg) 5732 { 5733 ibdm_saa_event_arg_t *event_arg; 5734 ib_gid_t sgid, dgid; 5735 ibdm_port_attr_t *hca_port; 5736 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5737 sa_node_record_t *nrec; 5738 size_t length; 5739 5740 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5741 5742 hca_port = (ibdm_port_attr_t *)callback_arg; 5743 5744 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5745 ibmf_saa_handle, ibmf_saa_event, event_details, 5746 callback_arg); 5747 5748 #ifdef DEBUG 5749 if (ibdm_ignore_saa_event) 5750 return; 5751 #endif 5752 5753 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5754 /* 5755 * Ensure no other probe / sweep fabric is in 5756 * progress. 5757 */ 5758 mutex_enter(&ibdm.ibdm_mutex); 5759 while (ibdm.ibdm_busy & IBDM_BUSY) 5760 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5761 ibdm.ibdm_busy |= IBDM_BUSY; 5762 mutex_exit(&ibdm.ibdm_mutex); 5763 5764 /* 5765 * If we already know about this GID, return. 5766 * GID_AVAILABLE may be reported for multiple HCA 5767 * ports. 
5768 */ 5769 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5770 event_details->ie_gid.gid_prefix)) != NULL) { 5771 mutex_enter(&ibdm.ibdm_mutex); 5772 ibdm.ibdm_busy &= ~IBDM_BUSY; 5773 cv_broadcast(&ibdm.ibdm_busy_cv); 5774 mutex_exit(&ibdm.ibdm_mutex); 5775 return; 5776 } 5777 5778 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5779 "Insertion notified", 5780 event_details->ie_gid.gid_prefix, 5781 event_details->ie_gid.gid_guid); 5782 5783 /* This is a new gid, insert it to GID list */ 5784 sgid.gid_prefix = hca_port->pa_sn_prefix; 5785 sgid.gid_guid = hca_port->pa_port_guid; 5786 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5787 dgid.gid_guid = event_details->ie_gid.gid_guid; 5788 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5789 if (gid_info == NULL) { 5790 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5791 "create_gid_info returned NULL"); 5792 mutex_enter(&ibdm.ibdm_mutex); 5793 ibdm.ibdm_busy &= ~IBDM_BUSY; 5794 cv_broadcast(&ibdm.ibdm_busy_cv); 5795 mutex_exit(&ibdm.ibdm_mutex); 5796 return; 5797 } 5798 mutex_enter(&gid_info->gl_mutex); 5799 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5800 mutex_exit(&gid_info->gl_mutex); 5801 5802 /* Get the node GUID */ 5803 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5804 &nrec, &length) != IBDM_SUCCESS) { 5805 /* 5806 * Set the state to PROBE_NOT_DONE for the 5807 * next sweep to probe it 5808 */ 5809 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5810 "Skipping GID : port GUID not found"); 5811 mutex_enter(&gid_info->gl_mutex); 5812 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5813 mutex_exit(&gid_info->gl_mutex); 5814 mutex_enter(&ibdm.ibdm_mutex); 5815 ibdm.ibdm_busy &= ~IBDM_BUSY; 5816 cv_broadcast(&ibdm.ibdm_busy_cv); 5817 mutex_exit(&ibdm.ibdm_mutex); 5818 return; 5819 } 5820 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5821 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5822 kmem_free(nrec, length); 5823 gid_info->gl_portguid = dgid.gid_guid; 5824 5825 /* 
5826 * Get the gid info with the same node GUID. 5827 */ 5828 mutex_enter(&ibdm.ibdm_mutex); 5829 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5830 while (node_gid_info) { 5831 if (node_gid_info->gl_nodeguid == 5832 gid_info->gl_nodeguid && 5833 node_gid_info->gl_iou != NULL) { 5834 break; 5835 } 5836 node_gid_info = node_gid_info->gl_next; 5837 } 5838 mutex_exit(&ibdm.ibdm_mutex); 5839 5840 /* 5841 * Handling a new GID requires filling of gl_hca_list. 5842 * This require ibdm hca_list to be parsed and hence 5843 * holding the ibdm_hl_mutex. Spawning a new thread to 5844 * handle this. 5845 */ 5846 if (node_gid_info == NULL) { 5847 if (taskq_dispatch(system_taskq, 5848 ibdm_saa_handle_new_gid, (void *)gid_info, 5849 TQ_NOSLEEP) == NULL) { 5850 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5851 "new_gid taskq_dispatch failed"); 5852 return; 5853 } 5854 } 5855 5856 mutex_enter(&ibdm.ibdm_mutex); 5857 ibdm.ibdm_busy &= ~IBDM_BUSY; 5858 cv_broadcast(&ibdm.ibdm_busy_cv); 5859 mutex_exit(&ibdm.ibdm_mutex); 5860 return; 5861 } 5862 5863 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5864 return; 5865 5866 /* 5867 * GID UNAVAIL EVENT: Try to locate the GID in the GID list. 5868 * If we don't find it we just return. 5869 */ 5870 mutex_enter(&ibdm.ibdm_mutex); 5871 gid_info = ibdm.ibdm_dp_gidlist_head; 5872 while (gid_info) { 5873 if (gid_info->gl_portguid == 5874 event_details->ie_gid.gid_guid) { 5875 break; 5876 } 5877 gid_info = gid_info->gl_next; 5878 } 5879 mutex_exit(&ibdm.ibdm_mutex); 5880 if (gid_info == NULL) { 5881 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5882 "GID for GUID %llX not found during GID UNAVAIL event", 5883 event_details->ie_gid.gid_guid); 5884 return; 5885 } 5886 5887 /* 5888 * If this GID is DM capable, we'll have to check whether this DGID 5889 * is reachable via another port. 
5890 */ 5891 if (gid_info->gl_is_dm_capable == B_TRUE) { 5892 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5893 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5894 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5895 event_arg->ibmf_saa_event = ibmf_saa_event; 5896 bcopy(event_details, &event_arg->event_details, 5897 sizeof (ibmf_saa_event_details_t)); 5898 event_arg->callback_arg = callback_arg; 5899 5900 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5901 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5902 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5903 "taskq_dispatch failed"); 5904 ibdm_free_saa_event_arg(event_arg); 5905 return; 5906 } 5907 } 5908 } 5909 5910 /* 5911 * Handle a new GID discovered by GID_AVAILABLE saa event. 5912 */ 5913 void 5914 ibdm_saa_handle_new_gid(void *arg) 5915 { 5916 ibdm_dp_gidinfo_t *gid_info; 5917 ibdm_hca_list_t *hca_list = NULL; 5918 ibdm_port_attr_t *port = NULL; 5919 ibdm_ioc_info_t *ioc_list = NULL; 5920 5921 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5922 5923 gid_info = (ibdm_dp_gidinfo_t *)arg; 5924 5925 /* 5926 * Ensure that no other sweep / probe has completed 5927 * probing this gid. 5928 */ 5929 mutex_enter(&gid_info->gl_mutex); 5930 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5931 mutex_exit(&gid_info->gl_mutex); 5932 return; 5933 } 5934 mutex_exit(&gid_info->gl_mutex); 5935 5936 /* 5937 * Parse HCAs to fill gl_hca_list 5938 */ 5939 mutex_enter(&ibdm.ibdm_hl_mutex); 5940 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5941 ibdm_get_next_port(&hca_list, &port, 1)) { 5942 if (ibdm_port_reachable(port->pa_sa_hdl, 5943 gid_info->gl_portguid) == B_TRUE) { 5944 ibdm_addto_glhcalist(gid_info, hca_list); 5945 } 5946 } 5947 mutex_exit(&ibdm.ibdm_hl_mutex); 5948 5949 /* 5950 * Ensure no other probe / sweep fabric is in 5951 * progress. 
5952 */ 5953 mutex_enter(&ibdm.ibdm_mutex); 5954 while (ibdm.ibdm_busy & IBDM_BUSY) 5955 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5956 ibdm.ibdm_busy |= IBDM_BUSY; 5957 mutex_exit(&ibdm.ibdm_mutex); 5958 5959 /* 5960 * New IOU probe it, to check if new IOCs 5961 */ 5962 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5963 "new GID : probing"); 5964 mutex_enter(&ibdm.ibdm_mutex); 5965 ibdm.ibdm_ngid_probes_in_progress++; 5966 mutex_exit(&ibdm.ibdm_mutex); 5967 mutex_enter(&gid_info->gl_mutex); 5968 gid_info->gl_reprobe_flag = 0; 5969 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5970 mutex_exit(&gid_info->gl_mutex); 5971 ibdm_probe_gid_thread((void *)gid_info); 5972 5973 mutex_enter(&ibdm.ibdm_mutex); 5974 ibdm_wait_probe_completion(); 5975 mutex_exit(&ibdm.ibdm_mutex); 5976 5977 if (gid_info->gl_iou == NULL) { 5978 mutex_enter(&ibdm.ibdm_mutex); 5979 ibdm.ibdm_busy &= ~IBDM_BUSY; 5980 cv_broadcast(&ibdm.ibdm_busy_cv); 5981 mutex_exit(&ibdm.ibdm_mutex); 5982 return; 5983 } 5984 5985 /* 5986 * Update GID list in all IOCs affected by this 5987 */ 5988 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5989 5990 /* 5991 * Pass on the IOCs with updated GIDs to IBnexus 5992 */ 5993 if (ioc_list) { 5994 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5995 if (ibdm.ibdm_ibnex_callback != NULL) { 5996 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5997 IBDM_EVENT_IOC_PROP_UPDATE); 5998 } 5999 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6000 } 6001 6002 mutex_enter(&ibdm.ibdm_mutex); 6003 ibdm.ibdm_busy &= ~IBDM_BUSY; 6004 cv_broadcast(&ibdm.ibdm_busy_cv); 6005 mutex_exit(&ibdm.ibdm_mutex); 6006 } 6007 6008 /* 6009 * ibdm_saa_event_taskq : 6010 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 6011 * held. The GID_UNAVAILABLE handling is done in a taskq to 6012 * prevent deadlocks with HCA port down notifications which hold 6013 * ibdm_hl_mutex. 
 */
void
ibdm_saa_event_taskq(void *arg)
{
	ibdm_saa_event_arg_t	*event_arg;
	ibmf_saa_handle_t	ibmf_saa_handle;
	ibmf_saa_subnet_event_t	ibmf_saa_event;
	ibmf_saa_event_details_t *event_details;
	void			*callback_arg;

	ibdm_dp_gidinfo_t	*gid_info;
	ibdm_port_attr_t	*hca_port, *port = NULL;
	ibdm_hca_list_t		*hca_list = NULL;
	int			sa_handle_valid = 0;
	ibdm_ioc_info_t		*ioc_list = NULL;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));

	/*
	 * Unpack the event argument. This taskq owns event_arg and must
	 * free it (ibdm_free_saa_event_arg) on every return path.
	 */
	event_arg = (ibdm_saa_event_arg_t *)arg;
	ibmf_saa_handle = event_arg->ibmf_saa_handle;
	ibmf_saa_event = event_arg->ibmf_saa_event;
	event_details = &event_arg->event_details;
	callback_arg = event_arg->callback_arg;

	ASSERT(callback_arg != NULL);
	ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE);
	IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)",
	    ibmf_saa_handle, ibmf_saa_event, event_details,
	    callback_arg);

	hca_port = (ibdm_port_attr_t *)callback_arg;

	/*
	 * Check if the port_attr is still valid: walk the HCA port list
	 * under ibdm_hl_mutex and confirm the port (and its SA handle)
	 * still exists before dereferencing anything else from it.
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 0); port;
	    ibdm_get_next_port(&hca_list, &port, 0)) {
		if (port == hca_port && port->pa_port_guid ==
		    hca_port->pa_port_guid) {
			if (ibmf_saa_handle == hca_port->pa_sa_hdl)
				sa_handle_valid = 1;
			break;
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	if (sa_handle_valid == 0) {
		ibdm_free_saa_event_arg(event_arg);
		return;
	}

	if (hca_port && (hca_port->pa_sa_hdl == NULL ||
	    ibmf_saa_handle != hca_port->pa_sa_hdl)) {
		ibdm_free_saa_event_arg(event_arg);
		return;
	}
	/* Reset the walk state for the reachability scan below */
	hca_list = NULL;
	port = NULL;

	/*
	 * Check if the GID is visible to other HCA ports.
	 * Return if so.
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		if (ibdm_port_reachable(port->pa_sa_hdl,
		    event_details->ie_gid.gid_guid) == B_TRUE) {
			mutex_exit(&ibdm.ibdm_hl_mutex);
			ibdm_free_saa_event_arg(event_arg);
			return;
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	/*
	 * Ensure no other probe / sweep fabric is in
	 * progress (single-owner IBDM_BUSY handshake; cleared and
	 * broadcast before every return below).
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * If this GID is no longer in GID list, return
	 * GID_UNAVAILABLE may be reported for multiple HCA
	 * ports.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		if (gid_info->gl_portguid ==
		    event_details->ie_gid.gid_guid) {
			break;
		}
		gid_info = gid_info->gl_next;
	}
	mutex_exit(&ibdm.ibdm_mutex);
	if (gid_info == NULL) {
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_free_saa_event_arg(event_arg);
		return;
	}

	IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
	    "Unavailable notification",
	    event_details->ie_gid.gid_prefix,
	    event_details->ie_gid.gid_guid);

	/*
	 * Update GID list in all IOCs affected by this
	 */
	if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED ||
	    gid_info->gl_state == IBDM_GID_PROBING_COMPLETE)
		ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);

	/*
	 * Remove GID from the global GID list
	 * Handle the case where all port GIDs for an
	 * IOU have been hot-removed. Check both gid_info
	 * & ioc_info for checking ngids.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
		mutex_exit(&gid_info->gl_mutex);
	}
	/* Unlink gid_info from the doubly-linked global GID list */
	if (gid_info->gl_prev != NULL)
		gid_info->gl_prev->gl_next = gid_info->gl_next;
	if (gid_info->gl_next != NULL)
		gid_info->gl_next->gl_prev = gid_info->gl_prev;

	if (gid_info == ibdm.ibdm_dp_gidlist_head)
		ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
	if (gid_info == ibdm.ibdm_dp_gidlist_tail)
		ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
	ibdm.ibdm_ngids--;

	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);

	/* free the hca_list on this gid_info */
	ibdm_delete_glhca_list(gid_info);

	mutex_destroy(&gid_info->gl_mutex);
	kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus
	 */
	if (ioc_list) {
		IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE "
		    "IOC_PROP_UPDATE for %p\n", ioc_list);
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}

	ibdm_free_saa_event_arg(event_arg);
}


/*
 * ibdm_cmp_gid_list:
 *	Returns 0 if every GID in "new" also appears somewhere in "prev"
 *	(order-insensitive comparison), 1 on the first GID not found.
 */
static int
ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev)
{
	ibdm_gid_t		*scan_new, *scan_prev;
	int			cmp_failed = 0;

	ASSERT(new != NULL);
	ASSERT(prev != NULL);

	/*
	 * Search for each new gid anywhere in the prev GID list.
	 * Note that the gid list could have been re-ordered.
	 */
	for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
		for (scan_prev = prev, cmp_failed = 1; scan_prev;
		    scan_prev = scan_prev->gid_next) {
			if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
			    scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
				cmp_failed = 0;
				break;
			}
		}

		if (cmp_failed)
			return (1);
	}
	return (0);
}

/*
 * This is always called in a single thread
 * This function updates the gid_list and serv_list of IOC
 * The current gid_list is in ioc_info_t(contains only port
 * guids for which probe is done) & gidinfo_t(other port gids)
 * The gids in both locations are used for comparison.
 */
static void
ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_gid_t	*cur_gid_list;
	uint_t		cur_nportgids;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	ioc->ioc_info_updated.ib_prop_updated = 0;


	/* Current GID list in gid_info only */
	cur_gid_list = gidinfo->gl_gid;
	cur_nportgids = gidinfo->gl_ngids;

	/* Flag a service-property change on count or content mismatch */
	if (ioc->ioc_prev_serv_cnt !=
	    ioc->ioc_profile.ioc_service_entries ||
	    ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0],
	    ioc->ioc_prev_serv_cnt))
		ioc->ioc_info_updated.ib_srv_prop_updated = 1;

	/* Flag a GID-property change on count mismatch or set difference */
	if (ioc->ioc_prev_nportgids != cur_nportgids ||
	    ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	} else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
		ioc->ioc_info_updated.ib_gid_prop_updated = 1;
	}

	/* Zero out previous entries */
	ibdm_free_gid_list(ioc->ioc_prev_gid_list);
	if (ioc->ioc_prev_serv)
		kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
		    sizeof (ibdm_srvents_info_t));
	ioc->ioc_prev_serv_cnt = 0;
	ioc->ioc_prev_nportgids = 0;
	ioc->ioc_prev_serv = NULL;
	ioc->ioc_prev_gid_list = NULL;
}

/*
 * Handle GID removal. This returns gid_info of an GID for the same
 * node GUID, if found. For an GID with IOU information, the same
 * gid_info is returned if no gid_info with same node_guid is found.
 */
static ibdm_dp_gidinfo_t *
ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_dp_gidinfo_t	*gid_list;

	IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);

	if (rm_gid->gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
		/*
		 * Search for a GID with same node_guid and
		 * gl_iou != NULL
		 */
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list)
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);

		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	} else {
		/*
		 * Search for a GID with same node_guid and
		 * gl_iou == NULL
		 */
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
		for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
		    gid_list = gid_list->gl_next) {
			if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
			    == rm_gid->gl_nodeguid))
				break;
		}

		if (gid_list) {
			/*
			 * Copy the following fields from rm_gid :
			 *	1. gl_state
			 *	2. gl_iou
			 *	3. gl_gid & gl_ngids
			 *
			 * Note : Function is synchronized by
			 *	ibdm_busy flag.
			 *
			 * Note : Redirect info is initialized if
			 *	any MADs for the GID fail
			 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
			    "copying info to GID with gl_iou != NULl");
			gid_list->gl_state = rm_gid->gl_state;
			gid_list->gl_iou = rm_gid->gl_iou;
			gid_list->gl_gid = rm_gid->gl_gid;
			gid_list->gl_ngids = rm_gid->gl_ngids;

			/* Remove the GID from gl_gid list */
			ibdm_rmfrom_glgid_list(gid_list, rm_gid);
		} else {
			/*
			 * Handle a case where all GIDs to the IOU have
			 * been removed.
			 */
			IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
			    "to IOU");

			ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
			return (rm_gid);
		}
		IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
		return (gid_list);
	}
}

/*
 * ibdm_rmfrom_glgid_list:
 *	Remove the entry matching rm_gid's DGID from gid_info's gl_gid
 *	singly-linked list, free it and decrement gl_ngids.
 */
static void
ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
    ibdm_dp_gidinfo_t *rm_gid)
{
	ibdm_gid_t		*tmp, *prev;

	IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
	    gid_info, rm_gid);

	for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
		if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
		    tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
			if (prev == NULL)
				gid_info->gl_gid = tmp->gid_next;
			else
				prev->gid_next = tmp->gid_next;

			kmem_free(tmp, sizeof (ibdm_gid_t));
			gid_info->gl_ngids--;
			break;
		} else {
			prev = tmp;
			tmp = tmp->gid_next;
		}
	}
}

/*
 * ibdm_addto_gidlist:
 *	Append a copy of the "dest" GID list to *src_ptr. The copy is
 *	built by prepending, so the copied entries end up in reverse
 *	order relative to "dest".
 */
static void
ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
{
	ibdm_gid_t	*head = NULL, *new, *tail;

	/* First copy the destination */
	for (; dest; dest = dest->gid_next) {
		new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
		new->gid_dgid_hi = dest->gid_dgid_hi;
		new->gid_dgid_lo = dest->gid_dgid_lo;
		new->gid_next = head;
		head = new;
	}

	/* Insert this to the source */
	if (*src_ptr == NULL)
		*src_ptr = head;
	else {
		/* Walk to the tail of the source list and chain the copy */
		for (tail = *src_ptr; tail->gid_next != NULL;
		    tail = tail->gid_next)
			;

		tail->gid_next = head;
	}
}

/*
 * ibdm_free_gid_list:
 *	Free every element of a gl_gid-style singly-linked list.
 */
static void
ibdm_free_gid_list(ibdm_gid_t *head)
{
	ibdm_gid_t	*delete;

	for (delete = head; delete; ) {
		head = delete->gid_next;
		kmem_free(delete, sizeof (ibdm_gid_t));
		delete = head;
	}
}

/*
 * This function rescans the DM capable GIDs (gl_state is
 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This
 * basically checks if the DM capable GID is reachable. If
 * not this is handled the same way as GID_UNAVAILABLE,
 * except that notifications are not send to IBnexus.
 *
 * This function also initializes the ioc_prev_list for
 * a particular IOC (when called from probe_ioc, with
 * ioc_guidp != NULL) or all IOCs for the gid (called from
 * sweep_fabric, ioc_guidp == NULL).
 */
static void
ibdm_rescan_gidlist(ib_guid_t *ioc_guidp)
{
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	int			ii, niocs, found;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_ioc_info_t		*ioc_list;

	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
		found = 0;
		/* Only DM-capable states are rescanned */
		if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED &&
		    gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) {
			gid_info = gid_info->gl_next;
			continue;
		}

		/*
		 * Check if the GID is visible to any HCA ports.
		 * Return if so.
		 */
		mutex_enter(&ibdm.ibdm_hl_mutex);
		for (ibdm_get_next_port(&hca_list, &port, 1); port;
		    ibdm_get_next_port(&hca_list, &port, 1)) {
			if (ibdm_port_reachable(port->pa_sa_hdl,
			    gid_info->gl_dgid_lo) == B_TRUE) {
				found = 1;
				break;
			}
		}
		mutex_exit(&ibdm.ibdm_hl_mutex);

		if (found) {
			if (gid_info->gl_iou == NULL) {
				gid_info = gid_info->gl_next;
				continue;
			}

			/* Initialize the ioc_prev_gid_list */
			niocs =
			    gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii);

				if (ioc_guidp == NULL || (*ioc_guidp ==
				    ioc_list->ioc_profile.ioc_guid)) {
					/* Add info of GIDs in gid_info also */
					ibdm_addto_gidlist(
					    &ioc_list->ioc_prev_gid_list,
					    gid_info->gl_gid);
					ioc_list->ioc_prev_nportgids =
					    gid_info->gl_ngids;
				}
			}
			gid_info = gid_info->gl_next;
			continue;
		}

		IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
		    "deleted port GUID %llx",
		    gid_info->gl_dgid_lo);

		/*
		 * Update GID list in all IOCs affected by this
		 */
		ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);

		/*
		 * Remove GID from the global GID list
		 * Handle the case where all port GIDs for an
		 * IOU have been hot-removed.
		 */
		mutex_enter(&ibdm.ibdm_mutex);
		if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
			mutex_enter(&gid_info->gl_mutex);
			(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
			mutex_exit(&gid_info->gl_mutex);
		}

		/* Save the successor before unlinking and freeing */
		tmp = gid_info->gl_next;
		if (gid_info->gl_prev != NULL)
			gid_info->gl_prev->gl_next = gid_info->gl_next;
		if (gid_info->gl_next != NULL)
			gid_info->gl_next->gl_prev = gid_info->gl_prev;

		if (gid_info == ibdm.ibdm_dp_gidlist_head)
			ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
		if (gid_info == ibdm.ibdm_dp_gidlist_tail)
			ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
		ibdm.ibdm_ngids--;
		mutex_exit(&ibdm.ibdm_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);

		mutex_destroy(&gid_info->gl_mutex);
		kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));

		gid_info = tmp;

		/*
		 * Pass on the IOCs with updated GIDs to IBnexus
		 */
		if (ioc_list) {
			IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
			    "IOC_PROP_UPDATE for %p\n", ioc_list);
			mutex_enter(&ibdm.ibdm_ibnex_mutex);
			if (ibdm.ibdm_ibnex_callback != NULL) {
				(*ibdm.ibdm_ibnex_callback)((void *)
				    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
			}
			mutex_exit(&ibdm.ibdm_ibnex_mutex);
		}
	}
}

/*
 * This function notifies IBnex of IOCs on this GID.
 * Notification is for GIDs with gl_reprobe_flag set.
 * The flag is set when IOC probe / fabric sweep
 * probes a GID starting from CLASS port info.
 *
 * IBnexus will have information of a reconnected IOC
 * if it had probed it before. If this is a new IOC,
 * IBnexus ignores the notification.
 *
 * This function should be called with no locks held.
6544 */ 6545 static void 6546 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6547 { 6548 ibdm_ioc_info_t *ioc_list; 6549 6550 if (gid_info->gl_reprobe_flag == 0 || 6551 gid_info->gl_iou == NULL) 6552 return; 6553 6554 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6555 6556 /* 6557 * Pass on the IOCs with updated GIDs to IBnexus 6558 */ 6559 if (ioc_list) { 6560 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6561 if (ibdm.ibdm_ibnex_callback != NULL) { 6562 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6563 IBDM_EVENT_IOC_PROP_UPDATE); 6564 } 6565 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6566 } 6567 } 6568 6569 6570 static void 6571 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6572 { 6573 if (arg != NULL) 6574 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6575 } 6576 6577 /* 6578 * This function parses the list of HCAs and HCA ports 6579 * to return the port_attr of the next HCA port. A port 6580 * connected to IB fabric (port_state active) is returned, 6581 * if connected_flag is set. 
 */
static void
ibdm_get_next_port(ibdm_hca_list_t **inp_hcap,
    ibdm_port_attr_t **inp_portp, int connect_flag)
{
	int			ii;
	ibdm_port_attr_t	*port, *next_port = NULL;
	ibdm_port_attr_t	*inp_port;
	ibdm_hca_list_t		*hca_list;
	int			found = 0;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)",
	    inp_hcap, inp_portp, connect_flag);

	hca_list = *inp_hcap;
	inp_port = *inp_portp;

	/* A NULL input HCA means: start from the head of the HCA list */
	if (hca_list == NULL)
		hca_list = ibdm.ibdm_hca_list_head;

	for (; hca_list; hca_list = hca_list->hl_next) {
		for (ii = 0; ii < hca_list->hl_nports; ii++) {
			port = &hca_list->hl_port_attr[ii];

			/*
			 * inp_port != NULL;
			 * Skip till we find the matching port
			 */
			if (inp_port && !found) {
				if (inp_port == port)
					found = 1;
				continue;
			}

			if (!connect_flag) {
				next_port = port;
				break;
			}

			/*
			 * connect_flag set: lazily initialize the port's SA
			 * handle and only return ports that are ACTIVE.
			 */
			if (port->pa_sa_hdl == NULL)
				ibdm_initialize_port(port);
			if (port->pa_sa_hdl == NULL)
				(void) ibdm_fini_port(port);
			else if (next_port == NULL &&
			    port->pa_sa_hdl != NULL &&
			    port->pa_state == IBT_PORT_ACTIVE) {
				next_port = port;
				break;
			}
		}

		if (next_port)
			break;
	}

	IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : "
	    "returns hca_list %p port %p", hca_list, next_port);
	*inp_hcap = hca_list;
	*inp_portp = next_port;
}

/*
 * ibdm_add_to_gl_gid:
 *	Prepend addgid's DGID to nodegid's gl_gid list (under gl_mutex)
 *	and bump gl_ngids.
 */
static void
ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid)
{
	ibdm_gid_t	*tmp;

	tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
	tmp->gid_dgid_hi = addgid->gl_dgid_hi;
	tmp->gid_dgid_lo = addgid->gl_dgid_lo;

	mutex_enter(&nodegid->gl_mutex);
	tmp->gid_next = nodegid->gl_gid;
	nodegid->gl_gid = tmp;
	nodegid->gl_ngids++;
	mutex_exit(&nodegid->gl_mutex);
}

/*
 * ibdm_addto_glhcalist:
 *	Add a duplicate of "hca" to gid_info's gl_hca_list, unless an
 *	entry with the same HCA GUID is already present.
 */
static void
ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info,
    ibdm_hca_list_t *hca)
{
	ibdm_hca_list_t		*head, *prev = NULL, *temp;

	IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) "
	    ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list);
	ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));

	mutex_enter(&gid_info->gl_mutex);
	head = gid_info->gl_hca_list;
	if (head == NULL) {
		head = ibdm_dup_hca_attr(hca);
		head->hl_next = NULL;
		gid_info->gl_hca_list = head;
		mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
		    "gid %p, gl_hca_list %p", gid_info,
		    gid_info->gl_hca_list);
		return;
	}

	/* Check if already in the list */
	while (head) {
		if (head->hl_hca_guid == hca->hl_hca_guid) {
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L4(ibdm_string,
			    "\taddto_glhcalist : gid %p hca %p dup",
			    gid_info, hca);
			return;
		}
		prev = head;
		head = head->hl_next;
	}

	/* Add this HCA to gl_hca_list */
	temp = ibdm_dup_hca_attr(hca);
	temp->hl_next = NULL;
	prev->hl_next = temp;
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
	    "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list);
}

/*
 * ibdm_delete_glhca_list:
 *	Free gid_info's gl_hca_list (if any) under gl_mutex.
 */
static void
ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info)
{
	ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
	ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));

	mutex_enter(&gid_info->gl_mutex);
	if (gid_info->gl_hca_list)
		ibdm_ibnex_free_hca_list(gid_info->gl_hca_list);
	gid_info->gl_hca_list = NULL;
	mutex_exit(&gid_info->gl_mutex);
}


/*
 * ibdm_reset_all_dgids:
 *	Re-home (or delete) every gid_info that was using the SA handle
 *	of an HCA port that has gone down. Runs only if no other probe /
 *	sweep owns the IBDM_BUSY flag.
 */
static void
ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl)
{
	IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)",
	    port_sa_hdl);

	if (ibdm_enumerate_iocs == 0)
		return;

	ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
	ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check : Not busy in another probe / sweep */
	mutex_enter(&ibdm.ibdm_mutex);
	if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) {
		ibdm_dp_gidinfo_t	*gid_info;

		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);

		/*
		 * Check if any GID is using the SA & IBMF handle
		 * of HCA port going down. Reset ibdm_dp_gidinfo_t
		 * using another HCA port which can reach the GID.
		 * This is for DM capable GIDs only, no need to do
		 * this for others
		 *
		 * Delete the GID if no alternate HCA port to reach
		 * it is found.
		 */
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
			ibdm_dp_gidinfo_t	*tmp;

			IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr "
			    "checking gidinfo %p", gid_info);

			if (gid_info->gl_sa_hdl == port_sa_hdl) {
				IBTF_DPRINTF_L3(ibdm_string,
				    "\tevent_hdlr: down HCA port hdl "
				    "matches gid %p", gid_info);

				/*
				 * The non-DM GIDs can come back
				 * with a new subnet prefix, when
				 * the HCA port commes up again. To
				 * avoid issues, delete non-DM
				 * capable GIDs, if the gid was
				 * discovered using the HCA port
				 * going down. This is ensured by
				 * setting gl_disconnected to 1.
				 */
				if (gid_info->gl_is_dm_capable == B_FALSE)
					gid_info->gl_disconnected = 1;
				else
					ibdm_reset_gidinfo(gid_info);

				if (gid_info->gl_disconnected) {
					IBTF_DPRINTF_L3(ibdm_string,
					    "\tevent_hdlr: deleting"
					    " gid %p", gid_info);
					/* Advance before deleting */
					tmp = gid_info;
					gid_info = gid_info->gl_next;
					ibdm_delete_gidinfo(tmp);
				} else
					gid_info = gid_info->gl_next;
			} else
				gid_info = gid_info->gl_next;
		}

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_signal(&ibdm.ibdm_busy_cv);
	}
	mutex_exit(&ibdm.ibdm_mutex);
}

/*
 * ibdm_reset_gidinfo:
 *	Re-populate a gidinfo (path, lids, pkey, SA/IBMF handles) via the
 *	first active local HCA port that can resolve it; mark the gidinfo
 *	gl_disconnected if no port succeeds.
 */
static void
ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	int			gid_reinited = 0;
	sa_node_record_t	*nr, *tmp;
	sa_portinfo_record_t	*pi;
	size_t			nr_len = 0, pi_len = 0;
	size_t			path_len;
	ib_gid_t		sgid, dgid;
	int			ret, ii, nrecords;
	sa_path_record_t	*path;
	uint8_t			npaths = 1;
	ibdm_pkey_tbl_t		*pkey_tbl;

	IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo);

	/*
	 * Get list of all the ports reachable from the local known HCA
	 * ports which are active
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {


		/*
		 * Get the path and re-populate the gidinfo.
		 * Getting the path is the same probe_ioc
		 * Init the gid info as in ibdm_create_gidinfo()
		 */
		nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len,
		    gidinfo->gl_nodeguid);
		if (nr == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no records");
			continue;
		}

		/* Locate the node record matching this gidinfo's port GUID */
		nrecords = (nr_len / sizeof (sa_node_record_t));
		for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
			if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid)
				break;
		}

		if (ii == nrecords) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no record for portguid");
			kmem_free(nr, nr_len);
			continue;
		}

		pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID);
		if (pi == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no portinfo");
			kmem_free(nr, nr_len);
			continue;
		}

		sgid.gid_prefix = port->pa_sn_prefix;
		sgid.gid_guid = port->pa_port_guid;
		dgid.gid_prefix = pi->PortInfo.GidPrefix;
		dgid.gid_guid = tmp->NodeInfo.PortGUID;

		ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid,
		    IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path);

		if ((ret != IBMF_SUCCESS) || path == NULL) {
			IBTF_DPRINTF_L4(ibdm_string,
			    "\treset_gidinfo : no paths");
			kmem_free(pi, pi_len);
			kmem_free(nr, nr_len);
			continue;
		}

		gidinfo->gl_dgid_hi	= path->DGID.gid_prefix;
		gidinfo->gl_dgid_lo	= path->DGID.gid_guid;
		gidinfo->gl_sgid_hi	= path->SGID.gid_prefix;
		gidinfo->gl_sgid_lo	= path->SGID.gid_guid;
		gidinfo->gl_p_key	= path->P_Key;
		gidinfo->gl_sa_hdl	= port->pa_sa_hdl;
		gidinfo->gl_ibmf_hdl	= port->pa_ibmf_hdl;
		gidinfo->gl_slid	= path->SLID;
		gidinfo->gl_dlid	= path->DLID;
		/* Reset redirect info, next MAD will set if redirected */
		gidinfo->gl_redirected	= 0;
		gidinfo->gl_devid	= (*tmp).NodeInfo.DeviceID;
		gidinfo->gl_SL		= path->SL;

		/* Match the path's P_Key to a QP handle in the pkey table */
		gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
		for (ii = 0; ii < port->pa_npkeys; ii++) {
			if (port->pa_pkey_tbl == NULL)
				break;

			pkey_tbl = &port->pa_pkey_tbl[ii];
			if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) &&
			    (pkey_tbl->pt_qp_hdl != NULL)) {
				gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
				break;
			}
		}

		if (gidinfo->gl_qp_hdl == NULL)
			IBTF_DPRINTF_L2(ibdm_string,
			    "\treset_gid_info: No matching Pkey");
		else
			gid_reinited = 1;

		kmem_free(path, path_len);
		kmem_free(pi, pi_len);
		kmem_free(nr, nr_len);
		break;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	if (!gid_reinited)
		gidinfo->gl_disconnected = 1;
}

/*
 * ibdm_delete_gidinfo:
 *	Unlink (if linked), tear down and free a gidinfo; notify IBnexus
 *	of the IOCs whose GID lists changed.
 */
static void
ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
{
	ibdm_ioc_info_t *ioc_list;
	int		in_gidlist = 0;

	/*
	 * Check if gidinfo has been inserted into the
	 * ibdm_dp_gidlist_head list. gl_next or gl_prev
	 * != NULL, if gidinfo is the list.
	 */
	if (gidinfo->gl_prev != NULL ||
	    gidinfo->gl_next != NULL ||
	    ibdm.ibdm_dp_gidlist_head == gidinfo)
		in_gidlist = 1;

	ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0);

	/*
	 * Remove GID from the global GID list
	 * Handle the case where all port GIDs for an
	 * IOU have been hot-removed.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) {
		mutex_enter(&gidinfo->gl_mutex);
		(void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou);
		mutex_exit(&gidinfo->gl_mutex);
	}

	/* Delete gl_hca_list */
	mutex_exit(&ibdm.ibdm_mutex);
	ibdm_delete_glhca_list(gidinfo);
	mutex_enter(&ibdm.ibdm_mutex);

	if (in_gidlist) {
		if (gidinfo->gl_prev != NULL)
			gidinfo->gl_prev->gl_next = gidinfo->gl_next;
		if (gidinfo->gl_next != NULL)
			gidinfo->gl_next->gl_prev = gidinfo->gl_prev;

		if (gidinfo == ibdm.ibdm_dp_gidlist_head)
			ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next;
		if (gidinfo == ibdm.ibdm_dp_gidlist_tail)
			ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev;
		ibdm.ibdm_ngids--;
	}
	mutex_exit(&ibdm.ibdm_mutex);

	mutex_destroy(&gidinfo->gl_mutex);
	cv_destroy(&gidinfo->gl_probe_cv);
	kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t));

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus
	 */
	if (ioc_list) {
		IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo "
		    "IOC_PROP_UPDATE for %p\n", ioc_list);
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}
}


/*
 * ibdm_fill_srv_attr_mod:
 *	Encode the IOC slot number (biased by 1, bits 16..31) and the
 *	service-entry start/end indices (bits 0..7 / 8..15) into the DM
 *	MAD AttributeModifier, in big-endian (h2b32) byte order.
 */
static void
ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args)
{
	uint32_t	attr_mod;

	attr_mod = (cb_args->cb_ioc_num + 1) << 16;
	attr_mod |= cb_args->cb_srvents_start;
	attr_mod |= (cb_args->cb_srvents_end) << 8;
	hdr->AttributeModifier = h2b32(attr_mod);
}

/*
 * ibdm_bump_transactionID:
 *	Advance the per-GID transaction ID; wrap from the max back to
 *	the min of this GID's allotted range. Caller holds gl_mutex.
 */
static void
ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info)
{
	ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
	gid_info->gl_transactionID++;
	if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) {
		IBTF_DPRINTF_L4(ibdm_string,
		    "\tbump_transactionID(%p), wrapup", gid_info);
		gid_info->gl_transactionID = gid_info->gl_min_transactionID;
	}
}

/*
 * gl_prev_iou is set for *non-reprobe* sweeep requests, which
 * detected that ChangeID in IOU info has changed. The service
 * entry also may have changed. Check if service entry in IOC
 * has changed wrt the prev iou, if so notify to IB Nexus.
 */
static ibdm_ioc_info_t *
ibdm_handle_prev_iou()
{
	ibdm_dp_gidinfo_t	*gid_info;
	ibdm_ioc_info_t		*ioc_list_head = NULL, *ioc_list;
	ibdm_ioc_info_t		*prev_ioc, *ioc;
	int			ii, jj, niocs, prev_niocs;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter");
	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
	    gid_info = gid_info->gl_next) {
		if (gid_info->gl_prev_iou == NULL)
			continue;

		IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p",
		    gid_info);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		prev_niocs =
		    gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots;
		for (ii = 0; ii < niocs; ii++) {
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);

			/* Find matching IOC */
			for (jj = 0; jj < prev_niocs; jj++) {
				prev_ioc = (ibdm_ioc_info_t *)
				    &gid_info->gl_prev_iou->iou_ioc_info[jj];
				if (prev_ioc->ioc_profile.ioc_guid ==
				    ioc->ioc_profile.ioc_guid)
					break;
			}
			if (jj == prev_niocs)
				prev_ioc = NULL;
			if (ioc == NULL || prev_ioc == NULL)
				continue;
			if ((ioc->ioc_profile.ioc_service_entries !=
			    prev_ioc->ioc_profile.ioc_service_entries) ||
			    ibdm_serv_cmp(&ioc->ioc_serv[0],
			    &prev_ioc->ioc_serv[0],
			    ioc->ioc_profile.ioc_service_entries) != 0) {
				IBTF_DPRINTF_L4(ibdm_string,
				    "/thandle_prev_iou modified IOC: "
				    "current ioc %p, old ioc %p",
				    ioc, prev_ioc);
				mutex_enter(&gid_info->gl_mutex);
				ioc_list = ibdm_dup_ioc_info(ioc, gid_info);
				mutex_exit(&gid_info->gl_mutex);
				ioc_list->ioc_info_updated.ib_prop_updated
				    = 0;
				ioc_list->ioc_info_updated.ib_srv_prop_updated
				    = 1;

				if (ioc_list_head == NULL)
					ioc_list_head = ioc_list;
				else {
					/*
					 * NOTE(review): this links the old
					 * head to the new entry and then moves
					 * the head pointer, so the returned
					 * list appears to hold only the most
					 * recently added IOC — confirm whether
					 * dropping earlier entries here is
					 * intentional.
					 */
					ioc_list_head->ioc_next = ioc_list;
					ioc_list_head = ioc_list;
				}
			}
		}

		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou);
		mutex_exit(&gid_info->gl_mutex);
	}
	IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p",
	    ioc_list_head);
	return (ioc_list_head);
}

/*
 * Compares two service entries lists, returns 0 if same, returns 1
 * if no match.
 */
static int
ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2,
    int nserv)
{
	int	ii;

	IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter");
	for (ii = 0; ii < nserv; ii++, serv1++, serv2++) {
		if (serv1->se_attr.srv_id != serv2->se_attr.srv_id ||
		    bcmp(serv1->se_attr.srv_name,
		    serv2->se_attr.srv_name,
		    IB_DM_MAX_SVC_NAME_LEN) != 0) {
			IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1");
			return (1);
		}
	}
	IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0");
	return (0);
}

/* For debugging purpose only */
#ifdef DEBUG
void
ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info");
	IBTF_DPRINTF_L4("ibdm", "\t\t ---------------");

	IBTF_DPRINTF_L4("ibdm", "\tBase version  : 0x%x"
	    "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass);
	IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x"
	    "\tR Method           : 0x%x",
	    mad_hdr->ClassVersion, mad_hdr->R_Method);
	IBTF_DPRINTF_L4("ibdm", "\tMAD  Status   : 0x%x"
	    "\tTransaction ID     : 0x%llx",
	    b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID));
	IBTF_DPRINTF_L4("ibdm", "\t Attribute ID  : 0x%x"
	    "\tAttribute Modified : 0x%lx",
	    b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier));
}


void
ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag)
{
	ib_mad_hdr_t	*mad_hdr;

	IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info");
	IBTF_DPRINTF_L4("ibdm", "\t\t            ------------------");

	IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x"
	    " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid,
	    ibmf_msg->im_local_addr.ia_remote_lid,
	    ibmf_msg->im_local_addr.ia_remote_qno);
	IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x"
	    " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key,
	    ibmf_msg->im_local_addr.ia_q_key,
	    ibmf_msg->im_local_addr.ia_service_level);

	/* flag != 0 dumps the outbound MAD header, else the inbound one */
	if (flag)
		mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg);
	else
		mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg);

	ibdm_dump_mad_hdr(mad_hdr);
}


void
ibdm_dump_path_info(sa_path_record_t *path)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t Path information");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------");

	IBTF_DPRINTF_L4("ibdm", "\t DGID hi  : %llx\tDGID lo  : %llx",
	    path->DGID.gid_prefix, path->DGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SGID hi  : %llx\tSGID lo  : %llx",
	    path->SGID.gid_prefix, path->SGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SLID     : %x\t\tDlID     : %x",
	    path->SLID, path->DLID);
	IBTF_DPRINTF_L4("ibdm", "\t P Key    : %x\t\tSL       : %x",
	    path->P_Key, path->SL);
}


void
ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO");
	IBTF_DPRINTF_L4("ibdm", "\t\t --------------");

	IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x",
	    ((b2h32(classportinfo->RespTimeValue)) & 0x1F));

	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi   : 0x%llx",
	    b2h64(classportinfo->RedirectGID_hi));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo   : 0x%llx",
	    b2h64(classportinfo->RedirectGID_lo));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected TC       : 0x%x",
	    classportinfo->RedirectTC);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected SL       : 0x%x",
	    classportinfo->RedirectSL);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected FL       : 0x%x",
	    classportinfo->RedirectFL);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected LID      : 0x%x",
	    b2h16(classportinfo->RedirectLID));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY    : 0x%x",
	    b2h16(classportinfo->RedirectP_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected QP       : 0x%x",
	    classportinfo->RedirectQP);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY    : 0x%x",
	    b2h32(classportinfo->RedirectQ_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi         : 0x%llx",
	    b2h64(classportinfo->TrapGID_hi));
	IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo         : 0x%llx",
	    b2h64(classportinfo->TrapGID_lo));
	IBTF_DPRINTF_L4("ibdm", "\t Trap TC             : 0x%x",
	    classportinfo->TrapTC);
	IBTF_DPRINTF_L4("ibdm", "\t Trap SL             : 0x%x",
	    classportinfo->TrapSL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap FL             : 0x%x",
	    classportinfo->TrapFL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap LID            : 0x%x",
	    b2h16(classportinfo->TrapLID));
	IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key          : 0x%x",
	    b2h16(classportinfo->TrapP_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Trap HL             : 0x%x",
	    classportinfo->TrapHL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap QP             : 0x%x",
	    classportinfo->TrapQP);
	IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key          : 0x%x",
	    b2h32(classportinfo->TrapQ_Key));
}


void
ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo");
	IBTF_DPRINTF_L4("ibdm", "\t\t ------------");

	IBTF_DPRINTF_L4("ibdm", "\tChange ID            : 0x%x",
	    b2h16(iou_info->iou_changeid));
	IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots       : %d",
	    iou_info->iou_num_ctrl_slots);
	IBTF_DPRINTF_L4("ibdm", "\tIOU flag             : 0x%x",
	    iou_info->iou_flag);
	IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0   : 0x%x",
	    iou_info->iou_ctrl_list[0]);
	IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1   : 0x%x",
	    iou_info->iou_ctrl_list[1]);
	IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2   : 0x%x",
	    iou_info->iou_ctrl_list[2]);
}


void
ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------");

	IBTF_DPRINTF_L4("ibdm", "\tIOC Guid    : %llx", ioc->ioc_guid);
	IBTF_DPRINTF_L4("ibdm", "\tVendorID    : 0x%x", ioc->ioc_vendorid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Id   : 0x%x", ioc->ioc_deviceid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Ver  : 0x%x", ioc->ioc_device_ver);
	IBTF_DPRINTF_L4("ibdm", "\tSubsys ID   : 0x%x", ioc->ioc_subsys_id);
	IBTF_DPRINTF_L4("ibdm", "\tIO class    : 0x%x", ioc->ioc_io_class);
	IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass);
	IBTF_DPRINTF_L4("ibdm", "\tProtocol    : 0x%x", ioc->ioc_protocol);
	IBTF_DPRINTF_L4("ibdm", "\tProtocolV   : 0x%x", ioc->ioc_protocol_ver);
	IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth  : %d", ioc->ioc_send_msg_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d",
	    ioc->ioc_rdma_read_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz   : %d", ioc->ioc_send_msg_sz);
	IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz);
	IBTF_DPRINTF_L4("ibdm", "\topcal mask  : 0x%x",
	    ioc->ioc_ctrl_opcap_mask);
	IBTF_DPRINTF_L4("ibdm", "\tsrventries  : %x", ioc->ioc_service_entries);
}


void
ibdm_dump_service_entries(ib_dm_srv_t *srv_ents)
{
	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id);

	IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: "
	    "Service Name : 
%s", srv_ents->srv_name); 7280 } 7281 7282 int ibdm_allow_sweep_fabric_timestamp = 1; 7283 7284 void 7285 ibdm_dump_sweep_fabric_timestamp(int flag) 7286 { 7287 static hrtime_t x; 7288 if (flag) { 7289 if (ibdm_allow_sweep_fabric_timestamp) { 7290 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7291 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7292 } 7293 x = 0; 7294 } else 7295 x = gethrtime(); 7296 } 7297 #endif