Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/hxge/hxge_main.c
+++ new/usr/src/uts/common/io/hxge/hxge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2012 Milan Jurik. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver.
29 29 */
30 30 #include <hxge_impl.h>
31 31 #include <hxge_pfc.h>
32 32
33 33 /*
34 34 * PSARC/2007/453 MSI-X interrupt limit override
35 35 * (This PSARC case is limited to MSI-X vectors
36 36 * and SPARC platforms only).
37 37 */
38 38 uint32_t hxge_msi_enable = 2;
39 39
40 40 /*
41 41 * Globals: tunable parameters (/etc/system or adb)
42 42 *
43 43 */
44 44 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
45 45 uint32_t hxge_rbr_spare_size = 0;
46 46 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
47 47 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
48 48 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
49 49 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
50 50 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
51 51 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
52 52
53 53 static hxge_os_mutex_t hxgedebuglock;
54 54 static int hxge_debug_init = 0;
55 55
56 56 /*
57 57 * Debugging flags:
58 58 * hxge_no_tx_lb : transmit load balancing
59 59 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
60 60 * 1 - From the Stack
61 61 * 2 - Destination IP Address
62 62 */
63 63 uint32_t hxge_no_tx_lb = 0;
64 64 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
65 65
66 66 /*
67 67 * Tunables to manage the receive buffer blocks.
68 68 *
69 69 * hxge_rx_threshold_hi: copy all buffers.
70 70 * hxge_rx_bcopy_size_type: receive buffer block size type.
71 71 * hxge_rx_threshold_lo: copy only up to tunable block size type.
72 72 */
73 73 #if defined(__sparc)
74 74 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
75 75 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
76 76 #else
77 77 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
78 78 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
79 79 #endif
80 80 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
81 81
82 82 rtrace_t hpi_rtracebuf;
83 83
84 84 /*
85 85 * Function Prototypes
86 86 */
87 87 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
88 88 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
89 89 static void hxge_unattach(p_hxge_t);
90 90
91 91 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
92 92
93 93 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
94 94 static void hxge_destroy_mutexes(p_hxge_t);
95 95
96 96 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
97 97 static void hxge_unmap_regs(p_hxge_t hxgep);
98 98
99 99 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
100 100 static void hxge_remove_intrs(p_hxge_t hxgep);
101 101 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
102 102 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
103 103 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
104 104 static void hxge_intrs_enable(p_hxge_t hxgep);
105 105 static void hxge_intrs_disable(p_hxge_t hxgep);
106 106 static void hxge_suspend(p_hxge_t);
107 107 static hxge_status_t hxge_resume(p_hxge_t);
108 108 static hxge_status_t hxge_setup_dev(p_hxge_t);
109 109 static void hxge_destroy_dev(p_hxge_t);
110 110 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
111 111 static void hxge_free_mem_pool(p_hxge_t);
112 112 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
113 113 static void hxge_free_rx_mem_pool(p_hxge_t);
114 114 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
115 115 static void hxge_free_tx_mem_pool(p_hxge_t);
116 116 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
117 117 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
118 118 p_hxge_dma_common_t);
119 119 static void hxge_dma_mem_free(p_hxge_dma_common_t);
120 120 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
121 121 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
122 122 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
123 123 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
124 124 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
125 125 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
126 126 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
127 127 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 128 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 129 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
130 130 p_hxge_dma_common_t *, size_t);
131 131 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 132 static int hxge_init_common_dev(p_hxge_t);
133 133 static void hxge_uninit_common_dev(p_hxge_t);
134 134
135 135 /*
136 136 * The next declarations are for the GLDv3 interface.
137 137 */
138 138 static int hxge_m_start(void *);
139 139 static void hxge_m_stop(void *);
140 140 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
141 141 static int hxge_m_promisc(void *, boolean_t);
142 142 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
143 143 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
144 144
145 145 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
146 146 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
147 147 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
148 148 uint_t pr_valsize, const void *pr_val);
149 149 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
150 150 uint_t pr_valsize, void *pr_val);
151 151 static void hxge_m_propinfo(void *barg, const char *pr_name,
152 152 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
153 153 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
154 154 uint_t pr_valsize, const void *pr_val);
155 155 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
156 156 uint_t pr_valsize, void *pr_val);
157 157 static void hxge_link_poll(void *arg);
158 158 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
159 159 static void hxge_msix_init(p_hxge_t hxgep);
160 160
/*
 * Private (driver-specific) mac properties exported to the GLDv3
 * framework.  The list is NULL-terminated.
 */
char *hxge_priv_props[] = {
	"_rxdma_intr_time",
	"_rxdma_intr_pkts",
	"_class_opt_ipv4_tcp",
	"_class_opt_ipv4_udp",
	"_class_opt_ipv4_ah",
	"_class_opt_ipv4_sctp",
	"_class_opt_ipv6_tcp",
	"_class_opt_ipv6_udp",
	"_class_opt_ipv6_ah",
	"_class_opt_ipv6_sctp",
	NULL
};

/*
 * Number of entries in hxge_priv_props, including the terminating NULL.
 * The array holds char * elements, so size it by its own element type;
 * the previous divisor, sizeof (mac_priv_prop_t), was a stale leftover
 * and yields a wrong count whenever that structure's size differs from
 * sizeof (char *).
 */
#define	HXGE_MAX_PRIV_PROPS	\
	(sizeof (hxge_priv_props) / sizeof (hxge_priv_props[0]))
177 177
#define	HXGE_MAGIC		0x4E584745UL	/* ASCII "NXGE" */
#define	MAX_DUMP_SZ		256		/* max bytes per debug dump */

/* Optional GLDv3 entry points this driver implements (mc_callbacks). */
#define	HXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);

/*
 * GLDv3 callback vector registered via mac_register() in
 * hxge_mac_register().  NOTE(review): the NULL slots presumably
 * correspond to mc_unicst/mc_tx/mc_reserved and mc_open/mc_close in
 * mac_callbacks_t -- confirm against the mac(9E) structure layout.
 */
static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,
	hxge_m_stat,
	hxge_m_start,
	hxge_m_stop,
	hxge_m_promisc,
	hxge_m_multicst,
	NULL,
	NULL,
	NULL,
	hxge_m_ioctl,
	hxge_m_getcapab,
	NULL,
	NULL,
	hxge_m_setprop,
	hxge_m_getprop,
	hxge_m_propinfo
};
204 204
205 205 /* PSARC/2007/453 MSI-X interrupt limit override. */
206 206 #define HXGE_MSIX_REQUEST_10G 8
207 207 static int hxge_create_msi_property(p_hxge_t);
208 208
209 209 /* Enable debug messages as necessary. */
210 210 uint64_t hxge_debug_level = 0;
211 211
212 212 /*
213 213 * This list contains the instance structures for the Hydra
214 214 * devices present in the system. The lock exists to guarantee
215 215 * mutually exclusive access to the list.
216 216 */
217 217 void *hxge_list = NULL;
218 218 void *hxge_hw_list = NULL;
219 219 hxge_os_mutex_t hxge_common_lock;
220 220
221 221 extern uint64_t hpi_debug_level;
222 222
223 223 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
224 224 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
225 225 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
226 226 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
227 227 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
228 228 extern void hxge_fm_fini(p_hxge_t hxgep);
229 229
230 230 /*
231 231 * Count used to maintain the number of buffers being used
232 232 * by Hydra instances and loaned up to the upper layers.
233 233 */
234 234 uint32_t hxge_mblks_pending = 0;
235 235
236 236 /*
237 237 * Device register access attributes for PIO.
238 238 */
239 239 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
240 240 DDI_DEVICE_ATTR_V0,
241 241 DDI_STRUCTURE_LE_ACC,
242 242 DDI_STRICTORDER_ACC,
243 243 };
244 244
245 245 /*
246 246 * Device descriptor access attributes for DMA.
247 247 */
248 248 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
249 249 DDI_DEVICE_ATTR_V0,
250 250 DDI_STRUCTURE_LE_ACC,
251 251 DDI_STRICTORDER_ACC
252 252 };
253 253
254 254 /*
255 255 * Device buffer access attributes for DMA.
256 256 */
257 257 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
258 258 DDI_DEVICE_ATTR_V0,
259 259 DDI_STRUCTURE_BE_ACC,
260 260 DDI_STRICTORDER_ACC
261 261 };
262 262
/* DMA attributes for RX completion ring (RCR) descriptors. */
ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x80000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for TX descriptor rings. */
ddi_dma_attr_t hxge_tx_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x100000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for RX block ring (RBR) descriptors. */
ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x40000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for the RX mailbox area. */
ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for TX packet buffers (up to 5 cookies per packet). */
ddi_dma_attr_t hxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/*
 * DMA attributes for RX packet buffers.  Relaxed ordering is allowed
 * here (and only here) for better PCIe throughput.
 */
ddi_dma_attr_t hxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x10000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

/* Legacy DDI DMA limits structure (used with dma_method_t DDMA path). */
ddi_dma_lim_t hxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

/* Which DMA binding method to force; DVMA by default. */
dma_method_t hxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer number of dma chunks would be managed
 * (sizes range from 4KB to 16MB, largest tried first).
 */
size_t alloc_sizes[] = {
	0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};
383 383
384 384 /*
385 385 * Translate "dev_t" to a pointer to the associated "dev_info_t".
386 386 */
387 387 static int
388 388 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
389 389 {
390 390 p_hxge_t hxgep = NULL;
391 391 int instance;
392 392 int status = DDI_SUCCESS;
393 393 int i;
394 394
395 395 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
396 396
397 397 /*
398 398 * Get the device instance since we'll need to setup or retrieve a soft
399 399 * state for this instance.
400 400 */
401 401 instance = ddi_get_instance(dip);
402 402
403 403 switch (cmd) {
404 404 case DDI_ATTACH:
405 405 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
406 406 break;
407 407
408 408 case DDI_RESUME:
409 409 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
410 410 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
411 411 if (hxgep == NULL) {
412 412 status = DDI_FAILURE;
413 413 break;
414 414 }
415 415 if (hxgep->dip != dip) {
416 416 status = DDI_FAILURE;
417 417 break;
418 418 }
419 419 if (hxgep->suspended == DDI_PM_SUSPEND) {
420 420 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
421 421 } else {
422 422 (void) hxge_resume(hxgep);
423 423 }
424 424 goto hxge_attach_exit;
425 425
426 426 case DDI_PM_RESUME:
427 427 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
428 428 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
429 429 if (hxgep == NULL) {
430 430 status = DDI_FAILURE;
431 431 break;
432 432 }
433 433 if (hxgep->dip != dip) {
434 434 status = DDI_FAILURE;
435 435 break;
436 436 }
437 437 (void) hxge_resume(hxgep);
438 438 goto hxge_attach_exit;
439 439
440 440 default:
441 441 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
442 442 status = DDI_FAILURE;
443 443 goto hxge_attach_exit;
444 444 }
445 445
446 446 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
447 447 status = DDI_FAILURE;
448 448 HXGE_ERROR_MSG((hxgep, DDI_CTL,
449 449 "ddi_soft_state_zalloc failed"));
450 450 goto hxge_attach_exit;
451 451 }
452 452
453 453 hxgep = ddi_get_soft_state(hxge_list, instance);
454 454 if (hxgep == NULL) {
455 455 status = HXGE_ERROR;
456 456 HXGE_ERROR_MSG((hxgep, DDI_CTL,
457 457 "ddi_get_soft_state failed"));
458 458 goto hxge_attach_fail2;
459 459 }
460 460
461 461 hxgep->drv_state = 0;
462 462 hxgep->dip = dip;
463 463 hxgep->instance = instance;
464 464 hxgep->p_dip = ddi_get_parent(dip);
465 465 hxgep->hxge_debug_level = hxge_debug_level;
466 466 hpi_debug_level = hxge_debug_level;
467 467
468 468 /*
469 469 * Initialize MMAC struture.
470 470 */
471 471 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
472 472 hxgep->mmac.available = hxgep->mmac.total;
473 473 for (i = 0; i < hxgep->mmac.total; i++) {
474 474 hxgep->mmac.addrs[i].set = B_FALSE;
475 475 hxgep->mmac.addrs[i].primary = B_FALSE;
476 476 }
477 477
478 478 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
479 479 &hxge_rx_dma_attr);
480 480
481 481 status = hxge_map_regs(hxgep);
482 482 if (status != HXGE_OK) {
483 483 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
484 484 goto hxge_attach_fail3;
485 485 }
486 486
487 487 status = hxge_init_common_dev(hxgep);
488 488 if (status != HXGE_OK) {
489 489 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
490 490 "hxge_init_common_dev failed"));
491 491 goto hxge_attach_fail4;
492 492 }
493 493
494 494 /*
495 495 * Setup the Ndd parameters for this instance.
496 496 */
497 497 hxge_init_param(hxgep);
498 498
499 499 /*
500 500 * Setup Register Tracing Buffer.
501 501 */
502 502 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
503 503
504 504 /* init stats ptr */
505 505 hxge_init_statsp(hxgep);
506 506
507 507 status = hxge_setup_mutexes(hxgep);
508 508 if (status != HXGE_OK) {
509 509 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
510 510 goto hxge_attach_fail;
511 511 }
512 512
513 513 /* Scrub the MSI-X memory */
514 514 hxge_msix_init(hxgep);
515 515
516 516 status = hxge_get_config_properties(hxgep);
517 517 if (status != HXGE_OK) {
518 518 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
519 519 goto hxge_attach_fail;
520 520 }
521 521
522 522 /*
523 523 * Setup the Kstats for the driver.
524 524 */
525 525 hxge_setup_kstats(hxgep);
526 526 hxge_setup_param(hxgep);
527 527
528 528 status = hxge_setup_system_dma_pages(hxgep);
529 529 if (status != HXGE_OK) {
530 530 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
531 531 goto hxge_attach_fail;
532 532 }
533 533
534 534 hxge_hw_id_init(hxgep);
535 535 hxge_hw_init_niu_common(hxgep);
536 536
537 537 status = hxge_setup_dev(hxgep);
538 538 if (status != DDI_SUCCESS) {
539 539 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
540 540 goto hxge_attach_fail;
541 541 }
542 542
543 543 status = hxge_add_intrs(hxgep);
544 544 if (status != DDI_SUCCESS) {
545 545 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
546 546 goto hxge_attach_fail;
547 547 }
548 548
549 549 /*
550 550 * Enable interrupts.
551 551 */
552 552 hxge_intrs_enable(hxgep);
553 553
554 554 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
555 555 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
556 556 "unable to register to mac layer (%d)", status));
557 557 goto hxge_attach_fail;
558 558 }
559 559 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
560 560
561 561 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
562 562 instance));
563 563
564 564 goto hxge_attach_exit;
565 565
566 566 hxge_attach_fail:
567 567 hxge_unattach(hxgep);
568 568 goto hxge_attach_fail1;
569 569
570 570 hxge_attach_fail5:
571 571 /*
572 572 * Tear down the ndd parameters setup.
573 573 */
574 574 hxge_destroy_param(hxgep);
575 575
576 576 /*
577 577 * Tear down the kstat setup.
578 578 */
579 579 hxge_destroy_kstats(hxgep);
580 580
581 581 hxge_attach_fail4:
582 582 if (hxgep->hxge_hw_p) {
583 583 hxge_uninit_common_dev(hxgep);
584 584 hxgep->hxge_hw_p = NULL;
585 585 }
586 586 hxge_attach_fail3:
587 587 /*
588 588 * Unmap the register setup.
589 589 */
590 590 hxge_unmap_regs(hxgep);
591 591
592 592 hxge_fm_fini(hxgep);
593 593
594 594 hxge_attach_fail2:
595 595 ddi_soft_state_free(hxge_list, hxgep->instance);
596 596
597 597 hxge_attach_fail1:
598 598 if (status != HXGE_OK)
599 599 status = (HXGE_ERROR | HXGE_DDI_FAILED);
600 600 hxgep = NULL;
601 601
602 602 hxge_attach_exit:
603 603 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
604 604 status));
605 605
606 606 return (status);
607 607 }
608 608
/*
 * hxge_detach - DDI detach(9E) entry point.
 *
 * Handles DDI_DETACH (full teardown), DDI_SUSPEND and DDI_PM_SUSPEND
 * (quiesce the instance but keep its state).  For DDI_SUSPEND the
 * device is suspended only if it is not already power-managed
 * suspended.  For DDI_DETACH the instance is unregistered from the
 * mac layer first; if mac_unregister() fails (e.g. the interface is
 * still plumbed) the detach is refused.
 */
static int
hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_hxge_t	hxgep = NULL;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
	instance = ddi_get_instance(dip);
	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = DDI_FAILURE;
		goto hxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		hxgep->suspended = DDI_PM_SUSPEND;
		hxge_suspend(hxgep);
		break;

	case DDI_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
		/* Already PM-suspended means the device is quiesced. */
		if (hxgep->suspended != DDI_PM_SUSPEND) {
			hxgep->suspended = DDI_SUSPEND;
			hxge_suspend(hxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
		break;
	}

	if (cmd != DDI_DETACH)
		goto hxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	hxgep->suspended = cmd;

	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}
673 673
/*
 * hxge_unattach - release all resources acquired by hxge_attach().
 *
 * Teardown proceeds in roughly the reverse of the attach order:
 * common/shared state, timer, interrupts, device resources, ndd
 * parameters, kstats, device properties, then a hardware block reset
 * before the registers are unmapped, mutexes destroyed, and the soft
 * state freed.  A no-op if the instance never mapped its registers.
 */
static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	/* Nothing to undo if register mapping never happened. */
	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop interrupts. */
	hxge_intrs_disable(hxgep);

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/*
	 * Remove the list of ndd parameters which were setup during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
	 * previous state before unmapping the registers.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
	HXGE_DELAY(1000);

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/* Destroy all mutexes. */
	hxge_destroy_mutexes(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}
741 741
/*
 * hxge_map_regs - map the device's three register sets.
 *
 * Register set 0 is PCI configuration space, set 1 the device PIO
 * registers, and set 2 the MSI/MSI-X registers.  On success the
 * dev_regs bookkeeping structure is hung off hxgep->dev_regs and the
 * HPI handle macros are seeded with the access handles.  On failure
 * previously created mappings are released in reverse order via the
 * goto ladder and the dev_regs structure is freed.
 *
 * Returns HXGE_OK, HXGE_ERROR, or status OR'ed with
 * (HXGE_ERROR | HXGE_DDI_FAILED) on a DDI mapping failure.
 */
static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;	/* NOTE(review): unused here -- confirm */
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	/*
	 * NOTE(review): regsize is off_t but is printed with %x in the
	 * debug messages below -- a format/argument size mismatch on
	 * LP64; confirm whether HXGE_DEBUG_MSG tolerates this.
	 */
	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	/* Register set 0: PCI configuration space. */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	hxgep->dev_regs = dev_regs;

	/* Seed the HPI handle macros with the new access handles. */
	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

hxge_map_regs_fail3:
	/*
	 * NOTE(review): no goto in this function targets
	 * hxge_map_regs_fail3; this unmap step appears unreachable.
	 */
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}
860 860
861 861 static void
862 862 hxge_unmap_regs(p_hxge_t hxgep)
863 863 {
864 864 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
865 865 if (hxgep->dev_regs) {
866 866 if (hxgep->dev_regs->hxge_pciregh) {
867 867 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
868 868 "==> hxge_unmap_regs: bus"));
869 869 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
870 870 hxgep->dev_regs->hxge_pciregh = NULL;
871 871 }
872 872
873 873 if (hxgep->dev_regs->hxge_regh) {
874 874 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
875 875 "==> hxge_unmap_regs: device registers"));
876 876 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
877 877 hxgep->dev_regs->hxge_regh = NULL;
878 878 }
879 879
880 880 if (hxgep->dev_regs->hxge_msix_regh) {
881 881 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
882 882 "==> hxge_unmap_regs: device interrupts"));
883 883 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
884 884 hxgep->dev_regs->hxge_msix_regh = NULL;
885 885 }
886 886 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
887 887 hxgep->dev_regs = NULL;
888 888 }
889 889 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
890 890 }
891 891
892 892 static hxge_status_t
893 893 hxge_setup_mutexes(p_hxge_t hxgep)
894 894 {
895 895 int ddi_status = DDI_SUCCESS;
896 896 hxge_status_t status = HXGE_OK;
897 897
898 898 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
899 899
900 900 /*
901 901 * Get the interrupt cookie so the mutexes can be Initialised.
902 902 */
903 903 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
904 904 &hxgep->interrupt_cookie);
905 905
906 906 if (ddi_status != DDI_SUCCESS) {
907 907 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
908 908 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
909 909 goto hxge_setup_mutexes_exit;
910 910 }
911 911
912 912 /*
913 913 * Initialize mutex's for this device.
914 914 */
915 915 MUTEX_INIT(hxgep->genlock, NULL,
916 916 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
917 917 MUTEX_INIT(&hxgep->vmac_lock, NULL,
918 918 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
919 919 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
920 920 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
921 921 RW_INIT(&hxgep->filter_lock, NULL,
922 922 RW_DRIVER, (void *) hxgep->interrupt_cookie);
923 923 MUTEX_INIT(&hxgep->pio_lock, NULL,
924 924 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
925 925 MUTEX_INIT(&hxgep->timeout.lock, NULL,
926 926 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927 927
928 928 hxge_setup_mutexes_exit:
929 929 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
930 930 "<== hxge_setup_mutexes status = %x", status));
931 931
932 932 if (ddi_status != DDI_SUCCESS)
933 933 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
934 934
935 935 return (status);
936 936 }
937 937
/*
 * hxge_destroy_mutexes() -- tear down every lock created by
 * hxge_setup_mutexes(), plus the global debug lock if this driver
 * instance was the one to lazily create it (see hxge_debug_msg()).
 */
static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->vmac_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);
	MUTEX_DESTROY(&hxgep->timeout.lock);

	/*
	 * The debug lock is a module-global, created on first use by
	 * hxge_debug_msg(); destroy it once and clear the flag so a
	 * later message can re-create it.
	 */
	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}
956 956
/*
 * hxge_init() -- bring the hardware to an operational state:
 * allocate the rx/tx memory pools, enable the TX and RX DMA
 * channels, program the TCAM classifier and the VMAC, set the
 * default MAC address, and finally enable interrupts.
 *
 * Idempotent: returns immediately if STATE_HW_INITIALIZED is set.
 * On failure, the goto ladder unwinds exactly the steps that
 * completed, in reverse order.
 */
hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	/* Already initialized -- nothing to do. */
	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

	/* Unwind in reverse order of the setup steps above. */
hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}
1048 1048
1049 1049 timeout_id_t
1050 1050 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1051 1051 {
1052 1052 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1053 1053 return (timeout(func, (caddr_t)hxgep,
1054 1054 drv_usectohz(1000 * msec)));
1055 1055 }
1056 1056 return (NULL);
1057 1057 }
1058 1058
1059 1059 /*ARGSUSED*/
1060 1060 void
1061 1061 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1062 1062 {
1063 1063 if (timerid) {
1064 1064 (void) untimeout(timerid);
1065 1065 }
1066 1066 }
1067 1067
/*
 * hxge_uninit() -- quiesce and tear down the hardware state built by
 * hxge_init(): stop the timer, disable interrupts, quiesce the RX
 * path before freeing classification state, stop both DMA directions,
 * release the channels, disable the TX VMAC, and free the memory
 * pools.  Clears STATE_HW_INITIALIZED when done.
 */
void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	/* Nothing to undo if hxge_init() never completed. */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side. */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side. */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side. */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}
1110 1110
1111 1111 /*ARGSUSED*/
1112 1112 /*VARARGS*/
1113 1113 void
1114 1114 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1115 1115 {
1116 1116 char msg_buffer[1048];
1117 1117 char prefix_buffer[32];
1118 1118 int instance;
1119 1119 uint64_t debug_level;
1120 1120 int cmn_level = CE_CONT;
1121 1121 va_list ap;
1122 1122
1123 1123 debug_level = (hxgep == NULL) ? hxge_debug_level :
1124 1124 hxgep->hxge_debug_level;
1125 1125
1126 1126 if ((level & debug_level) || (level == HXGE_NOTE) ||
1127 1127 (level == HXGE_ERR_CTL)) {
1128 1128 /* do the msg processing */
1129 1129 if (hxge_debug_init == 0) {
1130 1130 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1131 1131 hxge_debug_init = 1;
1132 1132 }
1133 1133
1134 1134 MUTEX_ENTER(&hxgedebuglock);
1135 1135
1136 1136 if ((level & HXGE_NOTE)) {
1137 1137 cmn_level = CE_NOTE;
1138 1138 }
1139 1139
1140 1140 if (level & HXGE_ERR_CTL) {
1141 1141 cmn_level = CE_WARN;
1142 1142 }
1143 1143
1144 1144 va_start(ap, fmt);
1145 1145 (void) vsprintf(msg_buffer, fmt, ap);
1146 1146 va_end(ap);
1147 1147
1148 1148 if (hxgep == NULL) {
1149 1149 instance = -1;
1150 1150 (void) sprintf(prefix_buffer, "%s :", "hxge");
1151 1151 } else {
1152 1152 instance = hxgep->instance;
1153 1153 (void) sprintf(prefix_buffer,
1154 1154 "%s%d :", "hxge", instance);
1155 1155 }
1156 1156
1157 1157 MUTEX_EXIT(&hxgedebuglock);
1158 1158 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1159 1159 }
1160 1160 }
1161 1161
1162 1162 char *
1163 1163 hxge_dump_packet(char *addr, int size)
1164 1164 {
1165 1165 uchar_t *ap = (uchar_t *)addr;
1166 1166 int i;
1167 1167 static char etherbuf[1024];
1168 1168 char *cp = etherbuf;
1169 1169 char digits[] = "0123456789abcdef";
1170 1170
1171 1171 if (!size)
1172 1172 size = 60;
1173 1173
1174 1174 if (size > MAX_DUMP_SZ) {
1175 1175 /* Dump the leading bytes */
1176 1176 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1177 1177 if (*ap > 0x0f)
1178 1178 *cp++ = digits[*ap >> 4];
1179 1179 *cp++ = digits[*ap++ & 0xf];
1180 1180 *cp++ = ':';
1181 1181 }
1182 1182 for (i = 0; i < 20; i++)
1183 1183 *cp++ = '.';
1184 1184 /* Dump the last MAX_DUMP_SZ/2 bytes */
1185 1185 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1186 1186 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1187 1187 if (*ap > 0x0f)
1188 1188 *cp++ = digits[*ap >> 4];
1189 1189 *cp++ = digits[*ap++ & 0xf];
1190 1190 *cp++ = ':';
1191 1191 }
1192 1192 } else {
1193 1193 for (i = 0; i < size; i++) {
1194 1194 if (*ap > 0x0f)
1195 1195 *cp++ = digits[*ap >> 4];
1196 1196 *cp++ = digits[*ap++ & 0xf];
1197 1197 *cp++ = ':';
1198 1198 }
1199 1199 }
1200 1200 *--cp = 0;
1201 1201 return (etherbuf);
1202 1202 }
1203 1203
/*
 * hxge_suspend() -- DDI_SUSPEND path: cancel the link poll timer,
 * mask interrupts, and stop the hardware via hxge_destroy_dev().
 */
static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	/*
	 * Stop the link status timer before hxge_intrs_disable() to avoid
	 * accessing the MSIX table simultaneously. Note that the timer
	 * routine polls for MSIX parity errors.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id)
		(void) untimeout(hxgep->timeout.id);
	MUTEX_EXIT(&hxgep->timeout.lock);

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}
1224 1224
/*
 * hxge_resume() -- DDI_RESUME path: restart both DMA directions and
 * the VMAC, re-enable interrupts, then re-arm the link poll timer.
 * 'suspended' is set to DDI_RESUME for the duration so concurrent
 * paths (e.g. hxge_start_timer()) know a resume is in progress.
 */
static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	/*
	 * Resume the link status timer after hxge_intrs_enable to avoid
	 * accessing MSIX table simultaneously.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    hxgep->timeout.ticks);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}
1257 1257
1258 1258 static hxge_status_t
1259 1259 hxge_setup_dev(p_hxge_t hxgep)
1260 1260 {
1261 1261 hxge_status_t status = HXGE_OK;
1262 1262
1263 1263 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1264 1264
1265 1265 status = hxge_link_init(hxgep);
1266 1266 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1267 1267 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1268 1268 "Bad register acc handle"));
1269 1269 status = HXGE_ERROR;
1270 1270 }
1271 1271
1272 1272 if (status != HXGE_OK) {
1273 1273 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1274 1274 " hxge_setup_dev status (link init 0x%08x)", status));
1275 1275 goto hxge_setup_dev_exit;
1276 1276 }
1277 1277
1278 1278 hxge_setup_dev_exit:
1279 1279 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1280 1280 "<== hxge_setup_dev status = 0x%08x", status));
1281 1281
1282 1282 return (status);
1283 1283 }
1284 1284
/*
 * hxge_destroy_dev() -- counterpart of hxge_setup_dev(); stops the
 * hardware via hxge_hw_stop().
 */
static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}
1294 1294
/*
 * hxge_setup_system_dma_pages() -- derive the effective page size
 * (capped by the IOMMU page size and the hardware's 8K maximum), set
 * the RX block size / RBR block-size code accordingly, record the
 * resulting alignment in the global rx/tx DMA attributes, and probe
 * the system DMA burst size via a throwaway handle bind.
 *
 * Returns HXGE_OK, or (HXGE_ERROR | HXGE_DDI_FAILED) if the probe
 * handle could not be allocated or bound.
 */
static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	/* Clamp the page size to what the IOMMU and hardware support. */
	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra support up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	/*
	 * Only 4K and 8K block sizes are supported; anything else
	 * falls back to 4K.
	 */
	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	/* Global DMA attributes: all later allocations use this alignment. */
	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	/*
	 * Bind the spare handle to an arbitrary kernel address purely to
	 * learn the burst sizes the platform grants; it is unbound and
	 * freed immediately afterward.
	 */
	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}
1392 1392
1393 1393 static hxge_status_t
1394 1394 hxge_alloc_mem_pool(p_hxge_t hxgep)
1395 1395 {
1396 1396 hxge_status_t status = HXGE_OK;
1397 1397
1398 1398 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1399 1399
1400 1400 status = hxge_alloc_rx_mem_pool(hxgep);
1401 1401 if (status != HXGE_OK) {
1402 1402 return (HXGE_ERROR);
1403 1403 }
1404 1404
1405 1405 status = hxge_alloc_tx_mem_pool(hxgep);
1406 1406 if (status != HXGE_OK) {
1407 1407 hxge_free_rx_mem_pool(hxgep);
1408 1408 return (HXGE_ERROR);
1409 1409 }
1410 1410
1411 1411 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1412 1412 return (HXGE_OK);
1413 1413 }
1414 1414
/*
 * hxge_free_mem_pool() -- release both memory pools created by
 * hxge_alloc_mem_pool().
 */
static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}
1425 1425
1426 1426 static hxge_status_t
1427 1427 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1428 1428 {
1429 1429 int i, j;
1430 1430 uint32_t ndmas, st_rdc;
1431 1431 p_hxge_dma_pt_cfg_t p_all_cfgp;
1432 1432 p_hxge_hw_pt_cfg_t p_cfgp;
1433 1433 p_hxge_dma_pool_t dma_poolp;
1434 1434 p_hxge_dma_common_t *dma_buf_p;
1435 1435 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1436 1436 p_hxge_dma_common_t *dma_rbr_cntl_p;
1437 1437 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1438 1438 p_hxge_dma_common_t *dma_rcr_cntl_p;
1439 1439 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1440 1440 p_hxge_dma_common_t *dma_mbox_cntl_p;
1441 1441 size_t rx_buf_alloc_size;
1442 1442 size_t rx_rbr_cntl_alloc_size;
1443 1443 size_t rx_rcr_cntl_alloc_size;
1444 1444 size_t rx_mbox_cntl_alloc_size;
1445 1445 uint32_t *num_chunks; /* per dma */
1446 1446 hxge_status_t status = HXGE_OK;
1447 1447
1448 1448 uint32_t hxge_port_rbr_size;
1449 1449 uint32_t hxge_port_rbr_spare_size;
1450 1450 uint32_t hxge_port_rcr_size;
1451 1451
1452 1452 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1453 1453
1454 1454 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1455 1455 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1456 1456 st_rdc = p_cfgp->start_rdc;
1457 1457 ndmas = p_cfgp->max_rdcs;
1458 1458
1459 1459 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1460 1460 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1461 1461
1462 1462 /*
1463 1463 * Allocate memory for each receive DMA channel.
1464 1464 */
1465 1465 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1466 1466 KM_SLEEP);
1467 1467 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1468 1468 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1469 1469
1470 1470 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1471 1471 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1472 1472 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1473 1473 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1474 1474 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1475 1475 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1476 1476 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1477 1477 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1478 1478 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1479 1479 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1480 1480 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1481 1481 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1482 1482
1483 1483 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1484 1484 KM_SLEEP);
1485 1485
1486 1486 /*
1487 1487 * Assume that each DMA channel will be configured with default block
1488 1488 * size. rbr block counts are mod of batch count (16).
1489 1489 */
1490 1490 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1491 1491 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1492 1492
1493 1493 if (!hxge_port_rbr_size) {
1494 1494 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1495 1495 }
1496 1496
1497 1497 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1498 1498 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1499 1499 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1500 1500 }
1501 1501
1502 1502 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1503 1503 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1504 1504
1505 1505 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1506 1506 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1507 1507 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1508 1508 }
1509 1509
1510 1510 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1511 1511 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1512 1512
1513 1513 /*
1514 1514 * Addresses of receive block ring, receive completion ring and the
1515 1515 * mailbox must be all cache-aligned (64 bytes).
1516 1516 */
1517 1517 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1518 1518 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1519 1519 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1520 1520 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1521 1521
1522 1522 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1523 1523 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1524 1524 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1525 1525 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1526 1526 hxge_port_rcr_size, rx_cntl_alloc_size));
1527 1527
1528 1528 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1529 1529 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1530 1530
1531 1531 /*
1532 1532 * Allocate memory for receive buffers and descriptor rings. Replace
1533 1533 * allocation functions with interface functions provided by the
1534 1534 * partition manager when it is available.
1535 1535 */
1536 1536 /*
1537 1537 * Allocate memory for the receive buffer blocks.
1538 1538 */
1539 1539 for (i = 0; i < ndmas; i++) {
1540 1540 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1541 1541 " hxge_alloc_rx_mem_pool to alloc mem: "
1542 1542 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1543 1543 i, dma_buf_p[i], &dma_buf_p[i]));
1544 1544
1545 1545 num_chunks[i] = 0;
1546 1546
1547 1547 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1548 1548 rx_buf_alloc_size, hxgep->rx_default_block_size,
1549 1549 &num_chunks[i]);
1550 1550 if (status != HXGE_OK) {
1551 1551 break;
1552 1552 }
1553 1553
1554 1554 st_rdc++;
1555 1555 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1556 1556 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1557 1557 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1558 1558 dma_buf_p[i], &dma_buf_p[i]));
1559 1559 }
1560 1560
1561 1561 if (i < ndmas) {
1562 1562 goto hxge_alloc_rx_mem_fail1;
1563 1563 }
1564 1564
1565 1565 /*
1566 1566 * Allocate memory for descriptor rings and mailbox.
1567 1567 */
1568 1568 st_rdc = p_cfgp->start_rdc;
1569 1569 for (j = 0; j < ndmas; j++) {
1570 1570 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1571 1571 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1572 1572 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1573 1573 break;
1574 1574 }
1575 1575
1576 1576 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1577 1577 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1578 1578 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1579 1579 break;
1580 1580 }
1581 1581
1582 1582 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1583 1583 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1584 1584 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1585 1585 break;
1586 1586 }
1587 1587 st_rdc++;
1588 1588 }
1589 1589
1590 1590 if (j < ndmas) {
1591 1591 goto hxge_alloc_rx_mem_fail2;
1592 1592 }
1593 1593
1594 1594 dma_poolp->ndmas = ndmas;
1595 1595 dma_poolp->num_chunks = num_chunks;
1596 1596 dma_poolp->buf_allocated = B_TRUE;
1597 1597 hxgep->rx_buf_pool_p = dma_poolp;
1598 1598 dma_poolp->dma_buf_pool_p = dma_buf_p;
1599 1599
1600 1600 dma_rbr_cntl_poolp->ndmas = ndmas;
1601 1601 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1602 1602 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1603 1603 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1604 1604
1605 1605 dma_rcr_cntl_poolp->ndmas = ndmas;
1606 1606 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1607 1607 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1608 1608 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1609 1609
1610 1610 dma_mbox_cntl_poolp->ndmas = ndmas;
1611 1611 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1612 1612 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1613 1613 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1614 1614
1615 1615 goto hxge_alloc_rx_mem_pool_exit;
1616 1616
1617 1617 hxge_alloc_rx_mem_fail2:
1618 1618 /* Free control buffers */
1619 1619 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1620 1620 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1621 1621 for (; j >= 0; j--) {
1622 1622 hxge_free_rx_cntl_dma(hxgep,
1623 1623 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1624 1624 hxge_free_rx_cntl_dma(hxgep,
1625 1625 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1626 1626 hxge_free_rx_cntl_dma(hxgep,
1627 1627 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1628 1628 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1629 1629 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1630 1630 }
1631 1631 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1632 1632 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1633 1633
1634 1634 hxge_alloc_rx_mem_fail1:
1635 1635 /* Free data buffers */
1636 1636 i--;
1637 1637 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1638 1638 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1639 1639 for (; i >= 0; i--) {
1640 1640 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1641 1641 num_chunks[i]);
1642 1642 }
1643 1643 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1644 1644 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1645 1645
1646 1646 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1647 1647 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1648 1648 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1649 1649 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1650 1650 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1651 1651 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1652 1652 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1653 1653 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1654 1654 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1655 1655
1656 1656 hxge_alloc_rx_mem_pool_exit:
1657 1657 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1658 1658 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1659 1659
1660 1660 return (status);
1661 1661 }
1662 1662
/*
 * hxge_free_rx_mem_pool() -- release everything published by
 * hxge_alloc_rx_mem_pool(): per-channel data buffer chunks, the three
 * control areas, the per-channel descriptor arrays, and the pool
 * headers themselves.  Bails out early (without freeing anything) if
 * any of the four pools is missing or was never marked allocated,
 * since the pools are published all-or-nothing.
 */
static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t i, ndmas;
	p_hxge_dma_pool_t dma_poolp;
	p_hxge_dma_common_t *dma_buf_p;
	p_hxge_dma_pool_t dma_rbr_cntl_poolp;
	p_hxge_dma_common_t *dma_rbr_cntl_p;
	p_hxge_dma_pool_t dma_rcr_cntl_poolp;
	p_hxge_dma_common_t *dma_rcr_cntl_p;
	p_hxge_dma_pool_t dma_mbox_cntl_poolp;
	p_hxge_dma_common_t *dma_mbox_cntl_p;
	uint32_t *num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
	if (dma_rbr_cntl_poolp == NULL ||
	    (!dma_rbr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rbr cntl buf pool or rbr cntl buf not allocated"));
		return;
	}

	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
	if (dma_rcr_cntl_poolp == NULL ||
	    (!dma_rcr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rcr cntl buf pool or rcr cntl buf not allocated"));
		return;
	}

	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
	if (dma_mbox_cntl_poolp == NULL ||
	    (!dma_mbox_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null mbox cntl buf pool or mbox cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_rbr_cntl_poolp->ndmas;

	/* Release the DMA-mapped memory for every channel... */
	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
	}

	/* ... then the per-channel descriptor arrays themselves ... */
	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	/* ... and finally the pool bookkeeping structures. */
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->rx_buf_pool_p = NULL;
	hxgep->rx_rbr_cntl_pool_p = NULL;
	hxgep->rx_rcr_cntl_pool_p = NULL;
	hxgep->rx_mbox_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
}
1756 1756
/*
 * hxge_alloc_rx_buf_dma() -- allocate 'alloc_size' bytes of receive
 * buffer memory for one channel as up to HXGE_DMA_BLOCK chunks.  The
 * chunk size starts at the smallest entry of alloc_sizes[] that covers
 * the request; each allocation failure steps down to the next smaller
 * size.  On success *dmap receives the chunk array and *num_chunks the
 * count; on failure the array is freed and an error status returned.
 */
static hxge_status_t
hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_hxge_dma_common_t rx_dmap;
	hxge_status_t status = HXGE_OK;
	size_t total_alloc_size;
	size_t allocated = 0;
	int i, size_index, array_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));

	rx_dmap = (p_hxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

	/*
	 * Pick the smallest chunk size in alloc_sizes[] that is at least
	 * alloc_size; if none is big enough, use the largest.
	 */
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	/*
	 * Keep allocating chunks until the request is covered; on a
	 * failed allocation fall back to the next smaller chunk size.
	 */
	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;

		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
		    &hxge_rx_dma_attr, rx_dmap[i].alength,
		    &hxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_hxge_dma_common_t)(&rx_dmap[i]));
		if (status != HXGE_OK) {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
			    " for size: %d", alloc_sizes[size_index]));
			size_index--;
		} else {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx ",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_alloc_rx_buf_dma failed due to"
		    " allocated(%d) < required(%d)",
		    allocated, total_alloc_size));
		goto hxge_alloc_rx_mem_fail1;
	}

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));

	*num_chunks = i;
	*dmap = rx_dmap;

	goto hxge_alloc_rx_mem_exit;

hxge_alloc_rx_mem_fail1:
	/*
	 * NOTE(review): chunks already DMA-allocated in rx_dmap are
	 * presumably released by the caller's unwind path via
	 * hxge_free_rx_buf_dma -- confirm against callers.
	 */
	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);

hxge_alloc_rx_mem_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}
1850 1850
1851 1851 /*ARGSUSED*/
1852 1852 static void
1853 1853 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1854 1854 uint32_t num_chunks)
1855 1855 {
1856 1856 int i;
1857 1857
1858 1858 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1859 1859 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1860 1860
1861 1861 for (i = 0; i < num_chunks; i++) {
1862 1862 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1863 1863 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1864 1864 hxge_dma_mem_free(dmap++);
1865 1865 }
1866 1866
1867 1867 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1868 1868 }
1869 1869
1870 1870 /*ARGSUSED*/
1871 1871 static hxge_status_t
1872 1872 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1873 1873 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1874 1874 {
1875 1875 p_hxge_dma_common_t rx_dmap;
1876 1876 hxge_status_t status = HXGE_OK;
1877 1877
1878 1878 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1879 1879
1880 1880 rx_dmap = (p_hxge_dma_common_t)
1881 1881 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1882 1882
1883 1883 rx_dmap->contig_alloc_type = B_FALSE;
1884 1884
1885 1885 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1886 1886 attr, size, &hxge_dev_desc_dma_acc_attr,
1887 1887 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1888 1888 if (status != HXGE_OK) {
1889 1889 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1890 1890 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1891 1891 " for size: %d", size));
1892 1892 goto hxge_alloc_rx_cntl_dma_fail1;
1893 1893 }
1894 1894
1895 1895 *dmap = rx_dmap;
1896 1896
1897 1897 goto hxge_alloc_rx_cntl_dma_exit;
1898 1898
1899 1899 hxge_alloc_rx_cntl_dma_fail1:
1900 1900 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1901 1901
1902 1902 hxge_alloc_rx_cntl_dma_exit:
1903 1903 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1904 1904 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1905 1905
1906 1906 return (status);
1907 1907 }
1908 1908
1909 1909 /*ARGSUSED*/
1910 1910 static void
1911 1911 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1912 1912 {
1913 1913 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1914 1914
1915 1915 hxge_dma_mem_free(dmap);
1916 1916
1917 1917 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1918 1918 }
1919 1919
/*
 * hxge_alloc_tx_mem_pool() -- allocate transmit buffer memory and
 * control (descriptor ring + mailbox) memory for every transmit DMA
 * channel, publishing the results in hxgep->tx_buf_pool_p and
 * hxgep->tx_cntl_pool_p.  On any per-channel failure all partial
 * allocations are unwound and the failing status is returned.
 */
static hxge_status_t
hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
{
	hxge_status_t		status = HXGE_OK;
	int			i, j;
	uint32_t		ndmas, st_tdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	size_t			tx_buf_alloc_size;
	size_t			tx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));

	/* Channel range comes from the partition hardware config. */
	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_tdc = p_cfgp->start_tdc;
	ndmas = p_cfgp->max_tdcs;

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
	/*
	 * Allocate memory for each transmit DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;

	/*
	 * Assume that each DMA channel will be configured with default
	 * transmit bufer size for copying transmit data. (For packet payload
	 * over this limit, packets will not be copied.)
	 */
	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);

	/*
	 * Addresses of transmit descriptor ring and the mailbox must be all
	 * cache-aligned (64 bytes).
	 */
	tx_cntl_alloc_size = hxge_tx_ring_size;
	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Allocate memory for transmit buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	for (i = 0; i < ndmas; i++) {
		num_chunks[i] = 0;
		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	/* Channel i failed above: unwind buffers for channels 0..i-1. */
	if (i < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail1;
	}

	st_tdc = p_cfgp->start_tdc;

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	for (j = 0; j < ndmas; j++) {
		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
		    tx_cntl_alloc_size);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	/* Channel j failed: unwind control areas, then fall into fail1. */
	if (j < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail2;
	}

	/* Success: publish both pools on the soft state. */
	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	dma_poolp->dma_buf_pool_p = dma_buf_p;
	hxgep->tx_buf_pool_p = dma_poolp;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
	hxgep->tx_cntl_pool_p = dma_cntl_poolp;

	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));

	goto hxge_alloc_tx_mem_pool_exit;

hxge_alloc_tx_mem_pool_fail2:
	/* Free control buffers */
	j--;
	for (; j >= 0; j--) {
		hxge_free_tx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_cntl_p[j]);
	}

hxge_alloc_tx_mem_pool_fail1:
	/* Free data buffers */
	i--;
	for (; i >= 0; i--) {
		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}

	/* Release the bookkeeping arrays themselves. */
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);

hxge_alloc_tx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));

	return (status);
}
2063 2063
2064 2064 static hxge_status_t
2065 2065 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2066 2066 p_hxge_dma_common_t *dmap, size_t alloc_size,
2067 2067 size_t block_size, uint32_t *num_chunks)
2068 2068 {
2069 2069 p_hxge_dma_common_t tx_dmap;
2070 2070 hxge_status_t status = HXGE_OK;
2071 2071 size_t total_alloc_size;
2072 2072 size_t allocated = 0;
2073 2073 int i, size_index, array_size;
2074 2074
2075 2075 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2076 2076
2077 2077 tx_dmap = (p_hxge_dma_common_t)
2078 2078 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2079 2079
2080 2080 total_alloc_size = alloc_size;
2081 2081 i = 0;
2082 2082 size_index = 0;
2083 2083 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2084 2084 while ((size_index < array_size) &&
2085 2085 (alloc_sizes[size_index] < alloc_size))
2086 2086 size_index++;
2087 2087 if (size_index >= array_size) {
2088 2088 size_index = array_size - 1;
2089 2089 }
2090 2090
2091 2091 while ((allocated < total_alloc_size) &&
2092 2092 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2093 2093 tx_dmap[i].dma_chunk_index = i;
2094 2094 tx_dmap[i].block_size = block_size;
2095 2095 tx_dmap[i].alength = alloc_sizes[size_index];
2096 2096 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2097 2097 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2098 2098 tx_dmap[i].dma_channel = dma_channel;
2099 2099 tx_dmap[i].contig_alloc_type = B_FALSE;
2100 2100
2101 2101 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2102 2102 &hxge_tx_dma_attr, tx_dmap[i].alength,
2103 2103 &hxge_dev_buf_dma_acc_attr,
2104 2104 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2105 2105 (p_hxge_dma_common_t)(&tx_dmap[i]));
2106 2106 if (status != HXGE_OK) {
2107 2107 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2108 2108 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2109 2109 " for size: %d", alloc_sizes[size_index]));
2110 2110 size_index--;
2111 2111 } else {
2112 2112 i++;
2113 2113 allocated += alloc_sizes[size_index];
2114 2114 }
2115 2115 }
2116 2116
2117 2117 if (allocated < total_alloc_size) {
2118 2118 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2119 2119 " hxge_alloc_tx_buf_dma: failed due to"
2120 2120 " allocated(%d) < required(%d)",
2121 2121 allocated, total_alloc_size));
2122 2122 goto hxge_alloc_tx_mem_fail1;
2123 2123 }
2124 2124
2125 2125 *num_chunks = i;
2126 2126 *dmap = tx_dmap;
2127 2127 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2128 2128 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2129 2129 *dmap, i));
2130 2130 goto hxge_alloc_tx_mem_exit;
2131 2131
2132 2132 hxge_alloc_tx_mem_fail1:
2133 2133 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2134 2134
2135 2135 hxge_alloc_tx_mem_exit:
2136 2136 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2137 2137 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2138 2138
2139 2139 return (status);
2140 2140 }
2141 2141
2142 2142 /*ARGSUSED*/
2143 2143 static void
2144 2144 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2145 2145 uint32_t num_chunks)
2146 2146 {
2147 2147 int i;
2148 2148
2149 2149 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2150 2150
2151 2151 for (i = 0; i < num_chunks; i++) {
2152 2152 hxge_dma_mem_free(dmap++);
2153 2153 }
2154 2154
2155 2155 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2156 2156 }
2157 2157
2158 2158 /*ARGSUSED*/
2159 2159 static hxge_status_t
2160 2160 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2161 2161 p_hxge_dma_common_t *dmap, size_t size)
2162 2162 {
2163 2163 p_hxge_dma_common_t tx_dmap;
2164 2164 hxge_status_t status = HXGE_OK;
2165 2165
2166 2166 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2167 2167
2168 2168 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2169 2169 KM_SLEEP);
2170 2170
2171 2171 tx_dmap->contig_alloc_type = B_FALSE;
2172 2172
2173 2173 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2174 2174 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2175 2175 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2176 2176 if (status != HXGE_OK) {
2177 2177 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2178 2178 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2179 2179 " for size: %d", size));
2180 2180 goto hxge_alloc_tx_cntl_dma_fail1;
2181 2181 }
2182 2182
2183 2183 *dmap = tx_dmap;
2184 2184
2185 2185 goto hxge_alloc_tx_cntl_dma_exit;
2186 2186
2187 2187 hxge_alloc_tx_cntl_dma_fail1:
2188 2188 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2189 2189
2190 2190 hxge_alloc_tx_cntl_dma_exit:
2191 2191 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2192 2192 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2193 2193
2194 2194 return (status);
2195 2195 }
2196 2196
2197 2197 /*ARGSUSED*/
2198 2198 static void
2199 2199 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2200 2200 {
2201 2201 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2202 2202
2203 2203 hxge_dma_mem_free(dmap);
2204 2204
2205 2205 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2206 2206 }
2207 2207
/*
 * hxge_free_tx_mem_pool() -- release everything set up by
 * hxge_alloc_tx_mem_pool(): per-channel transmit buffers, the
 * descriptor/mailbox areas, and the pool bookkeeping, then clear
 * the pool pointers on the soft state.
 */
static void
hxge_free_tx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));

	/* Nothing to do unless both pools were fully set up. */
	dma_poolp = hxgep->tx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_free_tx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	/* Both pools cover the same channel range (ndmas). */
	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
	}

	/* Free the per-channel chunk arrays and control descriptors. */
	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->tx_buf_pool_p = NULL;
	hxgep->tx_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
}
2267 2267
/*
 * hxge_dma_mem_alloc() -- allocate, map, and DMA-bind one memory area.
 *
 * On success dma_p holds the DMA handle, access handle, kernel
 * virtual address (kaddrp) and the device view (ioaddr_pp, taken
 * from the single DMA cookie).  The binding must resolve to exactly
 * one cookie; any intermediate failure unwinds what was set up and
 * leaves the handles NULL.  The "method" argument is unused.
 */
/*ARGSUSED*/
static hxge_status_t
hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_hxge_dma_common_t dma_p)
{
	caddr_t kaddrp;
	int ddi_status = DDI_SUCCESS;

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = NULL;

	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
	    &dma_p->acc_handle);
	if (ddi_status != DDI_SUCCESS) {
		/* The caller will decide whether it is fatal */
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Never accept an area shorter than requested. */
	if (dma_p->alength < length) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
		ddi_dma_mem_free(&dma_p->acc_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->acc_handle = NULL;
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
	    &dma_p->dma_cookie, &dma_p->ncookies);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:di_dma_addr_bind failed "
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Callers program one base address per area: require 1 cookie. */
	if (dma_p->ncookies != 1) {
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	dma_p->kaddrp = kaddrp;
#if defined(__i386)
	/* 32-bit kernel: dmac_laddress must be narrowed explicitly. */
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
#endif

	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_ladress from cookie $%p dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress,
	    dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (HXGE_OK);
}
2373 2373
2374 2374 static void
2375 2375 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2376 2376 {
2377 2377 if (dma_p == NULL)
2378 2378 return;
2379 2379
2380 2380 if (dma_p->dma_handle != NULL) {
2381 2381 if (dma_p->ncookies) {
2382 2382 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2383 2383 dma_p->ncookies = 0;
2384 2384 }
2385 2385 ddi_dma_free_handle(&dma_p->dma_handle);
2386 2386 dma_p->dma_handle = NULL;
2387 2387 }
2388 2388
2389 2389 if (dma_p->acc_handle != NULL) {
2390 2390 ddi_dma_mem_free(&dma_p->acc_handle);
2391 2391 dma_p->acc_handle = NULL;
2392 2392 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2393 2393 }
2394 2394
2395 2395 dma_p->kaddrp = NULL;
2396 2396 dma_p->alength = NULL;
2397 2397 }
2398 2398
/*
 * hxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is open to prepare the hardware ready for sending
 * and transmitting packets.
 */
static int
hxge_m_start(void *arg)
{
	p_hxge_t hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));

	MUTEX_ENTER(hxgep->genlock);

	/* Bring the hardware up; EIO tells the MAC layer start failed. */
	if (hxge_init(hxgep) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_m_start: initialization failed"));
		MUTEX_EXIT(hxgep->genlock);
		return (EIO);
	}

	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
		/*
		 * Start timer to check the system error and tx hangs
		 */
		hxgep->hxge_timerid = hxge_start_timer(hxgep,
		    hxge_check_hw_state, HXGE_CHECK_TIMER);

		hxgep->hxge_mac_state = HXGE_MAC_STARTED;

		hxgep->timeout.link_status = 0;
		hxgep->timeout.report_link_status = B_TRUE;
		/* Poll the link every two seconds. */
		hxgep->timeout.ticks = drv_sectohz(2);

		/* Start the link status timer to check the link status */
		MUTEX_ENTER(&hxgep->timeout.lock);
		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
		    hxgep->timeout.ticks);
		MUTEX_EXIT(&hxgep->timeout.lock);
	}

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));

	return (0);
}
2448 2448
/*
 * hxge_m_stop(): stop transmitting and receiving.
 */
static void
hxge_m_stop(void *arg)
{
	p_hxge_t hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));

	/* Cancel the hardware-state/tx-hang check timer first. */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop the link status timer before unregistering */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id) {
		(void) untimeout(hxgep->timeout.id);
		hxgep->timeout.id = 0;
	}
	/* Report link down while still holding the timer lock. */
	hxge_link_update(hxgep, LINK_STATE_DOWN);
	MUTEX_EXIT(&hxgep->timeout.lock);

	MUTEX_ENTER(hxgep->genlock);

	/* Quiesce and tear down the hardware. */
	hxge_uninit(hxgep);

	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
}
2483 2483
2484 2484 static int
2485 2485 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2486 2486 {
2487 2487 p_hxge_t hxgep = (p_hxge_t)arg;
2488 2488 struct ether_addr addrp;
2489 2489
2490 2490 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2491 2491
2492 2492 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2493 2493
2494 2494 if (add) {
2495 2495 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2496 2496 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2497 2497 "<== hxge_m_multicst: add multicast failed"));
2498 2498 return (EINVAL);
2499 2499 }
2500 2500 } else {
2501 2501 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2502 2502 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2503 2503 "<== hxge_m_multicst: del multicast failed"));
2504 2504 return (EINVAL);
2505 2505 }
2506 2506 }
2507 2507
2508 2508 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2509 2509
2510 2510 return (0);
2511 2511 }
2512 2512
2513 2513 static int
2514 2514 hxge_m_promisc(void *arg, boolean_t on)
2515 2515 {
2516 2516 p_hxge_t hxgep = (p_hxge_t)arg;
2517 2517
2518 2518 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2519 2519
2520 2520 if (hxge_set_promisc(hxgep, on)) {
2521 2521 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2522 2522 "<== hxge_m_promisc: set promisc failed"));
2523 2523 return (EINVAL);
2524 2524 }
2525 2525
2526 2526 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2527 2527
2528 2528 return (0);
2529 2529 }
2530 2530
2531 2531 static void
2532 2532 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2533 2533 {
2534 2534 p_hxge_t hxgep = (p_hxge_t)arg;
2535 2535 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2536 2536 boolean_t need_privilege;
2537 2537 int err;
2538 2538 int cmd;
2539 2539
2540 2540 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2541 2541
2542 2542 iocp = (struct iocblk *)mp->b_rptr;
2543 2543 iocp->ioc_error = 0;
2544 2544 need_privilege = B_TRUE;
2545 2545 cmd = iocp->ioc_cmd;
2546 2546
2547 2547 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2548 2548 switch (cmd) {
2549 2549 default:
2550 2550 miocnak(wq, mp, 0, EINVAL);
2551 2551 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2552 2552 return;
2553 2553
2554 2554 case LB_GET_INFO_SIZE:
2555 2555 case LB_GET_INFO:
2556 2556 case LB_GET_MODE:
2557 2557 need_privilege = B_FALSE;
2558 2558 break;
2559 2559
2560 2560 case LB_SET_MODE:
2561 2561 break;
2562 2562
2563 2563 case ND_GET:
2564 2564 need_privilege = B_FALSE;
2565 2565 break;
2566 2566 case ND_SET:
2567 2567 break;
2568 2568
2569 2569 case HXGE_GET_TX_RING_SZ:
2570 2570 case HXGE_GET_TX_DESC:
2571 2571 case HXGE_TX_SIDE_RESET:
2572 2572 case HXGE_RX_SIDE_RESET:
2573 2573 case HXGE_GLOBAL_RESET:
2574 2574 case HXGE_RESET_MAC:
2575 2575 case HXGE_PUT_TCAM:
2576 2576 case HXGE_GET_TCAM:
2577 2577 case HXGE_RTRACE:
2578 2578
2579 2579 need_privilege = B_FALSE;
2580 2580 break;
2581 2581 }
2582 2582
2583 2583 if (need_privilege) {
2584 2584 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2585 2585 if (err != 0) {
2586 2586 miocnak(wq, mp, 0, err);
2587 2587 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2588 2588 "<== hxge_m_ioctl: no priv"));
2589 2589 return;
2590 2590 }
2591 2591 }
2592 2592
2593 2593 switch (cmd) {
2594 2594 case ND_GET:
2595 2595 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
2596 2596 case ND_SET:
2597 2597 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2598 2598 hxge_param_ioctl(hxgep, wq, mp, iocp);
2599 2599 break;
2600 2600
2601 2601 case LB_GET_MODE:
2602 2602 case LB_SET_MODE:
2603 2603 case LB_GET_INFO_SIZE:
2604 2604 case LB_GET_INFO:
2605 2605 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2606 2606 break;
2607 2607
2608 2608 case HXGE_PUT_TCAM:
2609 2609 case HXGE_GET_TCAM:
2610 2610 case HXGE_GET_TX_RING_SZ:
2611 2611 case HXGE_GET_TX_DESC:
2612 2612 case HXGE_TX_SIDE_RESET:
2613 2613 case HXGE_RX_SIDE_RESET:
2614 2614 case HXGE_GLOBAL_RESET:
2615 2615 case HXGE_RESET_MAC:
2616 2616 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2617 2617 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2618 2618 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2619 2619 break;
2620 2620 }
2621 2621
2622 2622 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2623 2623 }
2624 2624
2625 2625 /*ARGSUSED*/
2626 2626 static int
2627 2627 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2628 2628 {
2629 2629 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2630 2630 p_hxge_t hxgep;
2631 2631 p_tx_ring_t ring;
2632 2632
2633 2633 ASSERT(rhp != NULL);
2634 2634 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2635 2635
2636 2636 hxgep = rhp->hxgep;
2637 2637
2638 2638 /*
2639 2639 * Get the ring pointer.
2640 2640 */
2641 2641 ring = hxgep->tx_rings->rings[rhp->index];
2642 2642
2643 2643 /*
2644 2644 * Fill in the handle for the transmit.
2645 2645 */
2646 2646 MUTEX_ENTER(&ring->lock);
2647 2647 rhp->started = B_TRUE;
2648 2648 ring->ring_handle = rhp->ring_handle;
2649 2649 MUTEX_EXIT(&ring->lock);
2650 2650
2651 2651 return (0);
2652 2652 }
2653 2653
2654 2654 static void
2655 2655 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2656 2656 {
2657 2657 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2658 2658 p_hxge_t hxgep;
2659 2659 p_tx_ring_t ring;
2660 2660
2661 2661 ASSERT(rhp != NULL);
2662 2662 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2663 2663
2664 2664 hxgep = rhp->hxgep;
2665 2665 ring = hxgep->tx_rings->rings[rhp->index];
2666 2666
2667 2667 MUTEX_ENTER(&ring->lock);
2668 2668 ring->ring_handle = (mac_ring_handle_t)NULL;
2669 2669 rhp->started = B_FALSE;
2670 2670 MUTEX_EXIT(&ring->lock);
2671 2671 }
2672 2672
/*
 * hxge_rx_ring_start() -- MAC layer callback to activate one receive
 * ring.  Records the MAC ring handle and generation number and caches
 * the logical-device pointers used to switch the ring between
 * interrupt and polling mode.  Idempotent: returns immediately if the
 * ring is already started.
 */
static int
hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)rdriver;
	p_hxge_t		hxgep;
	p_rx_rcr_ring_t		ring;
	int			i;

	ASSERT(rhp != NULL);
	/*
	 * NOTE(review): the bound check uses HXGE_MAX_TDCS for a
	 * receive ring; HXGE_MAX_RDCS looks intended -- confirm.
	 */
	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));

	hxgep = rhp->hxgep;

	/*
	 * Get pointer to ring.
	 */
	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];

	MUTEX_ENTER(&ring->lock);

	if (rhp->started) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/*
	 * Set the ldvp and ldgp pointers to enable/disable
	 * polling.
	 */
	for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
		/* Match the RX logical device serving this channel. */
		if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
			ring->ldvp = &hxgep->ldgvp->ldvp[i];
			ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
			break;
		}
	}

	rhp->started = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}
2718 2718
2719 2719 static void
2720 2720 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2721 2721 {
2722 2722 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2723 2723 p_hxge_t hxgep;
2724 2724 p_rx_rcr_ring_t ring;
2725 2725
2726 2726 ASSERT(rhp != NULL);
2727 2727 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2728 2728
2729 2729 hxgep = rhp->hxgep;
2730 2730 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2731 2731
2732 2732 MUTEX_ENTER(&ring->lock);
2733 2733 rhp->started = B_TRUE;
2734 2734 ring->rcr_mac_handle = NULL;
2735 2735 ring->ldvp = NULL;
2736 2736 ring->ldgp = NULL;
2737 2737 MUTEX_EXIT(&ring->lock);
2738 2738 }
2739 2739
2740 2740 static int
2741 2741 hxge_rx_group_start(mac_group_driver_t gdriver)
2742 2742 {
2743 2743 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2744 2744
2745 2745 ASSERT(group->hxgep != NULL);
2746 2746 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2747 2747
2748 2748 MUTEX_ENTER(group->hxgep->genlock);
2749 2749 group->started = B_TRUE;
2750 2750 MUTEX_EXIT(group->hxgep->genlock);
2751 2751
2752 2752 return (0);
2753 2753 }
2754 2754
2755 2755 static void
2756 2756 hxge_rx_group_stop(mac_group_driver_t gdriver)
2757 2757 {
2758 2758 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2759 2759
2760 2760 ASSERT(group->hxgep != NULL);
2761 2761 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2762 2762 ASSERT(group->started == B_TRUE);
2763 2763
2764 2764 MUTEX_ENTER(group->hxgep->genlock);
2765 2765 group->started = B_FALSE;
2766 2766 MUTEX_EXIT(group->hxgep->genlock);
2767 2767 }
2768 2768
2769 2769 static int
2770 2770 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2771 2771 {
2772 2772 int i;
2773 2773
2774 2774 /*
2775 2775 * Find an open slot.
2776 2776 */
2777 2777 for (i = 0; i < hxgep->mmac.total; i++) {
2778 2778 if (!hxgep->mmac.addrs[i].set) {
2779 2779 *slot = i;
2780 2780 return (0);
2781 2781 }
2782 2782 }
2783 2783
2784 2784 return (ENXIO);
2785 2785 }
2786 2786
2787 2787 static int
2788 2788 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2789 2789 {
2790 2790 struct ether_addr eaddr;
2791 2791 hxge_status_t status = HXGE_OK;
2792 2792
2793 2793 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2794 2794
2795 2795 /*
2796 2796 * Set new interface local address and re-init device.
2797 2797 * This is destructive to any other streams attached
2798 2798 * to this device.
2799 2799 */
2800 2800 RW_ENTER_WRITER(&hxgep->filter_lock);
2801 2801 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2802 2802 RW_EXIT(&hxgep->filter_lock);
2803 2803 if (status != HXGE_OK)
2804 2804 return (status);
2805 2805
2806 2806 hxgep->mmac.addrs[slot].set = B_TRUE;
2807 2807 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2808 2808 hxgep->mmac.available--;
2809 2809 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2810 2810 hxgep->mmac.addrs[slot].primary = B_TRUE;
2811 2811
2812 2812 return (0);
2813 2813 }
2814 2814
2815 2815 static int
2816 2816 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2817 2817 {
2818 2818 int i, result;
2819 2819
2820 2820 for (i = 0; i < hxgep->mmac.total; i++) {
2821 2821 if (hxgep->mmac.addrs[i].set) {
2822 2822 result = memcmp(hxgep->mmac.addrs[i].addr,
2823 2823 addr, ETHERADDRL);
2824 2824 if (result == 0) {
2825 2825 *slot = i;
2826 2826 return (0);
2827 2827 }
2828 2828 }
2829 2829 }
2830 2830
2831 2831 return (EINVAL);
2832 2832 }
2833 2833
2834 2834 static int
2835 2835 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2836 2836 {
2837 2837 hxge_status_t status;
2838 2838 int i;
2839 2839
2840 2840 status = hxge_pfc_clear_mac_address(hxgep, slot);
2841 2841 if (status != HXGE_OK)
2842 2842 return (status);
2843 2843
2844 2844 for (i = 0; i < ETHERADDRL; i++)
2845 2845 hxgep->mmac.addrs[slot].addr[i] = 0;
2846 2846
2847 2847 hxgep->mmac.addrs[slot].set = B_FALSE;
2848 2848 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2849 2849 hxgep->mmac.addrs[slot].primary = B_FALSE;
2850 2850 hxgep->mmac.available++;
2851 2851
2852 2852 return (0);
2853 2853 }
2854 2854
2855 2855 static int
2856 2856 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2857 2857 {
2858 2858 hxge_ring_group_t *group = arg;
2859 2859 p_hxge_t hxgep = group->hxgep;
2860 2860 int slot = 0;
2861 2861
2862 2862 ASSERT(group->type == MAC_RING_TYPE_RX);
2863 2863
2864 2864 MUTEX_ENTER(hxgep->genlock);
2865 2865
2866 2866 /*
2867 2867 * Find a slot for the address.
2868 2868 */
2869 2869 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2870 2870 MUTEX_EXIT(hxgep->genlock);
2871 2871 return (ENOSPC);
2872 2872 }
2873 2873
2874 2874 /*
2875 2875 * Program the MAC address.
2876 2876 */
2877 2877 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2878 2878 MUTEX_EXIT(hxgep->genlock);
2879 2879 return (ENOSPC);
2880 2880 }
2881 2881
2882 2882 MUTEX_EXIT(hxgep->genlock);
2883 2883 return (0);
2884 2884 }
2885 2885
2886 2886 static int
2887 2887 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2888 2888 {
2889 2889 hxge_ring_group_t *group = arg;
2890 2890 p_hxge_t hxgep = group->hxgep;
2891 2891 int rv, slot;
2892 2892
2893 2893 ASSERT(group->type == MAC_RING_TYPE_RX);
2894 2894
2895 2895 MUTEX_ENTER(hxgep->genlock);
2896 2896
2897 2897 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2898 2898 MUTEX_EXIT(hxgep->genlock);
2899 2899 return (rv);
2900 2900 }
2901 2901
2902 2902 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2903 2903 MUTEX_EXIT(hxgep->genlock);
2904 2904 return (rv);
2905 2905 }
2906 2906
2907 2907 MUTEX_EXIT(hxgep->genlock);
2908 2908 return (0);
2909 2909 }
2910 2910
2911 2911 static void
2912 2912 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2913 2913 mac_group_info_t *infop, mac_group_handle_t gh)
2914 2914 {
2915 2915 p_hxge_t hxgep = arg;
2916 2916 hxge_ring_group_t *group;
2917 2917
2918 2918 ASSERT(type == MAC_RING_TYPE_RX);
2919 2919
2920 2920 switch (type) {
2921 2921 case MAC_RING_TYPE_RX:
2922 2922 group = &hxgep->rx_groups[groupid];
2923 2923 group->hxgep = hxgep;
2924 2924 group->ghandle = gh;
2925 2925 group->index = groupid;
2926 2926 group->type = type;
2927 2927
2928 2928 infop->mgi_driver = (mac_group_driver_t)group;
2929 2929 infop->mgi_start = hxge_rx_group_start;
2930 2930 infop->mgi_stop = hxge_rx_group_stop;
2931 2931 infop->mgi_addmac = hxge_rx_group_add_mac;
2932 2932 infop->mgi_remmac = hxge_rx_group_rem_mac;
2933 2933 infop->mgi_count = HXGE_MAX_RDCS;
2934 2934 break;
2935 2935
2936 2936 case MAC_RING_TYPE_TX:
2937 2937 default:
2938 2938 break;
2939 2939 }
2940 2940 }
2941 2941
2942 2942 static int
2943 2943 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2944 2944 {
2945 2945 int i;
2946 2946
2947 2947 ASSERT(hxgep->ldgvp != NULL);
2948 2948
2949 2949 switch (type) {
2950 2950 case MAC_RING_TYPE_RX:
2951 2951 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2952 2952 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2953 2953 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2954 2954 return ((int)
2955 2955 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2956 2956 }
2957 2957 }
2958 2958 break;
2959 2959
2960 2960 case MAC_RING_TYPE_TX:
2961 2961 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2962 2962 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2963 2963 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2964 2964 return ((int)
2965 2965 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2966 2966 }
2967 2967 }
2968 2968 break;
2969 2969
2970 2970 default:
2971 2971 break;
2972 2972 }
2973 2973
2974 2974 return (-1);
2975 2975 }
2976 2976
2977 2977 /*
2978 2978 * Callback function for the GLDv3 layer to register all rings.
2979 2979 */
/*ARGSUSED*/
static void
hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	/*
	 * mr_rget(9E) callback: describe one TX or RX ring to the MAC
	 * framework.  Binds the driver's per-ring soft-state handle to
	 * the framework's ring handle and exports the ring entry
	 * points, plus the DDI interrupt handle of the logical-device
	 * group servicing this channel (used by the framework for
	 * interrupt retargeting).
	 */
	p_hxge_t hxgep = arg;

	ASSERT(hxgep != NULL);
	ASSERT(infop != NULL);

	switch (type) {
	case MAC_RING_TYPE_TX: {
		p_hxge_ring_handle_t rhp;
		mac_intr_t *mintr = &infop->mri_intr;
		p_hxge_intr_t intrp;
		int htable_idx;

		ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
		rhp = &hxgep->tx_ring_handles[index];
		rhp->hxgep = hxgep;
		rhp->index = index;
		rhp->ring_handle = rh;
		/* Export the TX ring entry points. */
		infop->mri_driver = (mac_ring_driver_t)rhp;
		infop->mri_start = hxge_tx_ring_start;
		infop->mri_stop = hxge_tx_ring_stop;
		infop->mri_tx = hxge_tx_ring_send;
		infop->mri_stat = hxge_tx_ring_stat;

		/* Attach the DDI interrupt handle, if one is mapped. */
		intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
		htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
		if (htable_idx >= 0)
			mintr->mi_ddi_handle = intrp->htable[htable_idx];
		else
			mintr->mi_ddi_handle = NULL;
		break;
	}

	case MAC_RING_TYPE_RX: {
		p_hxge_ring_handle_t rhp;
		mac_intr_t hxge_mac_intr;
		p_hxge_intr_t intrp;
		int htable_idx;

		ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
		rhp = &hxgep->rx_ring_handles[index];
		rhp->hxgep = hxgep;
		rhp->index = index;
		rhp->ring_handle = rh;

		/*
		 * Entrypoint to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).
		 */
		hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
		hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
		hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;

		/* Attach the DDI interrupt handle, if one is mapped. */
		intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
		htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
		if (htable_idx >= 0)
			hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
		else
			hxge_mac_intr.mi_ddi_handle = NULL;

		/* Export the RX ring entry points. */
		infop->mri_driver = (mac_ring_driver_t)rhp;
		infop->mri_start = hxge_rx_ring_start;
		infop->mri_stop = hxge_rx_ring_stop;
		infop->mri_intr = hxge_mac_intr;
		infop->mri_poll = hxge_rx_poll;
		infop->mri_stat = hxge_rx_ring_stat;
		break;
	}

	default:
		break;
	}
}
3057 3057
3058 3058 /*ARGSUSED*/
3059 3059 boolean_t
3060 3060 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3061 3061 {
3062 3062 p_hxge_t hxgep = arg;
3063 3063
3064 3064 switch (cap) {
3065 3065 case MAC_CAPAB_HCKSUM: {
3066 3066 uint32_t *txflags = cap_data;
3067 3067
3068 3068 *txflags = HCKSUM_INET_PARTIAL;
3069 3069 break;
3070 3070 }
3071 3071
3072 3072 case MAC_CAPAB_RINGS: {
3073 3073 mac_capab_rings_t *cap_rings = cap_data;
3074 3074
3075 3075 MUTEX_ENTER(hxgep->genlock);
3076 3076 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3077 3077 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3078 3078 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3079 3079 cap_rings->mr_rget = hxge_fill_ring;
3080 3080 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3081 3081 cap_rings->mr_gget = hxge_group_get;
3082 3082 cap_rings->mr_gaddring = NULL;
3083 3083 cap_rings->mr_gremring = NULL;
3084 3084 } else {
3085 3085 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3086 3086 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3087 3087 cap_rings->mr_rget = hxge_fill_ring;
3088 3088 cap_rings->mr_gnum = 0;
3089 3089 cap_rings->mr_gget = NULL;
3090 3090 cap_rings->mr_gaddring = NULL;
3091 3091 cap_rings->mr_gremring = NULL;
3092 3092 }
3093 3093 MUTEX_EXIT(hxgep->genlock);
3094 3094 break;
3095 3095 }
3096 3096
3097 3097 default:
3098 3098 return (B_FALSE);
3099 3099 }
3100 3100 return (B_TRUE);
3101 3101 }
3102 3102
3103 3103 static boolean_t
3104 3104 hxge_param_locked(mac_prop_id_t pr_num)
3105 3105 {
3106 3106 /*
3107 3107 * All adv_* parameters are locked (read-only) while
3108 3108 * the device is in any sort of loopback mode ...
3109 3109 */
3110 3110 switch (pr_num) {
3111 3111 case MAC_PROP_ADV_1000FDX_CAP:
3112 3112 case MAC_PROP_EN_1000FDX_CAP:
3113 3113 case MAC_PROP_ADV_1000HDX_CAP:
3114 3114 case MAC_PROP_EN_1000HDX_CAP:
3115 3115 case MAC_PROP_ADV_100FDX_CAP:
3116 3116 case MAC_PROP_EN_100FDX_CAP:
3117 3117 case MAC_PROP_ADV_100HDX_CAP:
3118 3118 case MAC_PROP_EN_100HDX_CAP:
3119 3119 case MAC_PROP_ADV_10FDX_CAP:
3120 3120 case MAC_PROP_EN_10FDX_CAP:
3121 3121 case MAC_PROP_ADV_10HDX_CAP:
3122 3122 case MAC_PROP_EN_10HDX_CAP:
3123 3123 case MAC_PROP_AUTONEG:
3124 3124 case MAC_PROP_FLOWCTRL:
3125 3125 return (B_TRUE);
3126 3126 }
3127 3127 return (B_FALSE);
3128 3128 }
3129 3129
3130 3130 /*
3131 3131 * callback functions for set/get of properties
3132 3132 */
/*
 * GLDv3 mc_setprop(9E) entry point: set one MAC-layer property.
 * Returns 0 on success, EBUSY when the property is locked (loopback
 * mode, or MTU while the MAC is running), EINVAL for read-only or
 * out-of-range values, and ENOTSUP for unknown properties.
 */
static int
hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	hxge_t *hxgep = barg;
	p_hxge_stats_t statsp;
	int err = 0;
	uint32_t new_mtu, old_framesize, new_framesize;

	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));

	statsp = hxgep->statsp;
	MUTEX_ENTER(hxgep->genlock);
	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
	    hxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: loopback mode: read only"));
		MUTEX_EXIT(hxgep->genlock);
		return (EBUSY);
	}

	switch (pr_num) {
	/*
	 * These properties are either not exist or read only
	 */
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_AUTONEG:
	/*
	 * Flow control is handled in the shared domain and
	 * it is readonly here.
	 */
	case MAC_PROP_FLOWCTRL:
		err = EINVAL;
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case MAC_PROP_MTU:
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: set MTU: %d", new_mtu));

		/* No-op when the frame size is unchanged. */
		new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
		if (new_framesize == hxgep->vmac.maxframesize) {
			err = 0;
			break;
		}

		/* MTU can only change while the MAC is stopped. */
		if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		if (new_framesize < MIN_FRAME_SIZE ||
		    new_framesize > MAX_FRAME_SIZE) {
			err = EINVAL;
			break;
		}

		/* Remember the old size so we can roll back on failure. */
		old_framesize = hxgep->vmac.maxframesize;
		hxgep->vmac.maxframesize = (uint16_t)new_framesize;

		if (hxge_vmac_set_framesize(hxgep)) {
			hxgep->vmac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(hxgep->mach, new_mtu);
		if (err) {
			/* MAC layer rejected the SDU: restore hardware. */
			hxgep->vmac.maxframesize =
			    (uint16_t)old_framesize;
			(void) hxge_vmac_set_framesize(hxgep);
		}

		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, hxgep->vmac.maxframesize));
		break;

	case MAC_PROP_PRIVATE:
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: private property"));
		err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
	    "<== hxge_m_setprop (return %d)", err));

	return (err);
}
3252 3252
3253 3253 static int
3254 3254 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3255 3255 uint_t pr_valsize, void *pr_val)
3256 3256 {
3257 3257 hxge_t *hxgep = barg;
3258 3258 p_hxge_stats_t statsp = hxgep->statsp;
3259 3259 int err = 0;
3260 3260 link_flowctrl_t fl;
3261 3261 uint64_t tmp = 0;
3262 3262 link_state_t ls;
3263 3263
3264 3264 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3265 3265 "==> hxge_m_getprop: pr_num %d", pr_num));
3266 3266
3267 3267 switch (pr_num) {
3268 3268 case MAC_PROP_DUPLEX:
3269 3269 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3270 3270 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3271 3271 "==> hxge_m_getprop: duplex mode %d",
3272 3272 *(uint8_t *)pr_val));
3273 3273 break;
3274 3274
3275 3275 case MAC_PROP_SPEED:
3276 3276 ASSERT(pr_valsize >= sizeof (uint64_t));
3277 3277 tmp = statsp->mac_stats.link_speed * 1000000ull;
3278 3278 bcopy(&tmp, pr_val, sizeof (tmp));
3279 3279 break;
3280 3280
3281 3281 case MAC_PROP_STATUS:
3282 3282 ASSERT(pr_valsize >= sizeof (link_state_t));
3283 3283 if (!statsp->mac_stats.link_up)
3284 3284 ls = LINK_STATE_DOWN;
3285 3285 else
3286 3286 ls = LINK_STATE_UP;
3287 3287 bcopy(&ls, pr_val, sizeof (ls));
3288 3288 break;
3289 3289
3290 3290 case MAC_PROP_FLOWCTRL:
3291 3291 /*
3292 3292 * Flow control is supported by the shared domain and
3293 3293 * it is currently transmit only
3294 3294 */
3295 3295 ASSERT(pr_valsize < sizeof (link_flowctrl_t));
3296 3296 fl = LINK_FLOWCTRL_TX;
3297 3297 bcopy(&fl, pr_val, sizeof (fl));
3298 3298 break;
3299 3299 case MAC_PROP_AUTONEG:
3300 3300 /* 10G link only and it is not negotiable */
3301 3301 *(uint8_t *)pr_val = 0;
3302 3302 break;
3303 3303 case MAC_PROP_ADV_1000FDX_CAP:
3304 3304 case MAC_PROP_ADV_100FDX_CAP:
3305 3305 case MAC_PROP_ADV_10FDX_CAP:
3306 3306 case MAC_PROP_ADV_1000HDX_CAP:
3307 3307 case MAC_PROP_ADV_100HDX_CAP:
3308 3308 case MAC_PROP_ADV_10HDX_CAP:
3309 3309 case MAC_PROP_EN_1000FDX_CAP:
3310 3310 case MAC_PROP_EN_100FDX_CAP:
3311 3311 case MAC_PROP_EN_10FDX_CAP:
3312 3312 case MAC_PROP_EN_1000HDX_CAP:
3313 3313 case MAC_PROP_EN_100HDX_CAP:
3314 3314 case MAC_PROP_EN_10HDX_CAP:
3315 3315 err = ENOTSUP;
3316 3316 break;
3317 3317
3318 3318 case MAC_PROP_PRIVATE:
3319 3319 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3320 3320 pr_val);
3321 3321 break;
3322 3322
3323 3323 default:
3324 3324 err = EINVAL;
3325 3325 break;
3326 3326 }
3327 3327
3328 3328 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3329 3329
3330 3330 return (err);
3331 3331 }
3332 3332
/*
 * GLDv3 mc_propinfo(9E) entry point: report permissions, valid
 * ranges, and default values for the properties this driver exposes.
 */
static void
hxge_m_propinfo(void *arg, const char *pr_name,
    mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
{
	_NOTE(ARGUNUSED(arg));
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_STATUS:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		/* Fixed 10G link: all of these are read-only. */
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_MTU:
		/* MTU bounds derived from the hardware frame limits. */
		mac_prop_info_set_range_uint32(prh,
		    MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
		    MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
		break;

	case MAC_PROP_PRIVATE: {
		char valstr[MAXNAMELEN];

		bzero(valstr, sizeof (valstr));

		/* Receive Interrupt Blanking Parameters */
		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d",
			    RXDMA_RCR_TO_DEFAULT);
		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d",
			    RXDMA_RCR_PTHRES_DEFAULT);

		/* Classification and Load Distribution Configuration */
		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d",
			    HXGE_CLASS_TCAM_LOOKUP);
		}

		/* Unknown private properties get no default string. */
		if (strlen(valstr) > 0)
			mac_prop_info_set_default_str(prh, valstr);
		break;
	}
	}
}
3385 3385
3386 3386
3387 3387 /* ARGSUSED */
3388 3388 static int
3389 3389 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3390 3390 const void *pr_val)
3391 3391 {
3392 3392 p_hxge_param_t param_arr = hxgep->param_arr;
3393 3393 int err = 0;
3394 3394
3395 3395 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3396 3396 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3397 3397
3398 3398 if (pr_val == NULL) {
3399 3399 return (EINVAL);
3400 3400 }
3401 3401
3402 3402 /* Blanking */
3403 3403 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3404 3404 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3405 3405 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_time]);
3406 3406 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3407 3407 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3408 3408 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_pkts]);
3409 3409
3410 3410 /* Classification */
3411 3411 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3412 3412 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3413 3413 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
3414 3414 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3415 3415 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3416 3416 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
3417 3417 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3418 3418 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3419 3419 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
3420 3420 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3421 3421 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3422 3422 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
3423 3423 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3424 3424 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3425 3425 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
3426 3426 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3427 3427 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3428 3428 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
3429 3429 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3430 3430 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3431 3431 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
3432 3432 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3433 3433 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3434 3434 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
3435 3435 } else {
3436 3436 err = EINVAL;
3437 3437 }
3438 3438
3439 3439 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3440 3440 "<== hxge_set_priv_prop: err %d", err));
3441 3441
3442 3442 return (err);
3443 3443 }
3444 3444
3445 3445 static int
3446 3446 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3447 3447 void *pr_val)
3448 3448 {
3449 3449 p_hxge_param_t param_arr = hxgep->param_arr;
3450 3450 char valstr[MAXNAMELEN];
3451 3451 int err = 0;
3452 3452 uint_t strsize;
3453 3453 int value = 0;
3454 3454
3455 3455 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3456 3456 "==> hxge_get_priv_prop: property %s", pr_name));
3457 3457
3458 3458 /* Receive Interrupt Blanking Parameters */
3459 3459 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3460 3460 value = hxgep->intr_timeout;
3461 3461 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3462 3462 value = hxgep->intr_threshold;
3463 3463
3464 3464 /* Classification and Load Distribution Configuration */
3465 3465 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3466 3466 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 3467 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
3468 3468
3469 3469 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3470 3470 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3471 3471 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3472 3472 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
3473 3473
3474 3474 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3475 3475 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3476 3476 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3477 3477 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
3478 3478
3479 3479 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3480 3480 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3481 3481 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3482 3482 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
3483 3483
3484 3484 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3485 3485 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3486 3486 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3487 3487 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
3488 3488
3489 3489 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3490 3490 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3491 3491 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3492 3492 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
3493 3493
3494 3494 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3495 3495 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3496 3496 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3497 3497 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
3498 3498
3499 3499 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3500 3500 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3501 3501 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3502 3502 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
3503 3503
3504 3504 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3505 3505 } else {
3506 3506 err = EINVAL;
3507 3507 }
3508 3508
3509 3509 if (err == 0) {
3510 3510 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3511 3511
3512 3512 strsize = (uint_t)strlen(valstr);
3513 3513 if (pr_valsize < strsize) {
3514 3514 err = ENOBUFS;
3515 3515 } else {
3516 3516 (void) strlcpy(pr_val, valstr, pr_valsize);
3517 3517 }
3518 3518 }
3519 3519
3520 3520 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3521 3521 "<== hxge_get_priv_prop: return %d", err));
3522 3522
3523 3523 return (err);
3524 3524 }
3525 3525 /*
3526 3526 * Module loading and removing entry points.
3527 3527 */
/*
 * Standard DDI/STREAMS device operations vector; the cb_ops entry
 * points are filled in later by mac_init_ops() in _init().
 */
DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
    nodev, NULL, D_MP, NULL, NULL);

extern struct mod_ops mod_driverops;

/* Human-readable driver description reported by modinfo(1M). */
#define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv hxge_modldrv = {
	&mod_driverops,
	HXGE_DESC_VER,
	&hxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &hxge_modldrv, NULL
};
3547 3547
3548 3548 int
3549 3549 _init(void)
3550 3550 {
3551 3551 int status;
3552 3552
3553 3553 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3554 3554 mac_init_ops(&hxge_dev_ops, "hxge");
3555 3555 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3556 3556 if (status != 0) {
3557 3557 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3558 3558 "failed to init device soft state"));
3559 3559 mac_fini_ops(&hxge_dev_ops);
3560 3560 goto _init_exit;
3561 3561 }
3562 3562
3563 3563 status = mod_install(&modlinkage);
3564 3564 if (status != 0) {
3565 3565 ddi_soft_state_fini(&hxge_list);
3566 3566 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3567 3567 goto _init_exit;
3568 3568 }
3569 3569
3570 3570 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3571 3571
3572 3572 _init_exit:
3573 3573 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3574 3574
3575 3575 return (status);
3576 3576 }
3577 3577
3578 3578 int
3579 3579 _fini(void)
3580 3580 {
3581 3581 int status;
3582 3582
3583 3583 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3584 3584
3585 3585 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3586 3586
3587 3587 if (hxge_mblks_pending)
3588 3588 return (EBUSY);
3589 3589
3590 3590 status = mod_remove(&modlinkage);
3591 3591 if (status != DDI_SUCCESS) {
3592 3592 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3593 3593 "Module removal failed 0x%08x", status));
3594 3594 goto _fini_exit;
3595 3595 }
3596 3596
3597 3597 mac_fini_ops(&hxge_dev_ops);
3598 3598
3599 3599 ddi_soft_state_fini(&hxge_list);
3600 3600
3601 3601 MUTEX_DESTROY(&hxge_common_lock);
3602 3602
3603 3603 _fini_exit:
3604 3604 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3605 3605
3606 3606 return (status);
3607 3607 }
3608 3608
3609 3609 int
3610 3610 _info(struct modinfo *modinfop)
3611 3611 {
3612 3612 int status;
3613 3613
3614 3614 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3615 3615 status = mod_info(&modlinkage, modinfop);
3616 3616 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3617 3617
3618 3618 return (status);
3619 3619 }
3620 3620
/*ARGSUSED*/
/*
 * Select and register the device's interrupts.  The tunable
 * hxge_msi_enable chooses the preference order: 1 prefers MSI,
 * 2 prefers MSI-X, anything else uses fixed (INTx) interrupts;
 * the choice is then constrained by what the DDI reports as
 * supported.  Returns HXGE_OK on success or an HXGE_ERROR code.
 */
static hxge_status_t
hxge_add_intrs(p_hxge_t hxgep)
{
	int intr_types;
	int type = 0;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));

	/* Start from a clean interrupt-state slate. */
	hxgep->hxge_intr_type.intr_registered = B_FALSE;
	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
	hxgep->hxge_intr_type.msi_intx_cnt = 0;
	hxgep->hxge_intr_type.intr_added = 0;
	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
	hxgep->hxge_intr_type.intr_type = 0;

	if (hxge_msi_enable) {
		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	hxgep->hxge_intr_type.intr_types = intr_types;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable:
	 * (1): 1 - MSI
	 * (2): 2 - MSI-X
	 * others - FIXED
	 */
	switch (hxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "use fixed (intx emulation) type %08x", type));
		break;

	case 2:
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		/* Preference order: MSI-X, then MSI, then fixed. */
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
		break;

	case 1:
		/* Preference order: MSI, then MSI-X, then fixed. */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
	}

	hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		/*
		 * NOTE(review): hxge_add_intrs_adv() returns a
		 * hxge_status_t that is compared against DDI_SUCCESS;
		 * this presumably relies on HXGE_OK == DDI_SUCCESS ==
		 * 0 — confirm.
		 */
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}
3742 3742
3743 3743 /*ARGSUSED*/
3744 3744 static hxge_status_t
3745 3745 hxge_add_intrs_adv(p_hxge_t hxgep)
3746 3746 {
3747 3747 int intr_type;
3748 3748 p_hxge_intr_t intrp;
3749 3749 hxge_status_t status;
3750 3750
3751 3751 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3752 3752
3753 3753 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3754 3754 intr_type = intrp->intr_type;
3755 3755
3756 3756 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3757 3757 intr_type));
3758 3758
3759 3759 switch (intr_type) {
3760 3760 case DDI_INTR_TYPE_MSI: /* 0x2 */
3761 3761 case DDI_INTR_TYPE_MSIX: /* 0x4 */
3762 3762 status = hxge_add_intrs_adv_type(hxgep, intr_type);
3763 3763 break;
3764 3764
3765 3765 case DDI_INTR_TYPE_FIXED: /* 0x1 */
3766 3766 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3767 3767 break;
3768 3768
3769 3769 default:
3770 3770 status = HXGE_ERROR;
3771 3771 break;
3772 3772 }
3773 3773
3774 3774 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3775 3775
3776 3776 return (status);
3777 3777 }
3778 3778
3779 3779 /*ARGSUSED*/
3780 3780 static hxge_status_t
3781 3781 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3782 3782 {
3783 3783 dev_info_t *dip = hxgep->dip;
3784 3784 p_hxge_ldg_t ldgp;
3785 3785 p_hxge_intr_t intrp;
3786 3786 uint_t *inthandler;
3787 3787 void *arg1, *arg2;
3788 3788 int behavior;
3789 3789 int nintrs, navail;
3790 3790 int nactual, nrequired, nrequest;
3791 3791 int inum = 0;
3792 3792 int loop = 0;
3793 3793 int x, y;
3794 3794 int ddi_status = DDI_SUCCESS;
3795 3795 hxge_status_t status = HXGE_OK;
3796 3796
3797 3797 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3798 3798
3799 3799 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3800 3800
3801 3801 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3802 3802 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3803 3803 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3804 3804 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3805 3805 "nintrs: %d", ddi_status, nintrs));
3806 3806 return (HXGE_ERROR | HXGE_DDI_FAILED);
3807 3807 }
3808 3808
3809 3809 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3810 3810 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3811 3811 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3812 3812 "ddi_intr_get_navail() failed, status: 0x%x%, "
3813 3813 "nintrs: %d", ddi_status, navail));
3814 3814 return (HXGE_ERROR | HXGE_DDI_FAILED);
3815 3815 }
3816 3816
3817 3817 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3818 3818 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3819 3819 int_type, nintrs, navail));
3820 3820
3821 3821 /* PSARC/2007/453 MSI-X interrupt limit override */
3822 3822 if (int_type == DDI_INTR_TYPE_MSIX) {
3823 3823 nrequest = hxge_create_msi_property(hxgep);
3824 3824 if (nrequest < navail) {
3825 3825 navail = nrequest;
3826 3826 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3827 3827 "hxge_add_intrs_adv_type: nintrs %d "
3828 3828 "navail %d (nrequest %d)",
3829 3829 nintrs, navail, nrequest));
3830 3830 }
3831 3831 }
3832 3832
3833 3833 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3834 3834 /* MSI must be power of 2 */
3835 3835 if ((navail & 16) == 16) {
3836 3836 navail = 16;
3837 3837 } else if ((navail & 8) == 8) {
3838 3838 navail = 8;
3839 3839 } else if ((navail & 4) == 4) {
3840 3840 navail = 4;
3841 3841 } else if ((navail & 2) == 2) {
3842 3842 navail = 2;
3843 3843 } else {
3844 3844 navail = 1;
3845 3845 }
3846 3846 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3847 3847 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3848 3848 "navail %d", nintrs, navail));
3849 3849 }
3850 3850
3851 3851 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3852 3852 "requesting: intr type %d nintrs %d, navail %d",
3853 3853 int_type, nintrs, navail));
3854 3854
3855 3855 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3856 3856 DDI_INTR_ALLOC_NORMAL);
3857 3857 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3858 3858 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3859 3859
3860 3860 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3861 3861 navail, &nactual, behavior);
3862 3862 if (ddi_status != DDI_SUCCESS || nactual == 0) {
3863 3863 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3864 3864 " ddi_intr_alloc() failed: %d", ddi_status));
3865 3865 kmem_free(intrp->htable, intrp->intr_size);
3866 3866 return (HXGE_ERROR | HXGE_DDI_FAILED);
3867 3867 }
3868 3868
3869 3869 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3870 3870 "ddi_intr_alloc() returned: navail %d nactual %d",
3871 3871 navail, nactual));
3872 3872
3873 3873 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3874 3874 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3875 3875 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3876 3876 " ddi_intr_get_pri() failed: %d", ddi_status));
3877 3877 /* Free already allocated interrupts */
3878 3878 for (y = 0; y < nactual; y++) {
3879 3879 (void) ddi_intr_free(intrp->htable[y]);
3880 3880 }
3881 3881
3882 3882 kmem_free(intrp->htable, intrp->intr_size);
3883 3883 return (HXGE_ERROR | HXGE_DDI_FAILED);
3884 3884 }
3885 3885
3886 3886 nrequired = 0;
3887 3887 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3888 3888 if (status != HXGE_OK) {
3889 3889 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3890 3890 "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3891 3891 "failed: 0x%x", status));
3892 3892 /* Free already allocated interrupts */
3893 3893 for (y = 0; y < nactual; y++) {
3894 3894 (void) ddi_intr_free(intrp->htable[y]);
3895 3895 }
3896 3896
3897 3897 kmem_free(intrp->htable, intrp->intr_size);
3898 3898 return (status);
3899 3899 }
3900 3900
3901 3901 ldgp = hxgep->ldgvp->ldgp;
3902 3902 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3903 3903 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3904 3904
3905 3905 if (nactual < nrequired)
3906 3906 loop = nactual;
3907 3907 else
3908 3908 loop = nrequired;
3909 3909
3910 3910 for (x = 0; x < loop; x++, ldgp++) {
3911 3911 ldgp->vector = (uint8_t)x;
3912 3912 arg1 = ldgp->ldvp;
3913 3913 arg2 = hxgep;
3914 3914 if (ldgp->nldvs == 1) {
3915 3915 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3916 3916 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3917 3917 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3918 3918 "1-1 int handler (entry %d)\n",
3919 3919 arg1, arg2, x));
3920 3920 } else if (ldgp->nldvs > 1) {
3921 3921 inthandler = (uint_t *)ldgp->sys_intr_handler;
3922 3922 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3923 3923 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3924 3924 "nldevs %d int handler (entry %d)\n",
3925 3925 arg1, arg2, ldgp->nldvs, x));
3926 3926 }
3927 3927 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3928 3928 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3929 3929 "htable 0x%llx", x, intrp->htable[x]));
3930 3930
3931 3931 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3932 3932 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3933 3933 DDI_SUCCESS) {
3934 3934 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3935 3935 "==> hxge_add_intrs_adv_type: failed #%d "
3936 3936 "status 0x%x", x, ddi_status));
3937 3937 for (y = 0; y < intrp->intr_added; y++) {
3938 3938 (void) ddi_intr_remove_handler(
3939 3939 intrp->htable[y]);
3940 3940 }
3941 3941
3942 3942 /* Free already allocated intr */
3943 3943 for (y = 0; y < nactual; y++) {
3944 3944 (void) ddi_intr_free(intrp->htable[y]);
3945 3945 }
3946 3946 kmem_free(intrp->htable, intrp->intr_size);
3947 3947
3948 3948 (void) hxge_ldgv_uninit(hxgep);
3949 3949
3950 3950 return (HXGE_ERROR | HXGE_DDI_FAILED);
3951 3951 }
3952 3952
3953 3953 ldgp->htable_idx = x;
3954 3954 intrp->intr_added++;
3955 3955 }
3956 3956 intrp->msi_intx_cnt = nactual;
3957 3957
3958 3958 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3959 3959 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3960 3960 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3961 3961
3962 3962 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3963 3963 (void) hxge_intr_ldgv_init(hxgep);
3964 3964
3965 3965 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3966 3966
3967 3967 return (status);
3968 3968 }
3969 3969
3970 3970 /*ARGSUSED*/
3971 3971 static hxge_status_t
3972 3972 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3973 3973 {
3974 3974 dev_info_t *dip = hxgep->dip;
3975 3975 p_hxge_ldg_t ldgp;
3976 3976 p_hxge_intr_t intrp;
3977 3977 uint_t *inthandler;
3978 3978 void *arg1, *arg2;
3979 3979 int behavior;
3980 3980 int nintrs, navail;
3981 3981 int nactual, nrequired;
3982 3982 int inum = 0;
3983 3983 int x, y;
3984 3984 int ddi_status = DDI_SUCCESS;
3985 3985 hxge_status_t status = HXGE_OK;
3986 3986
3987 3987 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3988 3988 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3989 3989
3990 3990 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3991 3991 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3992 3992 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3993 3993 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3994 3994 "nintrs: %d", status, nintrs));
3995 3995 return (HXGE_ERROR | HXGE_DDI_FAILED);
3996 3996 }
3997 3997
3998 3998 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3999 3999 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4000 4000 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4001 4001 "ddi_intr_get_navail() failed, status: 0x%x%, "
4002 4002 "nintrs: %d", ddi_status, navail));
4003 4003 return (HXGE_ERROR | HXGE_DDI_FAILED);
4004 4004 }
4005 4005
4006 4006 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4007 4007 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
4008 4008 nintrs, navail));
4009 4009
4010 4010 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4011 4011 DDI_INTR_ALLOC_NORMAL);
4012 4012 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4013 4013 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4014 4014 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4015 4015 navail, &nactual, behavior);
4016 4016 if (ddi_status != DDI_SUCCESS || nactual == 0) {
4017 4017 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4018 4018 " ddi_intr_alloc() failed: %d", ddi_status));
4019 4019 kmem_free(intrp->htable, intrp->intr_size);
4020 4020 return (HXGE_ERROR | HXGE_DDI_FAILED);
4021 4021 }
4022 4022
4023 4023 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4024 4024 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4025 4025 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4026 4026 " ddi_intr_get_pri() failed: %d", ddi_status));
4027 4027 /* Free already allocated interrupts */
4028 4028 for (y = 0; y < nactual; y++) {
4029 4029 (void) ddi_intr_free(intrp->htable[y]);
4030 4030 }
4031 4031
4032 4032 kmem_free(intrp->htable, intrp->intr_size);
4033 4033 return (HXGE_ERROR | HXGE_DDI_FAILED);
4034 4034 }
4035 4035
4036 4036 nrequired = 0;
4037 4037 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4038 4038 if (status != HXGE_OK) {
4039 4039 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4040 4040 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4041 4041 "failed: 0x%x", status));
4042 4042 /* Free already allocated interrupts */
4043 4043 for (y = 0; y < nactual; y++) {
4044 4044 (void) ddi_intr_free(intrp->htable[y]);
4045 4045 }
4046 4046
4047 4047 kmem_free(intrp->htable, intrp->intr_size);
4048 4048 return (status);
4049 4049 }
4050 4050
4051 4051 ldgp = hxgep->ldgvp->ldgp;
4052 4052 for (x = 0; x < nrequired; x++, ldgp++) {
4053 4053 ldgp->vector = (uint8_t)x;
4054 4054 arg1 = ldgp->ldvp;
4055 4055 arg2 = hxgep;
4056 4056 if (ldgp->nldvs == 1) {
4057 4057 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4058 4058 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4059 4059 "hxge_add_intrs_adv_type_fix: "
4060 4060 "1-1 int handler(%d) ldg %d ldv %d "
4061 4061 "arg1 $%p arg2 $%p\n",
4062 4062 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4063 4063 } else if (ldgp->nldvs > 1) {
4064 4064 inthandler = (uint_t *)ldgp->sys_intr_handler;
4065 4065 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4066 4066 "hxge_add_intrs_adv_type_fix: "
4067 4067 "shared ldv %d int handler(%d) ldv %d ldg %d"
4068 4068 "arg1 0x%016llx arg2 0x%016llx\n",
4069 4069 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4070 4070 arg1, arg2));
4071 4071 }
4072 4072
4073 4073 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4074 4074 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4075 4075 DDI_SUCCESS) {
4076 4076 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4077 4077 "==> hxge_add_intrs_adv_type_fix: failed #%d "
4078 4078 "status 0x%x", x, ddi_status));
4079 4079 for (y = 0; y < intrp->intr_added; y++) {
4080 4080 (void) ddi_intr_remove_handler(
4081 4081 intrp->htable[y]);
4082 4082 }
4083 4083 for (y = 0; y < nactual; y++) {
4084 4084 (void) ddi_intr_free(intrp->htable[y]);
4085 4085 }
4086 4086 /* Free already allocated intr */
4087 4087 kmem_free(intrp->htable, intrp->intr_size);
4088 4088
4089 4089 (void) hxge_ldgv_uninit(hxgep);
4090 4090
4091 4091 return (HXGE_ERROR | HXGE_DDI_FAILED);
4092 4092 }
4093 4093 intrp->intr_added++;
4094 4094 }
4095 4095
4096 4096 intrp->msi_intx_cnt = nactual;
4097 4097
4098 4098 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4099 4099
4100 4100 status = hxge_intr_ldgv_init(hxgep);
4101 4101
4102 4102 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4103 4103
4104 4104 return (status);
4105 4105 }
4106 4106
/*ARGSUSED*/
/*
 * hxge_remove_intrs -- Undo advanced interrupt registration: disable
 * every vector, unregister the handlers, free the vectors, release the
 * handle table and reset the interrupt bookkeeping state.  No-op if
 * interrupts were never registered.
 */
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int		i, inum;
	p_hxge_intr_t	intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

	/* Disable all vectors first so no handler fires during teardown. */
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	/* Unregister the handlers that were successfully added. */
	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	/*
	 * Free every allocated vector; msi_intx_cnt (vectors allocated)
	 * may exceed intr_added (handlers registered).
	 */
	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	/* Release the handle table and reset the registration state. */
	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}
4160 4160
4161 4161 /*ARGSUSED*/
4162 4162 static void
4163 4163 hxge_intrs_enable(p_hxge_t hxgep)
4164 4164 {
4165 4165 p_hxge_intr_t intrp;
4166 4166 int i;
4167 4167 int status;
4168 4168
4169 4169 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4170 4170
4171 4171 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4172 4172
4173 4173 if (!intrp->intr_registered) {
4174 4174 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4175 4175 "interrupts are not registered"));
4176 4176 return;
4177 4177 }
4178 4178
4179 4179 if (intrp->intr_enabled) {
4180 4180 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4181 4181 "<== hxge_intrs_enable: already enabled"));
4182 4182 return;
4183 4183 }
4184 4184
4185 4185 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4186 4186 status = ddi_intr_block_enable(intrp->htable,
4187 4187 intrp->intr_added);
4188 4188 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4189 4189 "block enable - status 0x%x total inums #%d\n",
4190 4190 status, intrp->intr_added));
4191 4191 } else {
4192 4192 for (i = 0; i < intrp->intr_added; i++) {
4193 4193 status = ddi_intr_enable(intrp->htable[i]);
4194 4194 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4195 4195 "ddi_intr_enable:enable - status 0x%x "
4196 4196 "total inums %d enable inum #%d\n",
4197 4197 status, intrp->intr_added, i));
4198 4198 if (status == DDI_SUCCESS) {
4199 4199 intrp->intr_enabled = B_TRUE;
4200 4200 }
4201 4201 }
4202 4202 }
4203 4203
4204 4204 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4205 4205 }
4206 4206
4207 4207 /*ARGSUSED*/
4208 4208 static void
4209 4209 hxge_intrs_disable(p_hxge_t hxgep)
4210 4210 {
4211 4211 p_hxge_intr_t intrp;
4212 4212 int i;
4213 4213
4214 4214 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4215 4215
4216 4216 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4217 4217
4218 4218 if (!intrp->intr_registered) {
4219 4219 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4220 4220 "interrupts are not registered"));
4221 4221 return;
4222 4222 }
4223 4223
4224 4224 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4225 4225 (void) ddi_intr_block_disable(intrp->htable,
4226 4226 intrp->intr_added);
4227 4227 } else {
4228 4228 for (i = 0; i < intrp->intr_added; i++) {
4229 4229 (void) ddi_intr_disable(intrp->htable[i]);
4230 4230 }
4231 4231 }
4232 4232
4233 4233 intrp->intr_enabled = B_FALSE;
4234 4234 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4235 4235 }
4236 4236
4237 4237 static hxge_status_t
4238 4238 hxge_mac_register(p_hxge_t hxgep)
4239 4239 {
4240 4240 mac_register_t *macp;
4241 4241 int status;
4242 4242
4243 4243 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4244 4244
4245 4245 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4246 4246 return (HXGE_ERROR);
4247 4247
4248 4248 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4249 4249 macp->m_driver = hxgep;
4250 4250 macp->m_dip = hxgep->dip;
4251 4251 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4252 4252 macp->m_callbacks = &hxge_m_callbacks;
4253 4253 macp->m_min_sdu = 0;
4254 4254 macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4255 4255 macp->m_margin = VLAN_TAGSZ;
4256 4256 macp->m_priv_props = hxge_priv_props;
4257 4257 macp->m_v12n = MAC_VIRT_LEVEL1;
4258 4258
4259 4259 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4260 4260 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4261 4261 macp->m_src_addr[0],
4262 4262 macp->m_src_addr[1],
4263 4263 macp->m_src_addr[2],
4264 4264 macp->m_src_addr[3],
4265 4265 macp->m_src_addr[4],
4266 4266 macp->m_src_addr[5]));
4267 4267
4268 4268 status = mac_register(macp, &hxgep->mach);
4269 4269 mac_free(macp);
4270 4270
4271 4271 if (status != 0) {
4272 4272 cmn_err(CE_WARN,
4273 4273 "hxge_mac_register failed (status %d instance %d)",
4274 4274 status, hxgep->instance);
4275 4275 return (HXGE_ERROR);
4276 4276 }
4277 4277
4278 4278 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4279 4279 "(instance %d)", hxgep->instance));
4280 4280
4281 4281 return (HXGE_OK);
4282 4282 }
4283 4283
/*
 * hxge_init_common_dev -- Attach this instance to the per-device shared
 * hardware state, creating a new hxge_hw_list_t entry if this is the
 * first instance under the parent devinfo node.  The global list is
 * protected by hxge_common_lock.  Always returns HXGE_OK.
 */
static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through existing per Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		/* Match on the parent devinfo node: same physical device. */
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_device: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		/* First instance for this device: create the shared entry. */
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		/* Insert at the head of the global list. */
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}
4338 4338
/*
 * hxge_uninit_common_dev -- Detach this instance from the shared
 * hardware state; when the last instance detaches, unlink the
 * hxge_hw_list_t entry from the global list, destroy its locks and
 * free it.  The global list is protected by hxge_common_lock.
 */
static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	/* h_hw_p trails hw_p by one node so we can unlink mid-list. */
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		/* Magic checks guard against a stale/corrupted entry. */
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				/* Last instance: tear the entry down. */
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
}
4410 4410
4411 4411 #define HXGE_MSIX_ENTRIES 32
4412 4412 #define HXGE_MSIX_WAIT_COUNT 10
4413 4413 #define HXGE_MSIX_PARITY_CHECK_COUNT 30
4414 4414
4415 4415 static void
4416 4416 hxge_link_poll(void *arg)
4417 4417 {
4418 4418 p_hxge_t hxgep = (p_hxge_t)arg;
4419 4419 hpi_handle_t handle;
4420 4420 cip_link_stat_t link_stat;
4421 4421 hxge_timeout *to = &hxgep->timeout;
4422 4422
4423 4423 handle = HXGE_DEV_HPI_HANDLE(hxgep);
4424 4424 HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4425 4425
4426 4426 if (to->report_link_status ||
4427 4427 (to->link_status != link_stat.bits.xpcs0_link_up)) {
4428 4428 to->link_status = link_stat.bits.xpcs0_link_up;
4429 4429 to->report_link_status = B_FALSE;
4430 4430
4431 4431 if (link_stat.bits.xpcs0_link_up) {
4432 4432 hxge_link_update(hxgep, LINK_STATE_UP);
4433 4433 } else {
4434 4434 hxge_link_update(hxgep, LINK_STATE_DOWN);
4435 4435 }
4436 4436 }
4437 4437
4438 4438 /* Restart the link status timer to check the link status */
4439 4439 MUTEX_ENTER(&to->lock);
4440 4440 to->id = timeout(hxge_link_poll, arg, to->ticks);
4441 4441 MUTEX_EXIT(&to->lock);
4442 4442 }
4443 4443
4444 4444 static void
4445 4445 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4446 4446 {
4447 4447 p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;
4448 4448
4449 4449 mac_link_update(hxgep->mach, state);
4450 4450 if (state == LINK_STATE_UP) {
4451 4451 statsp->mac_stats.link_speed = 10000;
4452 4452 statsp->mac_stats.link_duplex = 2;
4453 4453 statsp->mac_stats.link_up = 1;
4454 4454 } else {
4455 4455 statsp->mac_stats.link_speed = 0;
4456 4456 statsp->mac_stats.link_duplex = 0;
4457 4457 statsp->mac_stats.link_up = 0;
4458 4458 }
4459 4459 }
4460 4460
/*
 * hxge_msix_init -- Initialize the MSI-X table through the MSI BAR.
 *
 * First pass writes a distinct pattern into each 16-byte MSI-X table
 * entry (three data words, control word cleared); second pass reads
 * every entry back.  The read-back values are intentionally discarded —
 * presumably this flushes/primes the table RAM output buffer as the
 * comment below suggests; confirm against the Hydra hardware spec.
 */
static void
hxge_msix_init(p_hxge_t hxgep)
{
	uint32_t	data0;
	uint32_t	data1;
	uint32_t	data2;
	int		i;
	uint32_t	msix_entry0;
	uint32_t	msix_entry1;
	uint32_t	msix_entry2;
	uint32_t	msix_entry3;

	/* Change to use MSIx bar instead of indirect access */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		/* Per-entry patterns so each table slot is distinct. */
		data0 = 0xffffffff - i;
		data1 = 0xffffffff - i - 1;
		data2 = 0xffffffff - i - 2;

		/* Each MSI-X entry is 16 bytes: 3 data words + control. */
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
	}

	/* Initialize ram data out buffer. */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
	}
}
4493 4493
4494 4494 /*
4495 4495 * The following function is to support
4496 4496 * PSARC/2007/453 MSI-X interrupt limit override.
4497 4497 */
4498 4498 static int
4499 4499 hxge_create_msi_property(p_hxge_t hxgep)
4500 4500 {
4501 4501 int nmsi;
4502 4502 extern int ncpus;
4503 4503
4504 4504 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4505 4505
4506 4506 (void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4507 4507 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4508 4508 /*
4509 4509 * The maximum MSI-X requested will be 8.
4510 4510 * If the # of CPUs is less than 8, we will reqeust
4511 4511 * # MSI-X based on the # of CPUs.
4512 4512 */
4513 4513 if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4514 4514 nmsi = HXGE_MSIX_REQUEST_10G;
4515 4515 } else {
4516 4516 nmsi = ncpus;
4517 4517 }
4518 4518
4519 4519 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4520 4520 "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4521 4521 ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4522 4522 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4523 4523
4524 4524 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4525 4525 return (nmsi);
4526 4526 }
↓ open down ↓ |
2083 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX