Print this page
5042 stop using deprecated atomic functions
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/rge/rge_chip.c
+++ new/usr/src/uts/common/io/rge/rge_chip.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
#include "rge.h"

/*
 * Typed-pointer accessors into the chip's memory-mapped register
 * space: each yields a pointer of the given width at byte offset
 * <reg> from the mapped base <io_regs>.
 */
#define	REG32(rgep, reg)	((uint32_t *)(rgep->io_regs+(reg)))
#define	REG16(rgep, reg)	((uint16_t *)(rgep->io_regs+(reg)))
#define	REG8(rgep, reg)		((uint8_t *)(rgep->io_regs+(reg)))
#define	PIO_ADDR(rgep, offset)	((void *)(rgep->io_regs+(offset)))

/*
 * Patchable globals:
 *
 *	rge_autorecover
 *		Enables/disables automatic recovery after fault detection
 */
static uint32_t rge_autorecover = 1;

/*
 * globals:
 */
#define	RGE_DBG		RGE_DBG_REGS	/* debug flag for this code	*/
/*
 * Watchdog tick budgets (32 and 8 ticks respectively); consumed by
 * the tx-reclaim / rx watchdog logic elsewhere in the driver —
 * not visible in this chunk.
 */
static uint32_t rge_watchdog_count = 1 << 5;
static uint32_t rge_rx_watchdog_count = 1 << 3;
47 47
48 48 /*
49 49 * Operating register get/set access routines
50 50 */
51 51
52 52 static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno);
53 53 #pragma inline(rge_reg_get32)
54 54
55 55 static uint32_t
56 56 rge_reg_get32(rge_t *rgep, uintptr_t regno)
57 57 {
58 58 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
59 59 (void *)rgep, regno));
60 60
61 61 return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
62 62 }
63 63
64 64 static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data);
65 65 #pragma inline(rge_reg_put32)
66 66
67 67 static void
68 68 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
69 69 {
70 70 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
71 71 (void *)rgep, regno, data));
72 72
73 73 ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
74 74 }
75 75
76 76 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits);
77 77 #pragma inline(rge_reg_set32)
78 78
79 79 static void
80 80 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
81 81 {
82 82 uint32_t regval;
83 83
84 84 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
85 85 (void *)rgep, regno, bits));
86 86
87 87 regval = rge_reg_get32(rgep, regno);
88 88 regval |= bits;
89 89 rge_reg_put32(rgep, regno, regval);
90 90 }
91 91
92 92 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits);
93 93 #pragma inline(rge_reg_clr32)
94 94
95 95 static void
96 96 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
97 97 {
98 98 uint32_t regval;
99 99
100 100 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
101 101 (void *)rgep, regno, bits));
102 102
103 103 regval = rge_reg_get32(rgep, regno);
104 104 regval &= ~bits;
105 105 rge_reg_put32(rgep, regno, regval);
106 106 }
107 107
108 108 static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno);
109 109 #pragma inline(rge_reg_get16)
110 110
111 111 static uint16_t
112 112 rge_reg_get16(rge_t *rgep, uintptr_t regno)
113 113 {
114 114 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
115 115 (void *)rgep, regno));
116 116
117 117 return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
118 118 }
119 119
120 120 static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data);
121 121 #pragma inline(rge_reg_put16)
122 122
123 123 static void
124 124 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
125 125 {
126 126 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
127 127 (void *)rgep, regno, data));
128 128
129 129 ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
130 130 }
131 131
132 132 static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno);
133 133 #pragma inline(rge_reg_get8)
134 134
135 135 static uint8_t
136 136 rge_reg_get8(rge_t *rgep, uintptr_t regno)
137 137 {
138 138 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
139 139 (void *)rgep, regno));
140 140
141 141 return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
142 142 }
143 143
144 144 static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data);
145 145 #pragma inline(rge_reg_put8)
146 146
147 147 static void
148 148 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
149 149 {
150 150 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
151 151 (void *)rgep, regno, data));
152 152
153 153 ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
154 154 }
155 155
156 156 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits);
157 157 #pragma inline(rge_reg_set8)
158 158
159 159 static void
160 160 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
161 161 {
162 162 uint8_t regval;
163 163
164 164 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
165 165 (void *)rgep, regno, bits));
166 166
167 167 regval = rge_reg_get8(rgep, regno);
168 168 regval |= bits;
169 169 rge_reg_put8(rgep, regno, regval);
170 170 }
171 171
172 172 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits);
173 173 #pragma inline(rge_reg_clr8)
174 174
175 175 static void
176 176 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
177 177 {
178 178 uint8_t regval;
179 179
180 180 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
181 181 (void *)rgep, regno, bits));
182 182
183 183 regval = rge_reg_get8(rgep, regno);
184 184 regval &= ~bits;
185 185 rge_reg_put8(rgep, regno, regval);
186 186 }
187 187
188 188 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii);
189 189 #pragma no_inline(rge_mii_get16)
190 190
191 191 uint16_t
192 192 rge_mii_get16(rge_t *rgep, uintptr_t mii)
193 193 {
194 194 uint32_t regval;
195 195 uint32_t val32;
196 196 uint32_t i;
197 197
198 198 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
199 199 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
200 200
201 201 /*
202 202 * Waiting for PHY reading OK
203 203 */
204 204 for (i = 0; i < PHY_RESET_LOOP; i++) {
205 205 drv_usecwait(1000);
206 206 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
207 207 if (val32 & PHY_ACCESS_WR_FLAG)
208 208 return ((uint16_t)(val32 & 0xffff));
209 209 }
210 210
211 211 RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32));
212 212 return ((uint16_t)~0u);
213 213 }
214 214
215 215 void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
216 216 #pragma no_inline(rge_mii_put16)
217 217
218 218 void
219 219 rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
220 220 {
221 221 uint32_t regval;
222 222 uint32_t val32;
223 223 uint32_t i;
224 224
225 225 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
226 226 regval |= data & PHY_DATA_MASK;
227 227 regval |= PHY_ACCESS_WR_FLAG;
228 228 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
229 229
230 230 /*
231 231 * Waiting for PHY writing OK
232 232 */
233 233 for (i = 0; i < PHY_RESET_LOOP; i++) {
234 234 drv_usecwait(1000);
235 235 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
236 236 if (!(val32 & PHY_ACCESS_WR_FLAG))
237 237 return;
238 238 }
239 239 RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
240 240 mii, data));
241 241 }
242 242
243 243 void rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data);
244 244 #pragma no_inline(rge_ephy_put16)
245 245
246 246 void
247 247 rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data)
248 248 {
249 249 uint32_t regval;
250 250 uint32_t val32;
251 251 uint32_t i;
252 252
253 253 regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT;
254 254 regval |= data & EPHY_DATA_MASK;
255 255 regval |= EPHY_ACCESS_WR_FLAG;
256 256 rge_reg_put32(rgep, EPHY_ACCESS_REG, regval);
257 257
258 258 /*
259 259 * Waiting for PHY writing OK
260 260 */
261 261 for (i = 0; i < PHY_RESET_LOOP; i++) {
262 262 drv_usecwait(1000);
263 263 val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG);
264 264 if (!(val32 & EPHY_ACCESS_WR_FLAG))
265 265 return;
266 266 }
267 267 RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail",
268 268 emii, data));
269 269 }
270 270
271 271 /*
272 272 * Atomically shift a 32-bit word left, returning
273 273 * the value it had *before* the shift was applied
274 274 */
275 275 static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
276 276 #pragma inline(rge_mii_put16)
277 277
↓ open down ↓ |
277 lines elided |
↑ open up ↑ |
278 278 static uint32_t
279 279 rge_atomic_shl32(uint32_t *sp, uint_t count)
280 280 {
281 281 uint32_t oldval;
282 282 uint32_t newval;
283 283
284 284 /* ATOMICALLY */
285 285 do {
286 286 oldval = *sp;
287 287 newval = oldval << count;
288 - } while (cas32(sp, oldval, newval) != oldval);
288 + } while (atomic_cas_32(sp, oldval, newval) != oldval);
289 289
290 290 return (oldval);
291 291 }
292 292
/*
 * PHY operation routines
 */
#if RGE_DEBUGGING

/*
 * Dump all 32 MII registers of the PHY, eight per debug line.
 * Caller must hold <genlock>.
 */
void
rge_phydump(rge_t *rgep)
{
	uint16_t mii_regs[32];
	int reg;

	ASSERT(mutex_owned(rgep->genlock));

	for (reg = 0; reg < 32; reg++)
		mii_regs[reg] = rge_mii_get16(rgep, reg);

	for (reg = 0; reg < 32; reg += 8)
		RGE_DEBUG(("rge_phydump: "
		    "0x%04x %04x %04x %04x %04x %04x %04x %04x",
		    mii_regs[reg+0], mii_regs[reg+1],
		    mii_regs[reg+2], mii_regs[reg+3],
		    mii_regs[reg+4], mii_regs[reg+5],
		    mii_regs[reg+6], mii_regs[reg+7]));
}

#endif	/* RGE_DEBUGGING */
318 318
319 319 static void
320 320 rge_phy_check(rge_t *rgep)
321 321 {
322 322 uint16_t gig_ctl;
323 323
324 324 if (rgep->param_link_up == LINK_STATE_DOWN) {
325 325 /*
326 326 * RTL8169S/8110S PHY has the "PCS bug". Need reset PHY
327 327 * every 15 seconds whin link down & advertise is 1000.
328 328 */
329 329 if (rgep->chipid.phy_ver == PHY_VER_S) {
330 330 gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
331 331 if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
332 332 rgep->link_down_count++;
333 333 if (rgep->link_down_count > 15) {
334 334 (void) rge_phy_reset(rgep);
335 335 rgep->stats.phy_reset++;
336 336 rgep->link_down_count = 0;
337 337 }
338 338 }
339 339 }
340 340 } else {
341 341 rgep->link_down_count = 0;
342 342 }
343 343 }
344 344
345 345 /*
346 346 * Basic low-level function to reset the PHY.
347 347 * Doesn't incorporate any special-case workarounds.
348 348 *
349 349 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
350 350 */
351 351 boolean_t
352 352 rge_phy_reset(rge_t *rgep)
353 353 {
354 354 uint16_t control;
355 355 uint_t count;
356 356
357 357 /*
358 358 * Set the PHY RESET bit, then wait up to 5 ms for it to self-clear
359 359 */
360 360 control = rge_mii_get16(rgep, MII_CONTROL);
361 361 rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
362 362 for (count = 0; count < 5; count++) {
363 363 drv_usecwait(100);
364 364 control = rge_mii_get16(rgep, MII_CONTROL);
365 365 if (BIC(control, MII_CONTROL_RESET))
366 366 return (B_TRUE);
367 367 }
368 368
369 369 RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
370 370 return (B_FALSE);
371 371 }
372 372
373 373 /*
374 374 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
375 375 * and advertisements with the required settings as specified by the various
376 376 * param_* variables that can be poked via the NDD interface.
377 377 *
378 378 * We always reset the PHY and reprogram *all* the relevant registers,
379 379 * not just those changed. This should cause the link to go down, and then
380 380 * back up again once the link is stable and autonegotiation (if enabled)
381 381 * is complete. We should get a link state change interrupt somewhere along
382 382 * the way ...
383 383 *
384 384 * NOTE: <genlock> must already be held by the caller
385 385 */
386 386 void
387 387 rge_phy_update(rge_t *rgep)
388 388 {
389 389 boolean_t adv_autoneg;
390 390 boolean_t adv_pause;
391 391 boolean_t adv_asym_pause;
392 392 boolean_t adv_1000fdx;
393 393 boolean_t adv_1000hdx;
394 394 boolean_t adv_100fdx;
395 395 boolean_t adv_100hdx;
396 396 boolean_t adv_10fdx;
397 397 boolean_t adv_10hdx;
398 398
399 399 uint16_t control;
400 400 uint16_t gigctrl;
401 401 uint16_t anar;
402 402
403 403 ASSERT(mutex_owned(rgep->genlock));
404 404
405 405 RGE_DEBUG(("rge_phy_update: autoneg %d "
406 406 "pause %d asym_pause %d "
407 407 "1000fdx %d 1000hdx %d "
408 408 "100fdx %d 100hdx %d "
409 409 "10fdx %d 10hdx %d ",
410 410 rgep->param_adv_autoneg,
411 411 rgep->param_adv_pause, rgep->param_adv_asym_pause,
412 412 rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
413 413 rgep->param_adv_100fdx, rgep->param_adv_100hdx,
414 414 rgep->param_adv_10fdx, rgep->param_adv_10hdx));
415 415
416 416 control = gigctrl = anar = 0;
417 417
418 418 /*
419 419 * PHY settings are normally based on the param_* variables,
420 420 * but if any loopback mode is in effect, that takes precedence.
421 421 *
422 422 * RGE supports MAC-internal loopback, PHY-internal loopback,
423 423 * and External loopback at a variety of speeds (with a special
424 424 * cable). In all cases, autoneg is turned OFF, full-duplex
425 425 * is turned ON, and the speed/mastership is forced.
426 426 */
427 427 switch (rgep->param_loop_mode) {
428 428 case RGE_LOOP_NONE:
429 429 default:
430 430 adv_autoneg = rgep->param_adv_autoneg;
431 431 adv_pause = rgep->param_adv_pause;
432 432 adv_asym_pause = rgep->param_adv_asym_pause;
433 433 adv_1000fdx = rgep->param_adv_1000fdx;
434 434 adv_1000hdx = rgep->param_adv_1000hdx;
435 435 adv_100fdx = rgep->param_adv_100fdx;
436 436 adv_100hdx = rgep->param_adv_100hdx;
437 437 adv_10fdx = rgep->param_adv_10fdx;
438 438 adv_10hdx = rgep->param_adv_10hdx;
439 439 break;
440 440
441 441 case RGE_LOOP_INTERNAL_PHY:
442 442 case RGE_LOOP_INTERNAL_MAC:
443 443 adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
444 444 adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
445 445 adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
446 446 rgep->param_link_duplex = LINK_DUPLEX_FULL;
447 447
448 448 switch (rgep->param_loop_mode) {
449 449 case RGE_LOOP_INTERNAL_PHY:
450 450 if (rgep->chipid.mac_ver != MAC_VER_8101E) {
451 451 rgep->param_link_speed = 1000;
452 452 adv_1000fdx = B_TRUE;
453 453 } else {
454 454 rgep->param_link_speed = 100;
455 455 adv_100fdx = B_TRUE;
456 456 }
457 457 control = MII_CONTROL_LOOPBACK;
458 458 break;
459 459
460 460 case RGE_LOOP_INTERNAL_MAC:
461 461 if (rgep->chipid.mac_ver != MAC_VER_8101E) {
462 462 rgep->param_link_speed = 1000;
463 463 adv_1000fdx = B_TRUE;
464 464 } else {
465 465 rgep->param_link_speed = 100;
466 466 adv_100fdx = B_TRUE;
467 467 break;
468 468 }
469 469 }
470 470
471 471 RGE_DEBUG(("rge_phy_update: autoneg %d "
472 472 "pause %d asym_pause %d "
473 473 "1000fdx %d 1000hdx %d "
474 474 "100fdx %d 100hdx %d "
475 475 "10fdx %d 10hdx %d ",
476 476 adv_autoneg,
477 477 adv_pause, adv_asym_pause,
478 478 adv_1000fdx, adv_1000hdx,
479 479 adv_100fdx, adv_100hdx,
480 480 adv_10fdx, adv_10hdx));
481 481
482 482 /*
483 483 * We should have at least one technology capability set;
484 484 * if not, we select a default of 1000Mb/s full-duplex
485 485 */
486 486 if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
487 487 !adv_1000hdx && !adv_100hdx && !adv_10hdx) {
488 488 if (rgep->chipid.mac_ver != MAC_VER_8101E)
489 489 adv_1000fdx = B_TRUE;
490 490 } else {
491 491 adv_1000fdx = B_FALSE;
492 492 adv_100fdx = B_TRUE;
493 493 }
494 494 }
495 495
496 496 /*
497 497 * Now transform the adv_* variables into the proper settings
498 498 * of the PHY registers ...
499 499 *
500 500 * If autonegotiation is (now) enabled, we want to trigger
501 501 * a new autonegotiation cycle once the PHY has been
502 502 * programmed with the capabilities to be advertised.
503 503 *
504 504 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
505 505 */
506 506 if (adv_autoneg)
507 507 control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;
508 508
509 509 if (adv_1000fdx)
510 510 control |= MII_CONTROL_1GB|MII_CONTROL_FDUPLEX;
511 511 else if (adv_1000hdx)
512 512 control |= MII_CONTROL_1GB;
513 513 else if (adv_100fdx)
514 514 control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
515 515 else if (adv_100hdx)
516 516 control |= MII_CONTROL_100MB;
517 517 else if (adv_10fdx)
518 518 control |= MII_CONTROL_FDUPLEX;
519 519 else if (adv_10hdx)
520 520 control |= 0;
521 521 else
522 522 { _NOTE(EMPTY); } /* Can't get here anyway ... */
523 523
524 524 if (adv_1000fdx) {
525 525 gigctrl |= MII_1000BT_CTL_ADV_FDX;
526 526 /*
527 527 * Chipset limitation: need set other capabilities to true
528 528 */
529 529 if (rgep->chipid.is_pcie)
530 530 adv_1000hdx = B_TRUE;
531 531 adv_100fdx = B_TRUE;
532 532 adv_100hdx = B_TRUE;
533 533 adv_10fdx = B_TRUE;
534 534 adv_10hdx = B_TRUE;
535 535 }
536 536
537 537 if (adv_1000hdx)
538 538 gigctrl |= MII_1000BT_CTL_ADV_HDX;
539 539
540 540 if (adv_100fdx)
541 541 anar |= MII_ABILITY_100BASE_TX_FD;
542 542 if (adv_100hdx)
543 543 anar |= MII_ABILITY_100BASE_TX;
544 544 if (adv_10fdx)
545 545 anar |= MII_ABILITY_10BASE_T_FD;
546 546 if (adv_10hdx)
547 547 anar |= MII_ABILITY_10BASE_T;
548 548
549 549 if (adv_pause)
550 550 anar |= MII_ABILITY_PAUSE;
551 551 if (adv_asym_pause)
552 552 anar |= MII_ABILITY_ASMPAUSE;
553 553
554 554 /*
555 555 * Munge in any other fixed bits we require ...
556 556 */
557 557 anar |= MII_AN_SELECTOR_8023;
558 558
559 559 /*
560 560 * Restart the PHY and write the new values. Note the
561 561 * time, so that we can say whether subsequent link state
562 562 * changes can be attributed to our reprogramming the PHY
563 563 */
564 564 rge_phy_init(rgep);
565 565 if (rgep->chipid.mac_ver == MAC_VER_8168B_B ||
566 566 rgep->chipid.mac_ver == MAC_VER_8168B_C) {
567 567 /* power up PHY for RTL8168B chipset */
568 568 rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
569 569 rge_mii_put16(rgep, PHY_0E_REG, 0x0000);
570 570 rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
571 571 }
572 572 rge_mii_put16(rgep, MII_AN_ADVERT, anar);
573 573 rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);
574 574 rge_mii_put16(rgep, MII_CONTROL, control);
575 575
576 576 RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
577 577 RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
578 578 RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
579 579 }
580 580
void rge_phy_init(rge_t *rgep);
#pragma no_inline(rge_phy_init)

/*
 * Apply the chip-specific PHY initialisation sequence for the
 * detected MAC version.  The register/value pairs are opaque magic
 * from the vendor's Programming Guide and must be written exactly
 * in this order; do not reorder or "simplify" them.
 */
void
rge_phy_init(rge_t *rgep)
{
	/* all MII accesses below go to PHY address 1 */
	rgep->phy_mii_addr = 1;

	/*
	 * Below phy config steps are copied from the Programming Guide
	 * (there's no detail comments for these steps.)
	 */
	switch (rgep->chipid.mac_ver) {
	case MAC_VER_8169S_D:
	case MAC_VER_8169S_E :
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
		break;

	case MAC_VER_8169SB:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0xD41E);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bff);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8169SC:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x0078);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x05dc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0x2672);
		rge_mii_put16(rgep, PHY_GBSR_REG, 0x6a14);
		rge_mii_put16(rgep, PHY_0B_REG, 0x7cb0);
		rge_mii_put16(rgep, PHY_0C_REG, 0xdb80);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc414);
		rge_mii_put16(rgep, PHY_1C_REG, 0xef03);
		rge_mii_put16(rgep, PHY_1D_REG, 0x3dc8);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_13_REG, 0x0600);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0);
		rge_mii_put16(rgep, PHY_0B_REG, 0x941a);
		rge_mii_put16(rgep, PHY_18_REG, 0x65fe);
		rge_mii_put16(rgep, PHY_1C_REG, 0x1e02);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168B_B:
	case MAC_VER_8168B_C:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_0B_REG, 0x94b0);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc416);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_12_REG, 0x6096);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;
	}
}
690 690
691 691 void rge_chip_ident(rge_t *rgep);
692 692 #pragma no_inline(rge_chip_ident)
693 693
694 694 void
695 695 rge_chip_ident(rge_t *rgep)
696 696 {
697 697 chip_id_t *chip = &rgep->chipid;
698 698 uint32_t val32;
699 699 uint16_t val16;
700 700
701 701 /*
702 702 * Read and record MAC version
703 703 */
704 704 val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
705 705 val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
706 706 chip->mac_ver = val32;
707 707 chip->is_pcie = pci_lcap_locate(rgep->cfg_handle,
708 708 PCI_CAP_ID_PCI_E, &val16) == DDI_SUCCESS;
709 709
710 710 /*
711 711 * Workaround for 8101E_C
712 712 */
713 713 chip->enable_mac_first = !chip->is_pcie;
714 714 if (chip->mac_ver == MAC_VER_8101E_C) {
715 715 chip->is_pcie = B_FALSE;
716 716 }
717 717
718 718 /*
719 719 * Read and record PHY version
720 720 */
721 721 val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
722 722 val16 &= PHY_VER_MASK;
723 723 chip->phy_ver = val16;
724 724
725 725 /* set pci latency timer */
726 726 if (chip->mac_ver == MAC_VER_8169 ||
727 727 chip->mac_ver == MAC_VER_8169S_D ||
728 728 chip->mac_ver == MAC_VER_8169S_E ||
729 729 chip->mac_ver == MAC_VER_8169SC)
730 730 pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);
731 731
732 732 if (chip->mac_ver == MAC_VER_8169SC) {
733 733 val16 = rge_reg_get16(rgep, RT_CONFIG_1_REG);
734 734 val16 &= 0x0300;
735 735 if (val16 == 0x1) /* 66Mhz PCI */
736 736 rge_reg_put32(rgep, 0x7c, 0x000700ff);
737 737 else if (val16 == 0x0) /* 33Mhz PCI */
738 738 rge_reg_put32(rgep, 0x7c, 0x0007ff00);
739 739 }
740 740
741 741 /*
742 742 * PCIE chipset require the Rx buffer start address must be
743 743 * 8-byte alignment and the Rx buffer size must be multiple of 8.
744 744 * We'll just use bcopy in receive procedure for the PCIE chipset.
745 745 */
746 746 if (chip->is_pcie) {
747 747 rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY;
748 748 if (rgep->default_mtu > ETHERMTU) {
749 749 rge_notice(rgep, "Jumbo packets not supported "
750 750 "for this PCIE chipset");
751 751 rgep->default_mtu = ETHERMTU;
752 752 }
753 753 }
754 754 if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
755 755 rgep->head_room = 0;
756 756 else
757 757 rgep->head_room = RGE_HEADROOM;
758 758
759 759 /*
760 760 * Initialize other variables.
761 761 */
762 762 if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU)
763 763 rgep->default_mtu = ETHERMTU;
764 764 if (rgep->default_mtu > ETHERMTU) {
765 765 rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
766 766 rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
767 767 rgep->ethmax_size = RGE_JUMBO_SIZE;
768 768 } else {
769 769 rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
770 770 rgep->txbuf_size = RGE_BUFF_SIZE_STD;
771 771 rgep->ethmax_size = ETHERMAX;
772 772 }
773 773 chip->rxconfig = RX_CONFIG_DEFAULT;
774 774 chip->txconfig = TX_CONFIG_DEFAULT;
775 775
776 776 /* interval to update statistics for polling mode */
777 777 rgep->tick_delta = drv_usectohz(1000*1000/CLK_TICK);
778 778
779 779 /* ensure we are not in polling mode */
780 780 rgep->curr_tick = ddi_get_lbolt() - 2*rgep->tick_delta;
781 781 RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
782 782 rgep->ifname, chip->mac_ver, chip->phy_ver));
783 783 }
784 784
785 785 /*
786 786 * Perform first-stage chip (re-)initialisation, using only config-space
787 787 * accesses:
788 788 *
789 789 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
790 790 * returning the data in the structure pointed to by <idp>.
791 791 * + Enable Memory Space accesses.
792 792 * + Enable Bus Mastering according.
793 793 */
794 794 void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
795 795 #pragma no_inline(rge_chip_cfg_init)
796 796
797 797 void
798 798 rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
799 799 {
800 800 ddi_acc_handle_t handle;
801 801 uint16_t commd;
802 802
803 803 handle = rgep->cfg_handle;
804 804
805 805 /*
806 806 * Save PCI cache line size and subsystem vendor ID
807 807 */
808 808 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
809 809 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
810 810 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
811 811 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
812 812 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
813 813 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
814 814 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
815 815 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
816 816
817 817 /*
818 818 * Turn on Master Enable (DMA) and IO Enable bits.
819 819 * Enable PCI Memory Space accesses
820 820 */
821 821 commd = cidp->command;
822 822 commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
823 823 pci_config_put16(handle, PCI_CONF_COMM, commd);
824 824
825 825 RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
826 826 cidp->vendor, cidp->device, cidp->revision));
827 827 RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
828 828 cidp->subven, cidp->subdev));
829 829 RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
830 830 cidp->clsize, cidp->latency, cidp->command));
831 831 }
832 832
833 833 int rge_chip_reset(rge_t *rgep);
834 834 #pragma no_inline(rge_chip_reset)
835 835
836 836 int
837 837 rge_chip_reset(rge_t *rgep)
838 838 {
839 839 int i;
840 840 uint8_t val8;
841 841
842 842 /*
843 843 * Chip should be in STOP state
844 844 */
845 845 rge_reg_clr8(rgep, RT_COMMAND_REG,
846 846 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
847 847
848 848 /*
849 849 * Disable interrupt
850 850 */
851 851 rgep->int_mask = INT_MASK_NONE;
852 852 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
853 853
854 854 /*
855 855 * Clear pended interrupt
856 856 */
857 857 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
858 858
859 859 /*
860 860 * Reset chip
861 861 */
862 862 rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);
863 863
864 864 /*
865 865 * Wait for reset success
866 866 */
867 867 for (i = 0; i < CHIP_RESET_LOOP; i++) {
868 868 drv_usecwait(10);
869 869 val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
870 870 if (!(val8 & RT_COMMAND_RESET)) {
871 871 rgep->rge_chip_state = RGE_CHIP_RESET;
872 872 return (0);
873 873 }
874 874 }
875 875 RGE_REPORT((rgep, "rge_chip_reset fail."));
876 876 return (-1);
877 877 }
878 878
879 879 void rge_chip_init(rge_t *rgep);
880 880 #pragma no_inline(rge_chip_init)
881 881
882 882 void
883 883 rge_chip_init(rge_t *rgep)
884 884 {
885 885 uint32_t val32;
886 886 uint32_t val16;
887 887 uint32_t *hashp;
888 888 chip_id_t *chip = &rgep->chipid;
889 889
890 890 /*
891 891 * Increase the threshold voltage of RX sensitivity
892 892 */
893 893 if (chip->mac_ver == MAC_VER_8168B_B ||
894 894 chip->mac_ver == MAC_VER_8168B_C ||
895 895 chip->mac_ver == MAC_VER_8101E) {
896 896 rge_ephy_put16(rgep, 0x01, 0x1bd3);
897 897 }
898 898
899 899 if (chip->mac_ver == MAC_VER_8168 ||
900 900 chip->mac_ver == MAC_VER_8168B_B) {
901 901 val16 = rge_reg_get8(rgep, PHY_STATUS_REG);
902 902 val16 = 0x12<<8 | val16;
903 903 rge_reg_put16(rgep, PHY_STATUS_REG, val16);
904 904 rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01);
905 905 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088);
906 906 rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000);
907 907 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0);
908 908 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068);
909 909 val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG);
910 910 val32 |= 0x7000;
911 911 val32 &= 0xffff5fff;
912 912 rge_reg_put32(rgep, RT_CSI_DATA_REG, val32);
913 913 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068);
914 914 }
915 915
916 916 /*
917 917 * Config MII register
918 918 */
919 919 rgep->param_link_up = LINK_STATE_DOWN;
920 920 rge_phy_update(rgep);
921 921
922 922 /*
923 923 * Enable Rx checksum offload.
924 924 * Then for vlan support, we must enable receive vlan de-tagging.
925 925 * Otherwise, there'll be checksum error.
926 926 */
927 927 val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG);
928 928 val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG;
929 929 if (chip->mac_ver == MAC_VER_8169S_D) {
930 930 val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE;
931 931 rge_reg_put8(rgep, RESV_82_REG, 0x01);
932 932 }
933 933 if (chip->mac_ver == MAC_VER_8169S_E ||
934 934 chip->mac_ver == MAC_VER_8169SC) {
935 935 val16 |= MUL_PCI_RW_ENABLE;
936 936 }
937 937 rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03));
938 938
939 939 /*
940 940 * Start transmit/receive before set tx/rx configuration register
941 941 */
942 942 if (chip->enable_mac_first)
943 943 rge_reg_set8(rgep, RT_COMMAND_REG,
944 944 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
945 945
946 946 /*
947 947 * Set dump tally counter register
948 948 */
949 949 val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
950 950 rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
951 951 val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
952 952 val32 &= DUMP_COUNTER_REG_RESV;
953 953 val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
954 954 rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);
955 955
956 956 /*
957 957 * Change to config register write enable mode
958 958 */
959 959 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
960 960
961 961 /*
962 962 * Set Tx/Rx maximum packet size
963 963 */
964 964 if (rgep->default_mtu > ETHERMTU) {
965 965 rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
966 966 rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
967 967 } else if (rgep->chipid.mac_ver != MAC_VER_8101E) {
968 968 rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
969 969 rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
970 970 } else {
971 971 rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD_8101E);
972 972 rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD_8101E);
973 973 }
974 974
975 975 /*
976 976 * Set receive configuration register
977 977 */
978 978 val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
979 979 val32 &= RX_CONFIG_REG_RESV;
980 980 if (rgep->promisc)
981 981 val32 |= RX_ACCEPT_ALL_PKT;
982 982 rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig);
983 983
984 984 /*
985 985 * Set transmit configuration register
986 986 */
987 987 val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
988 988 val32 &= TX_CONFIG_REG_RESV;
989 989 rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig);
990 990
991 991 /*
992 992 * Set Tx/Rx descriptor register
993 993 */
994 994 val32 = rgep->tx_desc.cookie.dmac_laddress;
995 995 rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
996 996 val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
997 997 rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
998 998 rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
999 999 rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
1000 1000 val32 = rgep->rx_desc.cookie.dmac_laddress;
1001 1001 rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
1002 1002 val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
1003 1003 rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);
1004 1004
1005 1005 /*
1006 1006 * Suggested setting from Realtek
1007 1007 */
1008 1008 if (rgep->chipid.mac_ver != MAC_VER_8101E)
1009 1009 rge_reg_put16(rgep, RESV_E2_REG, 0x282a);
1010 1010 else
1011 1011 rge_reg_put16(rgep, RESV_E2_REG, 0x0000);
1012 1012
1013 1013 /*
1014 1014 * Set multicast register
1015 1015 */
1016 1016 hashp = (uint32_t *)rgep->mcast_hash;
1017 1017 if (rgep->promisc) {
1018 1018 rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
1019 1019 rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
1020 1020 } else {
1021 1021 rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
1022 1022 rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
1023 1023 }
1024 1024
1025 1025 /*
1026 1026 * Msic register setting:
1027 1027 * -- Missed packet counter: clear it
1028 1028 * -- TimerInt Register
1029 1029 * -- Timer count register
1030 1030 */
1031 1031 rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
1032 1032 rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
1033 1033 rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
1034 1034
1035 1035 /*
1036 1036 * disable the Unicast Wakeup Frame capability
1037 1037 */
1038 1038 rge_reg_clr8(rgep, RT_CONFIG_5_REG, RT_UNI_WAKE_FRAME);
1039 1039
1040 1040 /*
1041 1041 * Return to normal network/host communication mode
1042 1042 */
1043 1043 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1044 1044 drv_usecwait(20);
1045 1045 }
1046 1046
1047 1047 /*
1048 1048 * rge_chip_start() -- start the chip transmitting and/or receiving,
1049 1049 * including enabling interrupts
1050 1050 */
1051 1051 void rge_chip_start(rge_t *rgep);
1052 1052 #pragma no_inline(rge_chip_start)
1053 1053
1054 1054 void
1055 1055 rge_chip_start(rge_t *rgep)
1056 1056 {
1057 1057 /*
1058 1058 * Clear statistics
1059 1059 */
1060 1060 bzero(&rgep->stats, sizeof (rge_stats_t));
1061 1061 DMA_ZERO(rgep->dma_area_stats);
1062 1062
1063 1063 /*
1064 1064 * Start transmit/receive
1065 1065 */
1066 1066 rge_reg_set8(rgep, RT_COMMAND_REG,
1067 1067 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1068 1068
1069 1069 /*
1070 1070 * Enable interrupt
1071 1071 */
1072 1072 rgep->int_mask = RGE_INT_MASK;
1073 1073 if (rgep->chipid.is_pcie) {
1074 1074 rgep->int_mask |= NO_TXDESC_INT;
1075 1075 }
1076 1076 rgep->rx_fifo_ovf = 0;
1077 1077 rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
1078 1078 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1079 1079
1080 1080 /*
1081 1081 * All done!
1082 1082 */
1083 1083 rgep->rge_chip_state = RGE_CHIP_RUNNING;
1084 1084 }
1085 1085
1086 1086 /*
1087 1087 * rge_chip_stop() -- stop board receiving
1088 1088 *
1089 1089 * Since this function is also invoked by rge_quiesce(), it
1090 1090 * must not block; also, no tracing or logging takes place
1091 1091 * when invoked by rge_quiesce().
1092 1092 */
1093 1093 void rge_chip_stop(rge_t *rgep, boolean_t fault);
1094 1094 #pragma no_inline(rge_chip_stop)
1095 1095
1096 1096 void
1097 1097 rge_chip_stop(rge_t *rgep, boolean_t fault)
1098 1098 {
1099 1099 /*
1100 1100 * Disable interrupt
1101 1101 */
1102 1102 rgep->int_mask = INT_MASK_NONE;
1103 1103 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1104 1104
1105 1105 /*
1106 1106 * Clear pended interrupt
1107 1107 */
1108 1108 if (!rgep->suspended) {
1109 1109 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
1110 1110 }
1111 1111
1112 1112 /*
1113 1113 * Stop the board and disable transmit/receive
1114 1114 */
1115 1115 rge_reg_clr8(rgep, RT_COMMAND_REG,
1116 1116 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1117 1117
1118 1118 if (fault)
1119 1119 rgep->rge_chip_state = RGE_CHIP_FAULT;
1120 1120 else
1121 1121 rgep->rge_chip_state = RGE_CHIP_STOPPED;
1122 1122 }
1123 1123
1124 1124 /*
1125 1125 * rge_get_mac_addr() -- get the MAC address on NIC
1126 1126 */
1127 1127 static void rge_get_mac_addr(rge_t *rgep);
1128 1128 #pragma inline(rge_get_mac_addr)
1129 1129
1130 1130 static void
1131 1131 rge_get_mac_addr(rge_t *rgep)
1132 1132 {
1133 1133 uint8_t *macaddr = rgep->netaddr;
1134 1134 uint32_t val32;
1135 1135
1136 1136 /*
1137 1137 * Read first 4-byte of mac address
1138 1138 */
1139 1139 val32 = rge_reg_get32(rgep, ID_0_REG);
1140 1140 macaddr[0] = val32 & 0xff;
1141 1141 val32 = val32 >> 8;
1142 1142 macaddr[1] = val32 & 0xff;
1143 1143 val32 = val32 >> 8;
1144 1144 macaddr[2] = val32 & 0xff;
1145 1145 val32 = val32 >> 8;
1146 1146 macaddr[3] = val32 & 0xff;
1147 1147
1148 1148 /*
1149 1149 * Read last 2-byte of mac address
1150 1150 */
1151 1151 val32 = rge_reg_get32(rgep, ID_4_REG);
1152 1152 macaddr[4] = val32 & 0xff;
1153 1153 val32 = val32 >> 8;
1154 1154 macaddr[5] = val32 & 0xff;
1155 1155 }
1156 1156
1157 1157 static void rge_set_mac_addr(rge_t *rgep);
1158 1158 #pragma inline(rge_set_mac_addr)
1159 1159
1160 1160 static void
1161 1161 rge_set_mac_addr(rge_t *rgep)
1162 1162 {
1163 1163 uint8_t *p = rgep->netaddr;
1164 1164 uint32_t val32;
1165 1165
1166 1166 /*
1167 1167 * Change to config register write enable mode
1168 1168 */
1169 1169 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1170 1170
1171 1171 /*
1172 1172 * Get first 4 bytes of mac address
1173 1173 */
1174 1174 val32 = p[3];
1175 1175 val32 = val32 << 8;
1176 1176 val32 |= p[2];
1177 1177 val32 = val32 << 8;
1178 1178 val32 |= p[1];
1179 1179 val32 = val32 << 8;
1180 1180 val32 |= p[0];
1181 1181
1182 1182 /*
1183 1183 * Set first 4 bytes of mac address
1184 1184 */
1185 1185 rge_reg_put32(rgep, ID_0_REG, val32);
1186 1186
1187 1187 /*
1188 1188 * Get last 2 bytes of mac address
1189 1189 */
1190 1190 val32 = p[5];
1191 1191 val32 = val32 << 8;
1192 1192 val32 |= p[4];
1193 1193
1194 1194 /*
1195 1195 * Set last 2 bytes of mac address
1196 1196 */
1197 1197 val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
1198 1198 rge_reg_put32(rgep, ID_4_REG, val32);
1199 1199
1200 1200 /*
1201 1201 * Return to normal network/host communication mode
1202 1202 */
1203 1203 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1204 1204 }
1205 1205
1206 1206 static void rge_set_multi_addr(rge_t *rgep);
1207 1207 #pragma inline(rge_set_multi_addr)
1208 1208
1209 1209 static void
1210 1210 rge_set_multi_addr(rge_t *rgep)
1211 1211 {
1212 1212 uint32_t *hashp;
1213 1213
1214 1214 hashp = (uint32_t *)rgep->mcast_hash;
1215 1215
1216 1216 /*
1217 1217 * Change to config register write enable mode
1218 1218 */
1219 1219 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1220 1220 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1221 1221 }
1222 1222 if (rgep->promisc) {
1223 1223 rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
1224 1224 rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
1225 1225 } else {
1226 1226 rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
1227 1227 rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
1228 1228 }
1229 1229
1230 1230 /*
1231 1231 * Return to normal network/host communication mode
1232 1232 */
1233 1233 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1234 1234 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1235 1235 }
1236 1236 }
1237 1237
1238 1238 static void rge_set_promisc(rge_t *rgep);
1239 1239 #pragma inline(rge_set_promisc)
1240 1240
1241 1241 static void
1242 1242 rge_set_promisc(rge_t *rgep)
1243 1243 {
1244 1244 if (rgep->promisc)
1245 1245 rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1246 1246 else
1247 1247 rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1248 1248 }
1249 1249
1250 1250 /*
1251 1251 * rge_chip_sync() -- program the chip with the unicast MAC address,
1252 1252 * the multicast hash table, the required level of promiscuity, and
1253 1253 * the current loopback mode ...
1254 1254 */
1255 1255 void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
1256 1256 #pragma no_inline(rge_chip_sync)
1257 1257
1258 1258 void
1259 1259 rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
1260 1260 {
1261 1261 switch (todo) {
1262 1262 case RGE_GET_MAC:
1263 1263 rge_get_mac_addr(rgep);
1264 1264 break;
1265 1265 case RGE_SET_MAC:
1266 1266 /* Reprogram the unicast MAC address(es) ... */
1267 1267 rge_set_mac_addr(rgep);
1268 1268 break;
1269 1269 case RGE_SET_MUL:
1270 1270 /* Reprogram the hashed multicast address table ... */
1271 1271 rge_set_multi_addr(rgep);
1272 1272 break;
1273 1273 case RGE_SET_PROMISC:
1274 1274 /* Set or clear the PROMISCUOUS mode bit */
1275 1275 rge_set_multi_addr(rgep);
1276 1276 rge_set_promisc(rgep);
1277 1277 break;
1278 1278 default:
1279 1279 break;
1280 1280 }
1281 1281 }
1282 1282
void rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag);
#pragma no_inline(rge_chip_blank)

/*
 * rge_chip_blank() -- interrupt-blanking entry point; deliberately
 * a no-op for this driver (all parameters are unused).
 */
/* ARGSUSED */
void
rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1292 1292
void rge_tx_trigger(rge_t *rgep);
#pragma no_inline(rge_tx_trigger)

/*
 * rge_tx_trigger() -- poke the TX poll register to tell the chip
 * that the normal-priority TX ring has descriptors ready to send.
 */
void
rge_tx_trigger(rge_t *rgep)
{
	rge_reg_put8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}
1301 1301
1302 1302 void rge_hw_stats_dump(rge_t *rgep);
1303 1303 #pragma no_inline(rge_tx_trigger)
1304 1304
1305 1305 void
1306 1306 rge_hw_stats_dump(rge_t *rgep)
1307 1307 {
1308 1308 int i = 0;
1309 1309 uint32_t regval = 0;
1310 1310
1311 1311 if (rgep->rge_mac_state == RGE_MAC_STOPPED)
1312 1312 return;
1313 1313
1314 1314 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1315 1315 while (regval & DUMP_START) {
1316 1316 drv_usecwait(100);
1317 1317 if (++i > STATS_DUMP_LOOP) {
1318 1318 RGE_DEBUG(("rge h/w statistics dump fail!"));
1319 1319 rgep->rge_chip_state = RGE_CHIP_ERROR;
1320 1320 return;
1321 1321 }
1322 1322 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1323 1323 }
1324 1324 DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);
1325 1325
1326 1326 /*
1327 1327 * Start H/W statistics dump for RTL8169 chip
1328 1328 */
1329 1329 rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
1330 1330 }
1331 1331
1332 1332 /*
1333 1333 * ========== Hardware interrupt handler ==========
1334 1334 */
1335 1335
1336 1336 #undef RGE_DBG
1337 1337 #define RGE_DBG RGE_DBG_INT /* debug flag for this code */
1338 1338
1339 1339 static void rge_wake_factotum(rge_t *rgep);
1340 1340 #pragma inline(rge_wake_factotum)
1341 1341
1342 1342 static void
1343 1343 rge_wake_factotum(rge_t *rgep)
1344 1344 {
1345 1345 if (rgep->factotum_flag == 0) {
1346 1346 rgep->factotum_flag = 1;
1347 1347 (void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL);
1348 1348 }
1349 1349 }
1350 1350
1351 1351 /*
1352 1352 * rge_intr() -- handle chip interrupts
1353 1353 */
uint_t rge_intr(caddr_t arg1, caddr_t arg2);
#pragma no_inline(rge_intr)

uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;		/* snapshot of INT_STATUS_REG */
	clock_t now;
	uint32_t tx_pkts;		/* tx packets in the last tick */
	uint32_t rx_pkts;		/* rx packets in the last tick */
	uint32_t poll_rate;		/* target rate; 0 = no polling */
	uint32_t opt_pkts;		/* wire-rate packets per tick */
	uint32_t opt_intrs;		/* optimal interrupts per tick */
	boolean_t update_int_mask = B_FALSE;
	uint32_t itimer;		/* new TIMER_INT_REG value */

	_NOTE(ARGUNUSED(arg2))

	mutex_enter(rgep->genlock);

	/* hardware must not be touched while suspended */
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Was this interrupt caused by our device...
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
		/* indicate it wasn't our interrupt */
	}
	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 * For PCIE chipset, we need disable interrupt first.
	 */
	if (rgep->chipid.is_pcie) {
		rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
		update_int_mask = B_TRUE;
	}
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Calculate optimal polling interval
	 * (adaptive interrupt coalescing: recomputed at most once
	 * per tick, and only at 100M/1000M link speeds)
	 */
	now = ddi_get_lbolt();
	if (now - rgep->curr_tick >= rgep->tick_delta &&
	    (rgep->param_link_speed == RGE_SPEED_1000M ||
	    rgep->param_link_speed == RGE_SPEED_100M)) {
		/* number of rx and tx packets in the last tick */
		tx_pkts = rgep->stats.opackets - rgep->last_opackets;
		rx_pkts = rgep->stats.rpackets - rgep->last_rpackets;

		rgep->last_opackets = rgep->stats.opackets;
		rgep->last_rpackets = rgep->stats.rpackets;

		/* restore interrupt mask */
		rgep->int_mask |= TX_OK_INT | RX_OK_INT;
		if (rgep->chipid.is_pcie) {
			rgep->int_mask |= NO_TXDESC_INT;
		}

		/* optimal number of packets in a tick */
		if (rgep->param_link_speed == RGE_SPEED_1000M) {
			opt_pkts = (1000*1000*1000/8)/ETHERMTU/CLK_TICK;
		} else {
			opt_pkts = (100*1000*1000/8)/ETHERMTU/CLK_TICK;
		}

		/*
		 * calculate polling interval based on rx and tx packets
		 * in the last tick
		 */
		poll_rate = 0;
		if (now - rgep->curr_tick < 2*rgep->tick_delta) {
			opt_intrs = opt_pkts/TX_COALESC;
			if (tx_pkts > opt_intrs) {
				poll_rate = max(tx_pkts/TX_COALESC, opt_intrs);
				rgep->int_mask &= ~(TX_OK_INT | NO_TXDESC_INT);
			}

			opt_intrs = opt_pkts/RX_COALESC;
			if (rx_pkts > opt_intrs) {
				opt_intrs = max(rx_pkts/RX_COALESC, opt_intrs);
				poll_rate = max(opt_intrs, poll_rate);
				rgep->int_mask &= ~RX_OK_INT;
			}
			/* ensure poll_rate reasonable */
			poll_rate = min(poll_rate, opt_pkts*4);
		}

		if (poll_rate) {
			/* move to polling mode */
			if (rgep->chipid.is_pcie) {
				itimer = (TIMER_CLK_PCIE/CLK_TICK)/poll_rate;
			} else {
				itimer = (TIMER_CLK_PCI/CLK_TICK)/poll_rate;
			}
		} else {
			/* move to normal mode */
			itimer = 0;
		}
		RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x",
		    __func__, itimer, rgep->int_mask));
		rge_reg_put32(rgep, TIMER_INT_REG, itimer);

		/* update timestamp for statistics */
		rgep->curr_tick = now;

		/* reset timer */
		int_status |= TIME_OUT_INT;

		update_int_mask = B_TRUE;
	}

	if (int_status & TIME_OUT_INT) {
		/* restart the interval timer by zeroing its count */
		rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
	}

	/* flush post writes */
	(void) rge_reg_get16(rgep, INT_STATUS_REG);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}

	if (int_status & RX_FIFO_OVERFLOW_INT) {
		/* start rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 1;
		if (rgep->int_mask & RX_FIFO_OVERFLOW_INT) {
			rgep->int_mask &= ~RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	} else if (int_status & RGE_RX_INT) {
		/* stop rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 0;
		if ((rgep->int_mask & RX_FIFO_OVERFLOW_INT) == 0) {
			rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	}

	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt
	 * (handled without genlock held)
	 */
	if (int_status & RGE_RX_INT)
		rge_receive(rgep);

	/*
	 * Transmit interrupt
	 */
	if (int_status & TX_ERR_INT) {
		RGE_REPORT((rgep, "tx error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	} else if ((rgep->chipid.is_pcie && (int_status & NO_TXDESC_INT)) ||
	    ((int_status & TX_OK_INT) && rgep->tx_free < RGE_SEND_SLOTS/8)) {
		/* descriptors may be reclaimable: kick resched softint */
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}

	/*
	 * System error interrupt
	 */
	if (int_status & SYS_ERR_INT) {
		RGE_REPORT((rgep, "sys error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	}

	/*
	 * Re-enable interrupt for PCIE chipset or install new int_mask
	 */
	if (update_int_mask)
		rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
1543 1543
1544 1544 /*
1545 1545 * ========== Factotum, implemented as a softint handler ==========
1546 1546 */
1547 1547
1548 1548 #undef RGE_DBG
1549 1549 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */
1550 1550
static boolean_t rge_factotum_link_check(rge_t *rgep);
#pragma no_inline(rge_factotum_link_check)

/*
 * Compare the PHY's current link state against the driver's cached
 * state; on a change, refresh the cached state/speed/duplex and
 * return B_TRUE so the caller can notify the MAC layer.
 */
static boolean_t
rge_factotum_link_check(rge_t *rgep)
{
	uint8_t media_status;
	int32_t link;

	media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
	link = (media_status & PHY_STATUS_LINK_UP) ?
	    LINK_STATE_UP : LINK_STATE_DOWN;
	if (rgep->param_link_up != link) {
		/*
		 * Link change.
		 */
		rgep->param_link_up = link;

		if (link == LINK_STATE_UP) {
			/* decode speed and duplex from the status bits */
			if (media_status & PHY_STATUS_1000MF) {
				/* 1000MF status bit: gigabit, full duplex */
				rgep->param_link_speed = RGE_SPEED_1000M;
				rgep->param_link_duplex = LINK_DUPLEX_FULL;
			} else {
				rgep->param_link_speed =
				    (media_status & PHY_STATUS_100M) ?
				    RGE_SPEED_100M : RGE_SPEED_10M;
				rgep->param_link_duplex =
				    (media_status & PHY_STATUS_DUPLEX_FULL) ?
				    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
			}
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}
1586 1586
1587 1587 /*
1588 1588 * Factotum routine to check for Tx stall, using the 'watchdog' counter
1589 1589 */
static boolean_t rge_factotum_stall_check(rge_t *rgep);
#pragma no_inline(rge_factotum_stall_check)

/*
 * Check for RX and TX stalls; returns B_TRUE if either watchdog
 * has tripped, in which case the caller stops the chip (FAULT
 * state) so it gets reset on the next factotum pass.  Must be
 * called with genlock held.
 */
static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for RX stall ...
	 * rx_fifo_ovf is set to 1 by rge_intr() on an RX FIFO
	 * overflow and cleared on normal RX progress; shifting it
	 * left each pass means it only exceeds the threshold after
	 * several consecutive passes with no RX progress.
	 */
	rgep->rx_fifo_ovf <<= 1;
	if (rgep->rx_fifo_ovf > rge_rx_watchdog_count) {
		RGE_REPORT((rgep, "rx_hang detected"));
		return (B_TRUE);
	}

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here. If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state. The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	if (rgep->resched_needed)
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);

}
1633 1633
1634 1634 /*
1635 1635 * The factotum is woken up when there's something to do that we'd rather
1636 1636 * not do from inside a hardware interrupt handler or high-level cyclic.
1637 1637 * Its two main tasks are:
1638 1638 * reset & restart the chip after an error
1639 1639 * check the link status whenever necessary
1640 1640 */
uint_t rge_chip_factotum(caddr_t arg1, caddr_t arg2);
#pragma no_inline(rge_chip_factotum)

uint_t
rge_chip_factotum(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;
	uint_t result;		/* DDI_INTR_{UN,}CLAIMED return value */
	boolean_t error;	/* chip must be stopped & marked faulty */
	boolean_t linkchg;	/* link state changed this pass */

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/* run only if rge_wake_factotum() actually scheduled us */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		/* normal case: check link state and stall watchdogs */
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		/* error already flagged: stop-and-fault below */
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}
1705 1705
1706 1706 /*
1707 1707 * High-level cyclic handler
1708 1708 *
1709 1709 * This routine schedules a (low-level) softint callback to the
1710 1710 * factotum, and prods the chip to update the status block (which
1711 1711 * will cause a hardware interrupt when complete).
1712 1712 */
1713 1713 void rge_chip_cyclic(void *arg);
1714 1714 #pragma no_inline(rge_chip_cyclic)
1715 1715
1716 1716 void
1717 1717 rge_chip_cyclic(void *arg)
1718 1718 {
1719 1719 rge_t *rgep;
1720 1720
1721 1721 rgep = arg;
1722 1722
1723 1723 switch (rgep->rge_chip_state) {
1724 1724 default:
1725 1725 return;
1726 1726
1727 1727 case RGE_CHIP_RUNNING:
1728 1728 rge_phy_check(rgep);
1729 1729 if (rgep->tx_free < RGE_SEND_SLOTS)
1730 1730 rge_send_recycle(rgep);
1731 1731 break;
1732 1732
1733 1733 case RGE_CHIP_FAULT:
1734 1734 case RGE_CHIP_ERROR:
1735 1735 break;
1736 1736 }
1737 1737
1738 1738 rge_wake_factotum(rgep);
1739 1739 }
1740 1740
1741 1741
1742 1742 /*
1743 1743 * ========== Ioctl subfunctions ==========
1744 1744 */
1745 1745
1746 1746 #undef RGE_DBG
1747 1747 #define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */
1748 1748
1749 1749 #if RGE_DEBUGGING || RGE_DO_PPIO
1750 1750
1751 1751 static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1752 1752 #pragma no_inline(rge_chip_peek_cfg)
1753 1753
1754 1754 static void
1755 1755 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1756 1756 {
1757 1757 uint64_t regval;
1758 1758 uint64_t regno;
1759 1759
1760 1760 RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1761 1761 (void *)rgep, (void *)ppd));
1762 1762
1763 1763 regno = ppd->pp_acc_offset;
1764 1764
1765 1765 switch (ppd->pp_acc_size) {
1766 1766 case 1:
1767 1767 regval = pci_config_get8(rgep->cfg_handle, regno);
1768 1768 break;
1769 1769
1770 1770 case 2:
1771 1771 regval = pci_config_get16(rgep->cfg_handle, regno);
1772 1772 break;
1773 1773
1774 1774 case 4:
1775 1775 regval = pci_config_get32(rgep->cfg_handle, regno);
1776 1776 break;
1777 1777
1778 1778 case 8:
1779 1779 regval = pci_config_get64(rgep->cfg_handle, regno);
1780 1780 break;
1781 1781 }
1782 1782
1783 1783 ppd->pp_acc_data = regval;
1784 1784 }
1785 1785
1786 1786 static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1787 1787 #pragma no_inline(rge_chip_poke_cfg)
1788 1788
1789 1789 static void
1790 1790 rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1791 1791 {
1792 1792 uint64_t regval;
1793 1793 uint64_t regno;
1794 1794
1795 1795 RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
1796 1796 (void *)rgep, (void *)ppd));
1797 1797
1798 1798 regno = ppd->pp_acc_offset;
1799 1799 regval = ppd->pp_acc_data;
1800 1800
1801 1801 switch (ppd->pp_acc_size) {
1802 1802 case 1:
1803 1803 pci_config_put8(rgep->cfg_handle, regno, regval);
1804 1804 break;
1805 1805
1806 1806 case 2:
1807 1807 pci_config_put16(rgep->cfg_handle, regno, regval);
1808 1808 break;
1809 1809
1810 1810 case 4:
1811 1811 pci_config_put32(rgep->cfg_handle, regno, regval);
1812 1812 break;
1813 1813
1814 1814 case 8:
1815 1815 pci_config_put64(rgep->cfg_handle, regno, regval);
1816 1816 break;
1817 1817 }
1818 1818 }
1819 1819
1820 1820 static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1821 1821 #pragma no_inline(rge_chip_peek_reg)
1822 1822
1823 1823 static void
1824 1824 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1825 1825 {
1826 1826 uint64_t regval;
1827 1827 void *regaddr;
1828 1828
1829 1829 RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1830 1830 (void *)rgep, (void *)ppd));
1831 1831
1832 1832 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1833 1833
1834 1834 switch (ppd->pp_acc_size) {
1835 1835 case 1:
1836 1836 regval = ddi_get8(rgep->io_handle, regaddr);
1837 1837 break;
1838 1838
1839 1839 case 2:
1840 1840 regval = ddi_get16(rgep->io_handle, regaddr);
1841 1841 break;
1842 1842
1843 1843 case 4:
1844 1844 regval = ddi_get32(rgep->io_handle, regaddr);
1845 1845 break;
1846 1846
1847 1847 case 8:
1848 1848 regval = ddi_get64(rgep->io_handle, regaddr);
1849 1849 break;
1850 1850 }
1851 1851
1852 1852 ppd->pp_acc_data = regval;
1853 1853 }
1854 1854
1855 1855 static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1856 1856 #pragma no_inline(rge_chip_peek_reg)
1857 1857
1858 1858 static void
1859 1859 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1860 1860 {
1861 1861 uint64_t regval;
1862 1862 void *regaddr;
1863 1863
1864 1864 RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1865 1865 (void *)rgep, (void *)ppd));
1866 1866
1867 1867 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1868 1868 regval = ppd->pp_acc_data;
1869 1869
1870 1870 switch (ppd->pp_acc_size) {
1871 1871 case 1:
1872 1872 ddi_put8(rgep->io_handle, regaddr, regval);
1873 1873 break;
1874 1874
1875 1875 case 2:
1876 1876 ddi_put16(rgep->io_handle, regaddr, regval);
1877 1877 break;
1878 1878
1879 1879 case 4:
1880 1880 ddi_put32(rgep->io_handle, regaddr, regval);
1881 1881 break;
1882 1882
1883 1883 case 8:
1884 1884 ddi_put64(rgep->io_handle, regaddr, regval);
1885 1885 break;
1886 1886 }
1887 1887 }
1888 1888
static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_peek_mii)

/*
 * Read one 16-bit MII (PHY) register into ppd->pp_acc_data; the
 * request's byte offset is halved to get the register number.
 */
static void
rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
}
1900 1900
static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_poke_mii)

/*
 * Write ppd->pp_acc_data to one 16-bit MII (PHY) register; the
 * request's byte offset is halved to get the register number.
 */
static void
rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
}
1912 1912
1913 1913 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1914 1914 #pragma no_inline(rge_chip_peek_mem)
1915 1915
1916 1916 static void
1917 1917 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1918 1918 {
1919 1919 uint64_t regval;
1920 1920 void *vaddr;
1921 1921
1922 1922 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1923 1923 (void *)rgep, (void *)ppd));
1924 1924
1925 1925 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1926 1926
1927 1927 switch (ppd->pp_acc_size) {
1928 1928 case 1:
1929 1929 regval = *(uint8_t *)vaddr;
1930 1930 break;
1931 1931
1932 1932 case 2:
1933 1933 regval = *(uint16_t *)vaddr;
1934 1934 break;
1935 1935
1936 1936 case 4:
1937 1937 regval = *(uint32_t *)vaddr;
1938 1938 break;
1939 1939
1940 1940 case 8:
1941 1941 regval = *(uint64_t *)vaddr;
1942 1942 break;
1943 1943 }
1944 1944
1945 1945 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1946 1946 (void *)rgep, (void *)ppd, regval, vaddr));
1947 1947
1948 1948 ppd->pp_acc_data = regval;
1949 1949 }
1950 1950
1951 1951 static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1952 1952 #pragma no_inline(rge_chip_poke_mem)
1953 1953
1954 1954 static void
1955 1955 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1956 1956 {
1957 1957 uint64_t regval;
1958 1958 void *vaddr;
1959 1959
1960 1960 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
1961 1961 (void *)rgep, (void *)ppd));
1962 1962
1963 1963 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1964 1964 regval = ppd->pp_acc_data;
1965 1965
1966 1966 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
1967 1967 (void *)rgep, (void *)ppd, regval, vaddr));
1968 1968
1969 1969 switch (ppd->pp_acc_size) {
1970 1970 case 1:
1971 1971 *(uint8_t *)vaddr = (uint8_t)regval;
1972 1972 break;
1973 1973
1974 1974 case 2:
1975 1975 *(uint16_t *)vaddr = (uint16_t)regval;
1976 1976 break;
1977 1977
1978 1978 case 4:
1979 1979 *(uint32_t *)vaddr = (uint32_t)regval;
1980 1980 break;
1981 1981
1982 1982 case 8:
1983 1983 *(uint64_t *)vaddr = (uint64_t)regval;
1984 1984 break;
1985 1985 }
1986 1986 }
1987 1987
1988 1988 static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
1989 1989 struct iocblk *iocp);
1990 1990 #pragma no_inline(rge_pp_ioctl)
1991 1991
1992 1992 static enum ioc_reply
1993 1993 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
1994 1994 {
1995 1995 void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
1996 1996 rge_peekpoke_t *ppd;
1997 1997 dma_area_t *areap;
1998 1998 uint64_t sizemask;
1999 1999 uint64_t mem_va;
2000 2000 uint64_t maxoff;
2001 2001 boolean_t peek;
2002 2002
2003 2003 switch (cmd) {
2004 2004 default:
2005 2005 /* NOTREACHED */
2006 2006 rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
2007 2007 return (IOC_INVAL);
2008 2008
2009 2009 case RGE_PEEK:
2010 2010 peek = B_TRUE;
2011 2011 break;
2012 2012
2013 2013 case RGE_POKE:
2014 2014 peek = B_FALSE;
2015 2015 break;
2016 2016 }
2017 2017
2018 2018 /*
2019 2019 * Validate format of ioctl
2020 2020 */
2021 2021 if (iocp->ioc_count != sizeof (rge_peekpoke_t))
2022 2022 return (IOC_INVAL);
2023 2023 if (mp->b_cont == NULL)
2024 2024 return (IOC_INVAL);
2025 2025 ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;
2026 2026
2027 2027 /*
2028 2028 * Validate request parameters
2029 2029 */
2030 2030 switch (ppd->pp_acc_space) {
2031 2031 default:
2032 2032 return (IOC_INVAL);
2033 2033
2034 2034 case RGE_PP_SPACE_CFG:
2035 2035 /*
2036 2036 * Config space
2037 2037 */
2038 2038 sizemask = 8|4|2|1;
2039 2039 mem_va = 0;
2040 2040 maxoff = PCI_CONF_HDR_SIZE;
2041 2041 ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
2042 2042 break;
2043 2043
2044 2044 case RGE_PP_SPACE_REG:
2045 2045 /*
2046 2046 * Memory-mapped I/O space
2047 2047 */
2048 2048 sizemask = 8|4|2|1;
2049 2049 mem_va = 0;
2050 2050 maxoff = RGE_REGISTER_MAX;
2051 2051 ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
2052 2052 break;
2053 2053
2054 2054 case RGE_PP_SPACE_MII:
2055 2055 /*
2056 2056 * PHY's MII registers
2057 2057 * NB: all PHY registers are two bytes, but the
2058 2058 * addresses increment in ones (word addressing).
2059 2059 * So we scale the address here, then undo the
2060 2060 * transformation inside the peek/poke functions.
2061 2061 */
2062 2062 ppd->pp_acc_offset *= 2;
2063 2063 sizemask = 2;
2064 2064 mem_va = 0;
2065 2065 maxoff = (MII_MAXREG+1)*2;
2066 2066 ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
2067 2067 break;
2068 2068
2069 2069 case RGE_PP_SPACE_RGE:
2070 2070 /*
2071 2071 * RGE data structure!
2072 2072 */
2073 2073 sizemask = 8|4|2|1;
2074 2074 mem_va = (uintptr_t)rgep;
2075 2075 maxoff = sizeof (*rgep);
2076 2076 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2077 2077 break;
2078 2078
2079 2079 case RGE_PP_SPACE_STATISTICS:
2080 2080 case RGE_PP_SPACE_TXDESC:
2081 2081 case RGE_PP_SPACE_TXBUFF:
2082 2082 case RGE_PP_SPACE_RXDESC:
2083 2083 case RGE_PP_SPACE_RXBUFF:
2084 2084 /*
2085 2085 * Various DMA_AREAs
2086 2086 */
2087 2087 switch (ppd->pp_acc_space) {
2088 2088 case RGE_PP_SPACE_TXDESC:
2089 2089 areap = &rgep->dma_area_txdesc;
2090 2090 break;
2091 2091 case RGE_PP_SPACE_RXDESC:
2092 2092 areap = &rgep->dma_area_rxdesc;
2093 2093 break;
2094 2094 case RGE_PP_SPACE_STATISTICS:
2095 2095 areap = &rgep->dma_area_stats;
2096 2096 break;
2097 2097 }
2098 2098
2099 2099 sizemask = 8|4|2|1;
2100 2100 mem_va = (uintptr_t)areap->mem_va;
2101 2101 maxoff = areap->alength;
2102 2102 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2103 2103 break;
2104 2104 }
2105 2105
2106 2106 switch (ppd->pp_acc_size) {
2107 2107 default:
2108 2108 return (IOC_INVAL);
2109 2109
2110 2110 case 8:
2111 2111 case 4:
2112 2112 case 2:
2113 2113 case 1:
2114 2114 if ((ppd->pp_acc_size & sizemask) == 0)
2115 2115 return (IOC_INVAL);
2116 2116 break;
2117 2117 }
2118 2118
2119 2119 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
2120 2120 return (IOC_INVAL);
2121 2121
2122 2122 if (ppd->pp_acc_offset >= maxoff)
2123 2123 return (IOC_INVAL);
2124 2124
2125 2125 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
2126 2126 return (IOC_INVAL);
2127 2127
2128 2128 /*
2129 2129 * All OK - go do it!
2130 2130 */
2131 2131 ppd->pp_acc_offset += mem_va;
2132 2132 (*ppfn)(rgep, ppd);
2133 2133 return (peek ? IOC_REPLY : IOC_ACK);
2134 2134 }
2135 2135
2136 2136 static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
2137 2137 struct iocblk *iocp);
2138 2138 #pragma no_inline(rge_diag_ioctl)
2139 2139
2140 2140 static enum ioc_reply
2141 2141 rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
2142 2142 {
2143 2143 ASSERT(mutex_owned(rgep->genlock));
2144 2144
2145 2145 switch (cmd) {
2146 2146 default:
2147 2147 /* NOTREACHED */
2148 2148 rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
2149 2149 return (IOC_INVAL);
2150 2150
2151 2151 case RGE_DIAG:
2152 2152 /*
2153 2153 * Currently a no-op
2154 2154 */
2155 2155 return (IOC_ACK);
2156 2156
2157 2157 case RGE_PEEK:
2158 2158 case RGE_POKE:
2159 2159 return (rge_pp_ioctl(rgep, cmd, mp, iocp));
2160 2160
2161 2161 case RGE_PHY_RESET:
2162 2162 return (IOC_RESTART_ACK);
2163 2163
2164 2164 case RGE_SOFT_RESET:
2165 2165 case RGE_HARD_RESET:
2166 2166 /*
2167 2167 * Reset and reinitialise the 570x hardware
2168 2168 */
2169 2169 rge_restart(rgep);
2170 2170 return (IOC_ACK);
2171 2171 }
2172 2172
2173 2173 /* NOTREACHED */
2174 2174 }
2175 2175
2176 2176 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */
2177 2177
2178 2178 static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
2179 2179 struct iocblk *iocp);
2180 2180 #pragma no_inline(rge_mii_ioctl)
2181 2181
2182 2182 static enum ioc_reply
2183 2183 rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
2184 2184 {
2185 2185 struct rge_mii_rw *miirwp;
2186 2186
2187 2187 /*
2188 2188 * Validate format of ioctl
2189 2189 */
2190 2190 if (iocp->ioc_count != sizeof (struct rge_mii_rw))
2191 2191 return (IOC_INVAL);
2192 2192 if (mp->b_cont == NULL)
2193 2193 return (IOC_INVAL);
2194 2194 miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;
2195 2195
2196 2196 /*
2197 2197 * Validate request parameters ...
2198 2198 */
2199 2199 if (miirwp->mii_reg > MII_MAXREG)
2200 2200 return (IOC_INVAL);
2201 2201
2202 2202 switch (cmd) {
2203 2203 default:
2204 2204 /* NOTREACHED */
2205 2205 rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
2206 2206 return (IOC_INVAL);
2207 2207
2208 2208 case RGE_MII_READ:
2209 2209 miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
2210 2210 return (IOC_REPLY);
2211 2211
2212 2212 case RGE_MII_WRITE:
2213 2213 rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
2214 2214 return (IOC_ACK);
2215 2215 }
2216 2216
2217 2217 /* NOTREACHED */
2218 2218 }
2219 2219
enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp,
	struct iocblk *iocp);
#pragma no_inline(rge_chip_ioctl)

/*
 * Top-level dispatcher for all driver-private ioctls.  Routes
 * diagnostic commands to rge_diag_ioctl() (only when the driver is
 * built with RGE_DEBUGGING or RGE_DO_PPIO) and MII register access
 * commands to rge_mii_ioctl().  Must be called with the driver's
 * general lock held.
 */
enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
	/* Diagnostic commands exist only in debug/PPIO builds */
#if RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif /* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}
↓ open down ↓ |
1962 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX