Print this page
5255 uts shouldn't open-code ISP2
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/arn/arn_main.c
+++ new/usr/src/uts/common/io/arn/arn_main.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2008 Atheros Communications Inc.
8 8 *
9 9 * Permission to use, copy, modify, and/or distribute this software for any
10 10 * purpose with or without fee is hereby granted, provided that the above
11 11 * copyright notice and this permission notice appear in all copies.
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
12 12 *
13 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 20 */
21 21
22 +#include <sys/sysmacros.h>
22 23 #include <sys/param.h>
23 24 #include <sys/types.h>
24 25 #include <sys/signal.h>
25 26 #include <sys/stream.h>
26 27 #include <sys/termio.h>
27 28 #include <sys/errno.h>
28 29 #include <sys/file.h>
29 30 #include <sys/cmn_err.h>
30 31 #include <sys/stropts.h>
31 32 #include <sys/strsubr.h>
32 33 #include <sys/strtty.h>
33 34 #include <sys/kbio.h>
34 35 #include <sys/cred.h>
35 36 #include <sys/stat.h>
36 37 #include <sys/consdev.h>
37 38 #include <sys/kmem.h>
38 39 #include <sys/modctl.h>
39 40 #include <sys/ddi.h>
40 41 #include <sys/sunddi.h>
41 42 #include <sys/pci.h>
42 43 #include <sys/errno.h>
43 44 #include <sys/mac_provider.h>
44 45 #include <sys/dlpi.h>
45 46 #include <sys/ethernet.h>
46 47 #include <sys/list.h>
47 48 #include <sys/byteorder.h>
48 49 #include <sys/strsun.h>
49 50 #include <sys/policy.h>
50 51 #include <inet/common.h>
51 52 #include <inet/nd.h>
52 53 #include <inet/mi.h>
53 54 #include <inet/wifi_ioctl.h>
54 55 #include <sys/mac_wifi.h>
55 56 #include <sys/net80211.h>
56 57 #include <sys/net80211_proto.h>
57 58 #include <sys/net80211_ht.h>
58 59
59 60
60 61 #include "arn_ath9k.h"
61 62 #include "arn_core.h"
62 63 #include "arn_reg.h"
63 64 #include "arn_hw.h"
64 65
65 66 #define ARN_MAX_RSSI 45 /* max rssi */
66 67
/*
 * Default 11n rates supported by this station.
 */
70 71 extern struct ieee80211_htrateset ieee80211_rateset_11n;
71 72
72 73 /*
73 74 * PIO access attributes for registers
74 75 */
static ddi_device_acc_attr_t arn_reg_accattr = {
	DDI_DEVICE_ATTR_V0,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* registers are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering of register accesses */
	DDI_DEFAULT_ACC
};
81 82
82 83 /*
83 84 * DMA access attributes for descriptors: NOT to be byte swapped.
84 85 */
static ddi_device_acc_attr_t arn_desc_accattr = {
	DDI_DEVICE_ATTR_V0,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian, no swap */
	DDI_STRICTORDER_ACC,	/* descriptor writes must not be reordered */
	DDI_DEFAULT_ACC
};
91 92
92 93 /*
93 94 * Describes the chip's DMA engine
94 95 */
static ddi_dma_attr_t arn_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address (32-bit DMA only) */
	0x3ffffU,		/* counter register max */
	1,			/* alignment */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0x3ffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather: at most one cookie */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
109 110
static ddi_dma_attr_t arn_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address (32-bit DMA only) */
	0xffffffffU,		/* counter register max */
	0x1000,			/* alignment: 4 KB for the descriptor ring */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0xffffffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather: at most one cookie */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
124 125
125 126 #define ATH_DEF_CACHE_BYTES 32 /* default cache line size */
126 127
127 128 static kmutex_t arn_loglock;
128 129 static void *arn_soft_state_p = NULL;
129 130 static int arn_dwelltime = 200; /* scan interval */
130 131
131 132 static int arn_m_stat(void *, uint_t, uint64_t *);
132 133 static int arn_m_start(void *);
133 134 static void arn_m_stop(void *);
134 135 static int arn_m_promisc(void *, boolean_t);
135 136 static int arn_m_multicst(void *, boolean_t, const uint8_t *);
136 137 static int arn_m_unicst(void *, const uint8_t *);
137 138 static mblk_t *arn_m_tx(void *, mblk_t *);
138 139 static void arn_m_ioctl(void *, queue_t *, mblk_t *);
139 140 static int arn_m_setprop(void *, const char *, mac_prop_id_t,
140 141 uint_t, const void *);
141 142 static int arn_m_getprop(void *, const char *, mac_prop_id_t,
142 143 uint_t, void *);
143 144 static void arn_m_propinfo(void *, const char *, mac_prop_id_t,
144 145 mac_prop_info_handle_t);
145 146
/* MAC Callback Functions */
static mac_callbacks_t arn_m_callbacks = {
	/* bitmask announcing which optional callbacks below are valid */
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	arn_m_stat,
	arn_m_start,
	arn_m_stop,
	arn_m_promisc,
	arn_m_multicst,
	arn_m_unicst,
	arn_m_tx,
	/*
	 * NOTE(review): the NULL entries are unimplemented optional slots;
	 * confirm slot order against mac_callbacks_t in mac_provider.h.
	 */
	NULL,
	arn_m_ioctl,
	NULL,
	NULL,
	NULL,
	arn_m_setprop,
	arn_m_getprop,
	arn_m_propinfo
};
165 166
166 167 /*
167 168 * ARN_DBG_HW
168 169 * ARN_DBG_REG_IO
169 170 * ARN_DBG_QUEUE
170 171 * ARN_DBG_EEPROM
171 172 * ARN_DBG_XMIT
172 173 * ARN_DBG_RECV
173 174 * ARN_DBG_CALIBRATE
174 175 * ARN_DBG_CHANNEL
175 176 * ARN_DBG_INTERRUPT
176 177 * ARN_DBG_REGULATORY
177 178 * ARN_DBG_ANI
178 179 * ARN_DBG_POWER_MGMT
179 180 * ARN_DBG_KEYCACHE
180 181 * ARN_DBG_BEACON
181 182 * ARN_DBG_RATE
182 183 * ARN_DBG_INIT
183 184 * ARN_DBG_ATTACH
184 185 * ARN_DBG_DEATCH
185 186 * ARN_DBG_AGGR
186 187 * ARN_DBG_RESET
187 188 * ARN_DBG_FATAL
188 189 * ARN_DBG_ANY
189 190 * ARN_DBG_ALL
190 191 */
191 192 uint32_t arn_dbg_mask = 0;
192 193
193 194 /*
194 195 * Exception/warning cases not leading to panic.
195 196 */
196 197 void
197 198 arn_problem(const int8_t *fmt, ...)
198 199 {
199 200 va_list args;
200 201
201 202 mutex_enter(&arn_loglock);
202 203
203 204 va_start(args, fmt);
204 205 vcmn_err(CE_WARN, fmt, args);
205 206 va_end(args);
206 207
207 208 mutex_exit(&arn_loglock);
208 209 }
209 210
210 211 /*
211 212 * Normal log information independent of debug.
212 213 */
213 214 void
214 215 arn_log(const int8_t *fmt, ...)
215 216 {
216 217 va_list args;
217 218
218 219 mutex_enter(&arn_loglock);
219 220
220 221 va_start(args, fmt);
221 222 vcmn_err(CE_CONT, fmt, args);
222 223 va_end(args);
223 224
224 225 mutex_exit(&arn_loglock);
225 226 }
226 227
227 228 void
228 229 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
229 230 {
230 231 va_list args;
231 232
232 233 if (dbg_flags & arn_dbg_mask) {
233 234 mutex_enter(&arn_loglock);
234 235 va_start(args, fmt);
235 236 vcmn_err(CE_CONT, fmt, args);
236 237 va_end(args);
237 238 mutex_exit(&arn_loglock);
238 239 }
239 240 }
240 241
241 242 /*
242 243 * Read and write, they both share the same lock. We do this to serialize
243 244 * reads and writes on Atheros 802.11n PCI devices only. This is required
244 245 * as the FIFO on these devices can only accept sanely 2 requests. After
245 246 * that the device goes bananas. Serializing the reads/writes prevents this
246 247 * from happening.
247 248 */
248 249 void
249 250 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
250 251 {
251 252 struct arn_softc *sc = ah->ah_sc;
252 253 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
253 254 mutex_enter(&sc->sc_serial_rw);
254 255 ddi_put32(sc->sc_io_handle,
255 256 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
256 257 mutex_exit(&sc->sc_serial_rw);
257 258 } else {
258 259 ddi_put32(sc->sc_io_handle,
259 260 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
260 261 }
261 262 }
262 263
263 264 unsigned int
264 265 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
265 266 {
266 267 uint32_t val;
267 268 struct arn_softc *sc = ah->ah_sc;
268 269 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
269 270 mutex_enter(&sc->sc_serial_rw);
270 271 val = ddi_get32(sc->sc_io_handle,
271 272 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
272 273 mutex_exit(&sc->sc_serial_rw);
273 274 } else {
274 275 val = ddi_get32(sc->sc_io_handle,
275 276 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
276 277 }
277 278
278 279 return (val);
279 280 }
280 281
281 282 /*
282 283 * Allocate an area of memory and a DMA handle for accessing it
283 284 */
284 285 static int
285 286 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
286 287 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
287 288 uint_t bind_flags, dma_area_t *dma_p)
288 289 {
289 290 int err;
290 291
291 292 /*
292 293 * Allocate handle
293 294 */
294 295 err = ddi_dma_alloc_handle(devinfo, dma_attr,
295 296 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
296 297 if (err != DDI_SUCCESS)
297 298 return (DDI_FAILURE);
298 299
299 300 /*
300 301 * Allocate memory
301 302 */
302 303 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
303 304 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
304 305 &dma_p->alength, &dma_p->acc_hdl);
305 306 if (err != DDI_SUCCESS)
306 307 return (DDI_FAILURE);
307 308
308 309 /*
309 310 * Bind the two together
310 311 */
311 312 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
312 313 dma_p->mem_va, dma_p->alength, bind_flags,
313 314 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
314 315 if (err != DDI_DMA_MAPPED)
315 316 return (DDI_FAILURE);
316 317
317 318 dma_p->nslots = ~0U;
318 319 dma_p->size = ~0U;
319 320 dma_p->token = ~0U;
320 321 dma_p->offset = 0;
321 322 return (DDI_SUCCESS);
322 323 }
323 324
324 325 /*
325 326 * Free one allocated area of DMAable memory
326 327 */
327 328 static void
328 329 arn_free_dma_mem(dma_area_t *dma_p)
329 330 {
330 331 if (dma_p->dma_hdl != NULL) {
331 332 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
332 333 if (dma_p->acc_hdl != NULL) {
333 334 ddi_dma_mem_free(&dma_p->acc_hdl);
334 335 dma_p->acc_hdl = NULL;
335 336 }
336 337 ddi_dma_free_handle(&dma_p->dma_hdl);
337 338 dma_p->ncookies = 0;
338 339 dma_p->dma_hdl = NULL;
339 340 }
340 341 }
341 342
/*
 * Initialize a tx, rx, or beacon buffer list: link each ath_buf to its
 * hardware descriptor and allocate a per-buffer DMA area of buflen bytes.
 * *pbf and *pds are cursors into the ath_buf array and the descriptor
 * ring; on success they are advanced past the nbuf entries consumed here
 * so the caller can carve the next list out of the same arrays.
 * Returns DDI_SUCCESS, or the DDI error from arn_alloc_dma_mem(), in
 * which case the caller tears everything down via arn_desc_free().
 */
static int
arn_buflist_setup(dev_info_t *devinfo,
    struct arn_softc *sc,
    list_t *bflist,
    struct ath_buf **pbf,
    struct ath_desc **pds,
    int nbuf,
    uint_t dmabflags,
    uint32_t buflen)
{
	int i, err;
	struct ath_buf *bf = *pbf;
	struct ath_desc *ds = *pds;

	list_create(bflist, sizeof (struct ath_buf),
	    offsetof(struct ath_buf, bf_node));
	for (i = 0; i < nbuf; i++, bf++, ds++) {
		bf->bf_desc = ds;
		/* device-visible address of this descriptor */
		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
		/* inserted before the alloc so cleanup can find it */
		list_insert_tail(bflist, bf);

		/* alloc DMA memory */
		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
		    buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
		    dmabflags, &bf->bf_dma);
		if (err != DDI_SUCCESS)
			return (err);
	}
	/* cursors advanced only on full success */
	*pbf = bf;
	*pds = ds;

	return (DDI_SUCCESS);
}
380 381
381 382 /*
382 383 * Destroy tx, rx or beacon buffer list. Free DMA memory.
383 384 */
384 385 static void
385 386 arn_buflist_cleanup(list_t *buflist)
386 387 {
387 388 struct ath_buf *bf;
388 389
389 390 if (!buflist)
390 391 return;
391 392
392 393 bf = list_head(buflist);
393 394 while (bf != NULL) {
394 395 if (bf->bf_m != NULL) {
395 396 freemsg(bf->bf_m);
396 397 bf->bf_m = NULL;
397 398 }
398 399 /* Free DMA buffer */
399 400 arn_free_dma_mem(&bf->bf_dma);
400 401 if (bf->bf_in != NULL) {
401 402 ieee80211_free_node(bf->bf_in);
402 403 bf->bf_in = NULL;
403 404 }
404 405 list_remove(buflist, bf);
405 406 bf = list_head(buflist);
406 407 }
407 408 list_destroy(buflist);
408 409 }
409 410
/*
 * Release all TX/RX (and beacon) buffer lists, the shared descriptor
 * DMA area, and the ath_buf array that backs the lists.
 */
static void
arn_desc_free(struct arn_softc *sc)
{
	/* per-packet buffer lists first; each frees its own DMA areas */
	arn_buflist_cleanup(&sc->sc_txbuf_list);
	arn_buflist_cleanup(&sc->sc_rxbuf_list);
#ifdef ARN_IBSS
	arn_buflist_cleanup(&sc->sc_bcbuf_list);
#endif

	/* Free descriptor DMA buffer */
	arn_free_dma_mem(&sc->sc_desc_dma);

	/* finally the ath_buf bookkeeping array allocated in arn_desc_alloc */
	kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
	sc->sc_vbufptr = NULL;
}
425 426
426 427 static int
427 428 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
428 429 {
429 430 int err;
430 431 size_t size;
431 432 struct ath_desc *ds;
432 433 struct ath_buf *bf;
433 434
434 435 #ifdef ARN_IBSS
435 436 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
436 437 #else
437 438 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
438 439 #endif
439 440
440 441 err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
441 442 &arn_desc_accattr, DDI_DMA_CONSISTENT,
442 443 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
443 444
444 445 /* virtual address of the first descriptor */
445 446 sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
446 447
447 448 ds = sc->sc_desc;
448 449 ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
449 450 "%p (%d) -> %p\n",
450 451 sc->sc_desc, sc->sc_desc_dma.alength,
451 452 sc->sc_desc_dma.cookie.dmac_address));
452 453
453 454 /* allocate data structures to describe TX/RX DMA buffers */
454 455 #ifdef ARN_IBSS
455 456 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
456 457 ATH_BCBUF);
457 458 #else
458 459 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
459 460 #endif
460 461 bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
461 462 sc->sc_vbufptr = bf;
462 463
463 464 /* DMA buffer size for each TX/RX packet */
464 465 #ifdef ARN_TX_AGGREGRATION
465 466 sc->tx_dmabuf_size =
466 467 roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
467 468 min(sc->sc_cachelsz, (uint16_t)64));
468 469 #else
469 470 sc->tx_dmabuf_size =
470 471 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
471 472 #endif
472 473 sc->rx_dmabuf_size =
473 474 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
474 475
475 476 /* create RX buffer list */
476 477 err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
477 478 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
478 479 if (err != DDI_SUCCESS) {
479 480 arn_desc_free(sc);
480 481 return (err);
481 482 }
482 483
483 484 /* create TX buffer list */
484 485 err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
485 486 ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
486 487 if (err != DDI_SUCCESS) {
487 488 arn_desc_free(sc);
488 489 return (err);
489 490 }
490 491
491 492 /* create beacon buffer list */
492 493 #ifdef ARN_IBSS
493 494 err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
494 495 ATH_BCBUF, DDI_DMA_STREAMING);
495 496 if (err != DDI_SUCCESS) {
496 497 arn_desc_free(sc);
497 498 return (err);
498 499 }
499 500 #endif
500 501
501 502 return (DDI_SUCCESS);
502 503 }
503 504
504 505 static struct ath_rate_table *
505 506 /* LINTED E_STATIC_UNUSED */
506 507 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
507 508 {
508 509 struct ath_rate_table *rate_table = NULL;
509 510
510 511 switch (mode) {
511 512 case IEEE80211_MODE_11A:
512 513 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
513 514 break;
514 515 case IEEE80211_MODE_11B:
515 516 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
516 517 break;
517 518 case IEEE80211_MODE_11G:
518 519 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
519 520 break;
520 521 #ifdef ARB_11N
521 522 case IEEE80211_MODE_11NA_HT20:
522 523 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
523 524 break;
524 525 case IEEE80211_MODE_11NG_HT20:
525 526 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
526 527 break;
527 528 case IEEE80211_MODE_11NA_HT40PLUS:
528 529 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
529 530 break;
530 531 case IEEE80211_MODE_11NA_HT40MINUS:
531 532 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
532 533 break;
533 534 case IEEE80211_MODE_11NG_HT40PLUS:
534 535 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
535 536 break;
536 537 case IEEE80211_MODE_11NG_HT40MINUS:
537 538 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
538 539 break;
539 540 #endif
540 541 default:
541 542 ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
542 543 "invalid mode %u\n", mode));
543 544 return (NULL);
544 545 }
545 546
546 547 return (rate_table);
547 548
548 549 }
549 550
550 551 static void
551 552 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
552 553 {
553 554 struct ath_rate_table *rt;
554 555 int i;
555 556
556 557 for (i = 0; i < sizeof (sc->asc_rixmap); i++)
557 558 sc->asc_rixmap[i] = 0xff;
558 559
559 560 rt = sc->hw_rate_table[mode];
560 561 ASSERT(rt != NULL);
561 562
562 563 for (i = 0; i < rt->rate_cnt; i++)
563 564 sc->asc_rixmap[rt->info[i].dot11rate &
564 565 IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
565 566
566 567 sc->sc_currates = rt;
567 568 sc->sc_curmode = mode;
568 569
569 570 /*
570 571 * All protection frames are transmited at 2Mb/s for
571 572 * 11g, otherwise at 1Mb/s.
572 573 * XXX select protection rate index from rate table.
573 574 */
574 575 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
575 576 }
576 577
577 578 static enum wireless_mode
578 579 arn_chan2mode(struct ath9k_channel *chan)
579 580 {
580 581 if (chan->chanmode == CHANNEL_A)
581 582 return (ATH9K_MODE_11A);
582 583 else if (chan->chanmode == CHANNEL_G)
583 584 return (ATH9K_MODE_11G);
584 585 else if (chan->chanmode == CHANNEL_B)
585 586 return (ATH9K_MODE_11B);
586 587 else if (chan->chanmode == CHANNEL_A_HT20)
587 588 return (ATH9K_MODE_11NA_HT20);
588 589 else if (chan->chanmode == CHANNEL_G_HT20)
589 590 return (ATH9K_MODE_11NG_HT20);
590 591 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
591 592 return (ATH9K_MODE_11NA_HT40PLUS);
592 593 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
593 594 return (ATH9K_MODE_11NA_HT40MINUS);
594 595 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
595 596 return (ATH9K_MODE_11NG_HT40PLUS);
596 597 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
597 598 return (ATH9K_MODE_11NG_HT40MINUS);
598 599
599 600 return (ATH9K_MODE_11B);
600 601 }
601 602
602 603 static void
603 604 arn_update_txpow(struct arn_softc *sc)
604 605 {
605 606 struct ath_hal *ah = sc->sc_ah;
606 607 uint32_t txpow;
607 608
608 609 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
609 610 (void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
610 611 /* read back in case value is clamped */
611 612 (void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
612 613 sc->sc_curtxpow = (uint32_t)txpow;
613 614 }
614 615 }
615 616
/*
 * Convert the 802.11n D2.0 "Minimum MPDU Start Spacing" field value
 * into microseconds:
 *	0: no restriction	4: 2 us
 *	1: 1/4 us		5: 4 us
 *	2: 1/2 us		6: 8 us
 *	3: 1 us			7: 16 us
 * Values 1-3 all report 1 us because our lower layer calculations
 * limit our precision to 1 microsecond.  Out-of-range input yields 0.
 */
uint8_t
parse_mpdudensity(uint8_t mpdudensity)
{
	static const uint8_t spacing_us[8] = { 0, 1, 1, 1, 2, 4, 8, 16 };

	if (mpdudensity > 7)
		return (0);

	return (spacing_us[mpdudensity]);
}
653 654
654 655 static void
655 656 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
656 657 {
657 658 int i, maxrates;
658 659 struct ath_rate_table *rate_table = NULL;
659 660 struct ieee80211_rateset *rateset;
660 661 ieee80211com_t *ic = (ieee80211com_t *)sc;
661 662
662 663 /* rate_table = arn_get_ratetable(sc, mode); */
663 664 switch (mode) {
664 665 case IEEE80211_MODE_11A:
665 666 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
666 667 break;
667 668 case IEEE80211_MODE_11B:
668 669 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
669 670 break;
670 671 case IEEE80211_MODE_11G:
671 672 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
672 673 break;
673 674 #ifdef ARN_11N
674 675 case IEEE80211_MODE_11NA_HT20:
675 676 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
676 677 break;
677 678 case IEEE80211_MODE_11NG_HT20:
678 679 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
679 680 break;
680 681 case IEEE80211_MODE_11NA_HT40PLUS:
681 682 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
682 683 break;
683 684 case IEEE80211_MODE_11NA_HT40MINUS:
684 685 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
685 686 break;
686 687 case IEEE80211_MODE_11NG_HT40PLUS:
687 688 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
688 689 break;
689 690 case IEEE80211_MODE_11NG_HT40MINUS:
690 691 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
691 692 break;
692 693 #endif
693 694 default:
694 695 ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
695 696 "invalid mode %u\n", mode));
696 697 break;
697 698 }
698 699 if (rate_table == NULL)
699 700 return;
700 701 if (rate_table->rate_cnt > ATH_RATE_MAX) {
701 702 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
702 703 "rate table too small (%u > %u)\n",
703 704 rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
704 705 maxrates = ATH_RATE_MAX;
705 706 } else
706 707 maxrates = rate_table->rate_cnt;
707 708
708 709 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
709 710 "maxrates is %d\n", maxrates));
710 711
711 712 rateset = &ic->ic_sup_rates[mode];
712 713 for (i = 0; i < maxrates; i++) {
713 714 rateset->ir_rates[i] = rate_table->info[i].dot11rate;
714 715 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
715 716 "%d\n", rate_table->info[i].dot11rate));
716 717 }
717 718 rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
718 719 }
719 720
/*
 * Build ic_sup_channels from the hal's regulatory channel list.
 * Returns 0 on success or EINVAL if the hal cannot produce a channel
 * list for the default country code.
 */
static int
arn_setup_channels(struct arn_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	int nchan, i, index;
	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
	uint32_t nregclass = 0;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
	    B_FALSE, 1)) {
		uint32_t rd = ah->ah_currentRD;
		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
		    "unable to collect channel list; "
		    "regdomain likely %u country code %u\n",
		    rd, CTRY_DEFAULT));
		return (EINVAL);
	}

	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
	    "number of channel is %d\n", nchan));

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		uint32_t flags;
		/*
		 * NOTE(review): presumably returns the IEEE channel
		 * number, negative for frequencies below 2400 MHz --
		 * confirm against ath9k_hw_mhz2ieee().
		 */
		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);

		/* skip anything that does not fit the net80211 table */
		if (index > IEEE80211_CHAN_MAX) {
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "bad hal channel %d (%u/%x) ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}
		/* NB: flags are known to be compatible */
		if (index < 0) {
			/*
			 * can't handle frequency <2400MHz (negative
			 * channels) right now
			 */
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "hal channel %d (%u/%x) "
			    "cannot be handled, ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}

		/*
		 * Calculate net80211 flags; most are compatible
		 * but some need massaging. Note the static turbo
		 * conversion can be removed once net80211 is updated
		 * to understand static vs. dynamic turbo.
		 */

		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);

		if (ic->ic_sup_channels[index].ich_freq == 0) {
			ic->ic_sup_channels[index].ich_freq = c->channel;
			ic->ic_sup_channels[index].ich_flags = flags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_sup_channels[index].ich_flags |= flags;
		}
		/* any 11g-capable channel enables short preamble/slot */
		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
			sc->sc_have11g = 1;
			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
			    IEEE80211_C_SHSLOT; /* short slot time */
		}
	}

	return (0);
}
796 797
797 798 uint32_t
798 799 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
799 800 {
800 801 uint32_t channel_mode;
801 802 switch (ieee80211_chan2mode(isc, chan)) {
802 803 case IEEE80211_MODE_11NA:
803 804 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
804 805 channel_mode = CHANNEL_A_HT40PLUS;
805 806 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
806 807 channel_mode = CHANNEL_A_HT40MINUS;
807 808 else
808 809 channel_mode = CHANNEL_A_HT20;
809 810 break;
810 811 case IEEE80211_MODE_11NG:
811 812 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
812 813 channel_mode = CHANNEL_G_HT40PLUS;
813 814 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
814 815 channel_mode = CHANNEL_G_HT40MINUS;
815 816 else
816 817 channel_mode = CHANNEL_G_HT20;
817 818 break;
818 819 case IEEE80211_MODE_TURBO_G:
819 820 case IEEE80211_MODE_STURBO_A:
820 821 case IEEE80211_MODE_TURBO_A:
821 822 channel_mode = 0;
822 823 break;
823 824 case IEEE80211_MODE_11A:
824 825 channel_mode = CHANNEL_A;
825 826 break;
826 827 case IEEE80211_MODE_11G:
827 828 channel_mode = CHANNEL_B;
828 829 break;
829 830 case IEEE80211_MODE_11B:
830 831 channel_mode = CHANNEL_G;
831 832 break;
832 833 case IEEE80211_MODE_FH:
833 834 channel_mode = 0;
834 835 break;
835 836 default:
836 837 break;
837 838 }
838 839
839 840 return (channel_mode);
840 841 }
841 842
842 843 /*
843 844 * Update internal state after a channel change.
844 845 */
845 846 void
846 847 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
847 848 {
848 849 struct ieee80211com *ic = &sc->sc_isc;
849 850 enum ieee80211_phymode mode;
850 851 enum wireless_mode wlmode;
851 852
852 853 /*
853 854 * Change channels and update the h/w rate map
854 855 * if we're switching; e.g. 11a to 11b/g.
855 856 */
856 857 mode = ieee80211_chan2mode(ic, chan);
857 858 switch (mode) {
858 859 case IEEE80211_MODE_11A:
859 860 wlmode = ATH9K_MODE_11A;
860 861 break;
861 862 case IEEE80211_MODE_11B:
862 863 wlmode = ATH9K_MODE_11B;
863 864 break;
864 865 case IEEE80211_MODE_11G:
865 866 wlmode = ATH9K_MODE_11B;
866 867 break;
867 868 default:
868 869 break;
869 870 }
870 871 if (wlmode != sc->sc_curmode)
871 872 arn_setcurmode(sc, wlmode);
872 873
873 874 }
874 875
/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart stuff.
 */
/*
 * Program the hardware onto 'hchan'.  A full chip reset is performed
 * only when the channel, chainmask, or reset flags actually changed.
 * Returns 0 on success, EIO if the driver is torn down or the reset /
 * receive restart fails.
 */
static int
arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = &sc->sc_isc;
	boolean_t fastcc = B_TRUE;	/* try a fast channel change first */
	boolean_t stopped;
	struct ieee80211_channel chan;
	enum wireless_mode curmode;

	if (sc->sc_flags & SC_OP_INVALID)
		return (EIO);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;

		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		(void) ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
		arn_draintxq(sc, B_FALSE); /* clear pending tx frames */
		stopped = arn_stoprecv(sc); /* turn off frame recv */

		/*
		 * XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel.
		 */

		/* RX did not stop cleanly: fall back to a full reset */
		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = B_FALSE;

		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
		    sc->sc_ah->ah_curchan->channel,
		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));

		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
		    sc->sc_ht_extprotspacing, fastcc, &status)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
			    "unable to reset channel %u (%uMhz) "
			    "flags 0x%x hal status %u\n",
			    ath9k_hw_mhz2ieee(ah, hchan->channel,
			    hchan->channelFlags),
			    hchan->channel, hchan->channelFlags, status));
			return (EIO);
		}

		sc->sc_curchan = *hchan;

		/* both one-shot conditions have now been honored */
		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		if (arn_startrecv(sc) != 0) {
			arn_problem("arn: arn_set_channel(): "
			    "unable to restart recv logic\n");
			return (EIO);
		}

		/*
		 * NOTE(review): 'chan' is a stack local; storing its
		 * address in ic->ic_ibss_chan leaves a dangling pointer
		 * once this function returns.  Confirm whether net80211
		 * copies the channel before then, or point at the
		 * matching ic_sup_channels[] entry instead.
		 */
		chan.ich_freq = hchan->channel;
		chan.ich_flags = hchan->channelFlags;
		ic->ic_ibss_chan = &chan;

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		curmode = arn_chan2mode(hchan);
		if (curmode != sc->sc_curmode)
			arn_setcurmode(sc, arn_chan2mode(hchan));

		arn_update_txpow(sc);

		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return (0);
}
968 969
969 970 /*
970 971 * This routine performs the periodic noise floor calibration function
971 972 * that is used to adjust and optimize the chip performance. This
972 973 * takes environmental changes (location, temperature) into account.
973 974 * When the task is complete, it reschedules itself depending on the
974 975 * appropriate interval that was calculated.
975 976 */
976 977 static void
977 978 arn_ani_calibrate(void *arg)
978 979
979 980 {
980 981 ieee80211com_t *ic = (ieee80211com_t *)arg;
981 982 struct arn_softc *sc = (struct arn_softc *)ic;
982 983 struct ath_hal *ah = sc->sc_ah;
983 984 boolean_t longcal = B_FALSE;
984 985 boolean_t shortcal = B_FALSE;
985 986 boolean_t aniflag = B_FALSE;
986 987 unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
987 988 uint32_t cal_interval;
988 989
989 990 /*
990 991 * don't calibrate when we're scanning.
991 992 * we are most likely not on our home channel.
992 993 */
993 994 if (ic->ic_state != IEEE80211_S_RUN)
994 995 goto settimer;
995 996
996 997 /* Long calibration runs independently of short calibration. */
997 998 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
998 999 longcal = B_TRUE;
999 1000 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1000 1001 "%s: longcal @%lu\n", __func__, drv_hztousec));
1001 1002 sc->sc_ani.sc_longcal_timer = timestamp;
1002 1003 }
1003 1004
1004 1005 /* Short calibration applies only while sc_caldone is FALSE */
1005 1006 if (!sc->sc_ani.sc_caldone) {
1006 1007 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1007 1008 ATH_SHORT_CALINTERVAL) {
1008 1009 shortcal = B_TRUE;
1009 1010 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1010 1011 "%s: shortcal @%lu\n",
1011 1012 __func__, drv_hztousec));
1012 1013 sc->sc_ani.sc_shortcal_timer = timestamp;
1013 1014 sc->sc_ani.sc_resetcal_timer = timestamp;
1014 1015 }
1015 1016 } else {
1016 1017 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1017 1018 ATH_RESTART_CALINTERVAL) {
1018 1019 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1019 1020 &sc->sc_ani.sc_caldone);
1020 1021 if (sc->sc_ani.sc_caldone)
1021 1022 sc->sc_ani.sc_resetcal_timer = timestamp;
1022 1023 }
1023 1024 }
1024 1025
1025 1026 /* Verify whether we must check ANI */
1026 1027 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1027 1028 ATH_ANI_POLLINTERVAL) {
1028 1029 aniflag = B_TRUE;
1029 1030 sc->sc_ani.sc_checkani_timer = timestamp;
1030 1031 }
1031 1032
1032 1033 /* Skip all processing if there's nothing to do. */
1033 1034 if (longcal || shortcal || aniflag) {
1034 1035 /* Call ANI routine if necessary */
1035 1036 if (aniflag)
1036 1037 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1037 1038 ah->ah_curchan);
1038 1039
1039 1040 /* Perform calibration if necessary */
1040 1041 if (longcal || shortcal) {
1041 1042 boolean_t iscaldone = B_FALSE;
1042 1043
1043 1044 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1044 1045 sc->sc_rx_chainmask, longcal, &iscaldone)) {
1045 1046 if (longcal)
1046 1047 sc->sc_ani.sc_noise_floor =
1047 1048 ath9k_hw_getchan_noise(ah,
1048 1049 ah->ah_curchan);
1049 1050
1050 1051 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1051 1052 "%s: calibrate chan %u/%x nf: %d\n",
1052 1053 __func__,
1053 1054 ah->ah_curchan->channel,
1054 1055 ah->ah_curchan->channelFlags,
1055 1056 sc->sc_ani.sc_noise_floor));
1056 1057 } else {
1057 1058 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1058 1059 "%s: calibrate chan %u/%x failed\n",
1059 1060 __func__,
1060 1061 ah->ah_curchan->channel,
1061 1062 ah->ah_curchan->channelFlags));
1062 1063 }
1063 1064 sc->sc_ani.sc_caldone = iscaldone;
1064 1065 }
1065 1066 }
1066 1067
1067 1068 settimer:
1068 1069 /*
1069 1070 * Set timer interval based on previous results.
1070 1071 * The interval must be the shortest necessary to satisfy ANI,
1071 1072 * short calibration and long calibration.
1072 1073 */
1073 1074 cal_interval = ATH_LONG_CALINTERVAL;
1074 1075 if (sc->sc_ah->ah_config.enable_ani)
1075 1076 cal_interval =
1076 1077 min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1077 1078
1078 1079 if (!sc->sc_ani.sc_caldone)
1079 1080 cal_interval = min(cal_interval,
1080 1081 (uint32_t)ATH_SHORT_CALINTERVAL);
1081 1082
1082 1083 sc->sc_scan_timer = 0;
1083 1084 sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1084 1085 drv_usectohz(cal_interval * 1000));
1085 1086 }
1086 1087
1087 1088 static void
1088 1089 arn_stop_caltimer(struct arn_softc *sc)
1089 1090 {
1090 1091 timeout_id_t tmp_id = 0;
1091 1092
1092 1093 while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1093 1094 tmp_id = sc->sc_cal_timer;
1094 1095 (void) untimeout(tmp_id);
1095 1096 }
1096 1097 sc->sc_cal_timer = 0;
1097 1098 }
1098 1099
/*
 * Hardware interrupt handler.
 *
 * Reads and clears the chip's interrupt status, masks it against the
 * interrupts we actually asked for (sc_imask), and dispatches each
 * cause.  Heavy work (RX, TX completion, beacon miss) is deferred to a
 * softint or taskq; fatal conditions trigger a full chip reset here.
 *
 * Returns DDI_INTR_CLAIMED if the interrupt was ours, otherwise
 * DDI_INTR_UNCLAIMED (the IRQ line may be shared).
 */
static uint_t
arn_isr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	struct arn_softc *sc = (struct arn_softc *)arg;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_LOCK(sc);

	if (sc->sc_flags & SC_OP_INVALID) {
		/*
		 * The hardware is not ready/present, don't
		 * touch anything. Note this can happen early
		 * on if the IRQ is shared.
		 */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}
	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

	status &= sc->sc_imask; /* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
	 */
	if (!status) {
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	sc->sc_intrstatus = status;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_FATAL\n"));
		goto reset;
	} else if (status & ATH9K_INT_RXORN) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_RXORN\n"));
		goto reset;
	} else {
		if (status & ATH9K_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 * RXE bit is written, but it doesn't work
			 * at least on older hardware revs.
			 */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RXEOL\n"));
			sc->sc_rxlink = NULL;
		}
		if (status & ATH9K_INT_TXURN) {
			/* bump tx trigger level */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TXURN\n"));
			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_RX) {
			/* defer RX processing to the softint */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RX\n"));
			sc->sc_rx_pend = 1;
			ddi_trigger_softintr(sc->sc_softint_id);
		}
		if (status & ATH9K_INT_TX) {
			/* TX completion is handled on the taskq */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TX\n"));
			if (ddi_taskq_dispatch(sc->sc_tq,
			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
			    DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory for tx taskq\n");
			}
		}
#ifdef ARN_ATH9K_INT_MIB
		if (status & ATH9K_INT_MIB) {
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to
			 * fire.
			 */
			(void) ath9k_hw_set_interrupts(ah, 0);
			/*
			 * Let the hal handle the event. We assume
			 * it will clear whatever condition caused
			 * the interrupt.
			 */
			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_MIB\n"));
		}
#endif

#ifdef ARN_ATH9K_INT_TIM_TIMER
		if (status & ATH9K_INT_TIM_TIMER) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TIM_TIMER\n"));
			if (!(ah->ah_caps.hw_caps &
			    ATH9K_HW_CAP_AUTOSLEEP)) {
				/*
				 * Clear RxAbort bit so that we can
				 * receive frames
				 */
				ath9k_hw_setrxabort(ah, 0);
				goto reset;
			}
		}
#endif

		if (status & ATH9K_INT_BMISS) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_BMISS\n"));
#ifdef ARN_HW_BEACON_MISS_HANDLE
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by H/W mechanism\n"));
			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory available for bmiss taskq\n");
			}
#else
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by S/W mechanism\n"));
#endif /* ARN_HW_BEACON_MISS_HANDLE */
		}

		/* NB: the lock is dropped before the CST/SWBA returns below */
		ARN_UNLOCK(sc);

#ifdef ARN_ATH9K_INT_CST
		/* carrier sense timeout */
		if (status & ATH9K_INT_CST) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_CST\n"));
			return (DDI_INTR_CLAIMED);
		}
#endif

		if (status & ATH9K_INT_SWBA) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_SWBA\n"));
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}
	}

	return (DDI_INTR_CLAIMED);
reset:
	/* fatal condition: full chip reset, performed with the lock held */
	ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n"));
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (DDI_INTR_CLAIMED);
}
1268 1269
1269 1270 static int
1270 1271 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1271 1272 {
1272 1273 int i;
1273 1274
1274 1275 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1275 1276 if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1276 1277 return (i);
1277 1278 }
1278 1279
1279 1280 return (-1);
1280 1281 }
1281 1282
/*
 * Perform a full chip reset: quiesce interrupts and DMA, reset the
 * hardware on the current channel, then restart receive, restore the
 * current PHY mode/tx power, re-enable beacons if they were running,
 * and re-enable interrupts.
 *
 * Returns 0 on success or EIO if the HAL reset failed (recovery of
 * the rx/interrupt state is still attempted in that case).
 */
int
arn_reset(ieee80211com_t *ic)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	/* Quiesce: no interrupts, no tx, no rx while the chip resets. */
	(void) ath9k_hw_set_interrupts(ah, 0);
	arn_draintxq(sc, 0);
	(void) arn_stoprecv(sc);

	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to reset hardware; hal status %u\n", status));
		error = EIO;
	}

	if (arn_startrecv(sc) != 0)
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to start recv logic\n"));

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));

	arn_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		arn_beacon_config(sc); /* restart beacons */

	/* Re-enable the interrupts we had asked for before the reset. */
	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);

	return (error);
}
1322 1323
1323 1324 int
1324 1325 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1325 1326 {
1326 1327 int qnum;
1327 1328
1328 1329 switch (queue) {
1329 1330 case WME_AC_VO:
1330 1331 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1331 1332 break;
1332 1333 case WME_AC_VI:
1333 1334 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1334 1335 break;
1335 1336 case WME_AC_BE:
1336 1337 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1337 1338 break;
1338 1339 case WME_AC_BK:
1339 1340 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1340 1341 break;
1341 1342 default:
1342 1343 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1343 1344 break;
1344 1345 }
1345 1346
1346 1347 return (qnum);
1347 1348 }
1348 1349
/* MAC/BB silicon revision -> printable name; used by arn_mac_bb_name() */
static struct {
	uint32_t version;
	const char *name;
} ath_mac_bb_names[] = {
	{ AR_SREV_VERSION_5416_PCI, "5416" },
	{ AR_SREV_VERSION_5416_PCIE, "5418" },
	{ AR_SREV_VERSION_9100, "9100" },
	{ AR_SREV_VERSION_9160, "9160" },
	{ AR_SREV_VERSION_9280, "9280" },
	{ AR_SREV_VERSION_9285, "9285" }
};
1360 1361
/*
 * RF silicon revision -> printable name; used by arn_rf_name().
 * NB: revision 0 deliberately maps to "5133" as well.
 */
static struct {
	uint16_t version;
	const char *name;
} ath_rf_names[] = {
	{ 0, "5133" },
	{ AR_RAD5133_SREV_MAJOR, "5133" },
	{ AR_RAD5122_SREV_MAJOR, "5122" },
	{ AR_RAD2133_SREV_MAJOR, "2133" },
	{ AR_RAD2122_SREV_MAJOR, "2122" }
};
1371 1372
1372 1373 /*
1373 1374 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1374 1375 */
1375 1376
1376 1377 static const char *
1377 1378 arn_mac_bb_name(uint32_t mac_bb_version)
1378 1379 {
1379 1380 int i;
1380 1381
1381 1382 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1382 1383 if (ath_mac_bb_names[i].version == mac_bb_version) {
1383 1384 return (ath_mac_bb_names[i].name);
1384 1385 }
1385 1386 }
1386 1387
1387 1388 return ("????");
1388 1389 }
1389 1390
1390 1391 /*
1391 1392 * Return the RF name. "????" is returned if the RF is unknown.
1392 1393 */
1393 1394
1394 1395 static const char *
1395 1396 arn_rf_name(uint16_t rf_version)
1396 1397 {
1397 1398 int i;
1398 1399
1399 1400 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1400 1401 if (ath_rf_names[i].version == rf_version) {
1401 1402 return (ath_rf_names[i].name);
1402 1403 }
1403 1404 }
1404 1405
1405 1406 return ("????");
1406 1407 }
1407 1408
1408 1409 static void
1409 1410 arn_next_scan(void *arg)
1410 1411 {
1411 1412 ieee80211com_t *ic = arg;
1412 1413 struct arn_softc *sc = (struct arn_softc *)ic;
1413 1414
1414 1415 sc->sc_scan_timer = 0;
1415 1416 if (ic->ic_state == IEEE80211_S_SCAN) {
1416 1417 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1417 1418 drv_usectohz(arn_dwelltime * 1000));
1418 1419 ieee80211_next_scan(ic);
1419 1420 }
1420 1421 }
1421 1422
1422 1423 static void
1423 1424 arn_stop_scantimer(struct arn_softc *sc)
1424 1425 {
1425 1426 timeout_id_t tmp_id = 0;
1426 1427
1427 1428 while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1428 1429 tmp_id = sc->sc_scan_timer;
1429 1430 (void) untimeout(tmp_id);
1430 1431 }
1431 1432 sc->sc_scan_timer = 0;
1432 1433 }
1433 1434
1434 1435 static int32_t
1435 1436 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1436 1437 {
1437 1438 struct arn_softc *sc = (struct arn_softc *)ic;
1438 1439 struct ath_hal *ah = sc->sc_ah;
1439 1440 struct ieee80211_node *in;
1440 1441 int32_t i, error;
1441 1442 uint8_t *bssid;
1442 1443 uint32_t rfilt;
1443 1444 enum ieee80211_state ostate;
1444 1445 struct ath9k_channel *channel;
1445 1446 int pos;
1446 1447
1447 1448 /* Should set up & init LED here */
1448 1449
1449 1450 if (sc->sc_flags & SC_OP_INVALID)
1450 1451 return (0);
1451 1452
1452 1453 ostate = ic->ic_state;
1453 1454 ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1454 1455 "%x -> %x!\n", ostate, nstate));
1455 1456
1456 1457 ARN_LOCK(sc);
1457 1458
1458 1459 if (nstate != IEEE80211_S_SCAN)
1459 1460 arn_stop_scantimer(sc);
1460 1461 if (nstate != IEEE80211_S_RUN)
1461 1462 arn_stop_caltimer(sc);
1462 1463
1463 1464 /* Should set LED here */
1464 1465
1465 1466 if (nstate == IEEE80211_S_INIT) {
1466 1467 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1467 1468 /*
1468 1469 * Disable interrupts.
1469 1470 */
1470 1471 (void) ath9k_hw_set_interrupts
1471 1472 (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1472 1473
1473 1474 #ifdef ARN_IBSS
1474 1475 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1475 1476 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1476 1477 arn_beacon_return(sc);
1477 1478 }
1478 1479 #endif
1479 1480 ARN_UNLOCK(sc);
1480 1481 ieee80211_stop_watchdog(ic);
1481 1482 goto done;
1482 1483 }
1483 1484 in = ic->ic_bss;
1484 1485
1485 1486 pos = arn_get_channel(sc, ic->ic_curchan);
1486 1487
1487 1488 if (pos == -1) {
1488 1489 ARN_DBG((ARN_DBG_FATAL, "arn: "
1489 1490 "%s: Invalid channel\n", __func__));
1490 1491 error = EINVAL;
1491 1492 ARN_UNLOCK(sc);
1492 1493 goto bad;
1493 1494 }
1494 1495
1495 1496 if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1496 1497 arn_update_chainmask(sc);
1497 1498 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1498 1499 } else
1499 1500 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1500 1501
1501 1502 sc->sc_ah->ah_channels[pos].chanmode =
1502 1503 arn_chan2flags(ic, ic->ic_curchan);
1503 1504 channel = &sc->sc_ah->ah_channels[pos];
1504 1505 if (channel == NULL) {
1505 1506 arn_problem("arn_newstate(): channel == NULL");
1506 1507 ARN_UNLOCK(sc);
1507 1508 goto bad;
1508 1509 }
1509 1510 error = arn_set_channel(sc, channel);
1510 1511 if (error != 0) {
1511 1512 if (nstate != IEEE80211_S_SCAN) {
1512 1513 ARN_UNLOCK(sc);
1513 1514 ieee80211_reset_chan(ic);
1514 1515 goto bad;
1515 1516 }
1516 1517 }
1517 1518
1518 1519 /*
1519 1520 * Get the receive filter according to the
1520 1521 * operating mode and state
1521 1522 */
1522 1523 rfilt = arn_calcrxfilter(sc);
1523 1524
1524 1525 if (nstate == IEEE80211_S_SCAN)
1525 1526 bssid = ic->ic_macaddr;
1526 1527 else
1527 1528 bssid = in->in_bssid;
1528 1529
1529 1530 ath9k_hw_setrxfilter(ah, rfilt);
1530 1531
1531 1532 if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1532 1533 ath9k_hw_write_associd(ah, bssid, in->in_associd);
1533 1534 else
1534 1535 ath9k_hw_write_associd(ah, bssid, 0);
1535 1536
1536 1537 /* Check for WLAN_CAPABILITY_PRIVACY ? */
1537 1538 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1538 1539 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1539 1540 if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1540 1541 (void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1541 1542 bssid);
1542 1543 }
1543 1544 }
1544 1545
1545 1546 if (nstate == IEEE80211_S_RUN) {
1546 1547 switch (ic->ic_opmode) {
1547 1548 #ifdef ARN_IBSS
1548 1549 case IEEE80211_M_IBSS:
1549 1550 /*
1550 1551 * Allocate and setup the beacon frame.
1551 1552 * Stop any previous beacon DMA.
1552 1553 */
1553 1554 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1554 1555 arn_beacon_return(sc);
1555 1556 error = arn_beacon_alloc(sc, in);
1556 1557 if (error != 0) {
1557 1558 ARN_UNLOCK(sc);
1558 1559 goto bad;
1559 1560 }
1560 1561 /*
1561 1562 * If joining an adhoc network defer beacon timer
1562 1563 * configuration to the next beacon frame so we
1563 1564 * have a current TSF to use. Otherwise we're
1564 1565 * starting an ibss/bss so there's no need to delay.
1565 1566 */
1566 1567 if (ic->ic_opmode == IEEE80211_M_IBSS &&
1567 1568 ic->ic_bss->in_tstamp.tsf != 0) {
1568 1569 sc->sc_bsync = 1;
1569 1570 } else {
1570 1571 arn_beacon_config(sc);
1571 1572 }
1572 1573 break;
1573 1574 #endif /* ARN_IBSS */
1574 1575 case IEEE80211_M_STA:
1575 1576 if (ostate != IEEE80211_S_RUN) {
1576 1577 /*
1577 1578 * Defer beacon timer configuration to the next
1578 1579 * beacon frame so we have a current TSF to use.
1579 1580 * Any TSF collected when scanning is likely old
1580 1581 */
1581 1582 #ifdef ARN_IBSS
1582 1583 sc->sc_bsync = 1;
1583 1584 #else
1584 1585 /* Configure the beacon and sleep timers. */
1585 1586 arn_beacon_config(sc);
1586 1587 /* Reset rssi stats */
1587 1588 sc->sc_halstats.ns_avgbrssi =
1588 1589 ATH_RSSI_DUMMY_MARKER;
1589 1590 sc->sc_halstats.ns_avgrssi =
1590 1591 ATH_RSSI_DUMMY_MARKER;
1591 1592 sc->sc_halstats.ns_avgtxrssi =
1592 1593 ATH_RSSI_DUMMY_MARKER;
1593 1594 sc->sc_halstats.ns_avgtxrate =
1594 1595 ATH_RATE_DUMMY_MARKER;
1595 1596 /* end */
1596 1597
1597 1598 #endif /* ARN_IBSS */
1598 1599 }
1599 1600 break;
1600 1601 default:
1601 1602 break;
1602 1603 }
1603 1604 } else {
1604 1605 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1605 1606 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1606 1607 }
1607 1608
1608 1609 /*
1609 1610 * Reset the rate control state.
1610 1611 */
1611 1612 arn_rate_ctl_reset(sc, nstate);
1612 1613
1613 1614 ARN_UNLOCK(sc);
1614 1615 done:
1615 1616 /*
1616 1617 * Invoke the parent method to complete the work.
1617 1618 */
1618 1619 error = sc->sc_newstate(ic, nstate, arg);
1619 1620
1620 1621 /*
1621 1622 * Finally, start any timers.
1622 1623 */
1623 1624 if (nstate == IEEE80211_S_RUN) {
1624 1625 ieee80211_start_watchdog(ic, 1);
1625 1626 ASSERT(sc->sc_cal_timer == 0);
1626 1627 sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1627 1628 drv_usectohz(100 * 1000));
1628 1629 } else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1629 1630 /* start ap/neighbor scan timer */
1630 1631 /* ASSERT(sc->sc_scan_timer == 0); */
1631 1632 if (sc->sc_scan_timer != 0) {
1632 1633 (void) untimeout(sc->sc_scan_timer);
1633 1634 sc->sc_scan_timer = 0;
1634 1635 }
1635 1636 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1636 1637 drv_usectohz(arn_dwelltime * 1000));
1637 1638 }
1638 1639
1639 1640 bad:
1640 1641 return (error);
1641 1642 }
1642 1643
/*
 * Periodic watchdog.  While associated (RUN state) it optionally runs
 * the legacy rate-control pass and, when beacon miss is handled in
 * software, tears the association down after ~10s without beacons;
 * finally it kicks the generic net80211 watchdog and re-arms itself.
 */
static void
arn_watchdog(void *arg)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = &sc->sc_isc;
	int ntimer = 0;

	ARN_LOCK(sc);
	ic->ic_watchdog_timer = 0;
	if (sc->sc_flags & SC_OP_INVALID) {
		/* device is gone/detaching; nothing to do */
		ARN_UNLOCK(sc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
#ifdef ARN_LEGACY_RC
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_stats.ast_rate_calls ++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				arn_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    arn_rate_ctl, sc);
		}
#endif /* ARN_LEGACY_RC */

#ifdef ARN_HW_BEACON_MISS_HANDLE
		/* nothing to do here */
#else
		/* currently set 10 seconds as beacon miss threshold */
		if (ic->ic_beaconmiss++ > 100) {
			ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
			    "Beacon missed for 10 seconds, run"
			    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
			/* drop the lock: new_state may call back into us */
			ARN_UNLOCK(sc);
			(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
			return;
		}
#endif /* ARN_HW_BEACON_MISS_HANDLE */

		ntimer = 1;
	}
	ARN_UNLOCK(sc);

	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}
1695 1696
1696 1697 /* ARGSUSED */
1697 1698 static struct ieee80211_node *
1698 1699 arn_node_alloc(ieee80211com_t *ic)
1699 1700 {
1700 1701 struct ath_node *an;
1701 1702 #ifdef ARN_TX_AGGREGATION
1702 1703 struct arn_softc *sc = (struct arn_softc *)ic;
1703 1704 #endif
1704 1705
1705 1706 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1706 1707
1707 1708 /* legacy rate control */
1708 1709 #ifdef ARN_LEGACY_RC
1709 1710 arn_rate_update(sc, &an->an_node, 0);
1710 1711 #endif
1711 1712
1712 1713 #ifdef ARN_TX_AGGREGATION
1713 1714 if (sc->sc_flags & SC_OP_TXAGGR) {
1714 1715 arn_tx_node_init(sc, an);
1715 1716 }
1716 1717 #endif /* ARN_TX_AGGREGATION */
1717 1718
1718 1719 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1719 1720
1720 1721 return ((an != NULL) ? &an->an_node : NULL);
1721 1722 }
1722 1723
/*
 * net80211 node-free callback: detach the node from driver state
 * before releasing its memory.
 */
static void
arn_node_free(struct ieee80211_node *in)
{
	ieee80211com_t *ic = in->in_ic;
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_buf *bf;
	struct ath_txq *txq;
	int32_t i;

#ifdef ARN_TX_AGGREGATION
	if (sc->sc_flags & SC_OP_TXAGGR)
		arn_tx_node_cleanup(sc, in);
#endif /* TX_AGGREGATION */

	/*
	 * Clear dangling references to this node from buffers still
	 * queued on any tx queue, so later completion processing does
	 * not touch freed memory.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];
			mutex_enter(&txq->axq_lock);
			bf = list_head(&txq->axq_list);
			while (bf != NULL) {
				if (bf->bf_in == in) {
					bf->bf_in = NULL;
				}
				bf = list_next(&txq->axq_list, bf);
			}
			mutex_exit(&txq->axq_lock);
		}
	}

	/* Let net80211 release its per-node state first. */
	ic->ic_node_cleanup(in);

	/* Free any information elements attached to the node. */
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);

	if (in->in_wme_ie != NULL)
		ieee80211_free(in->in_wme_ie);

	if (in->in_htcap_ie != NULL)
		ieee80211_free(in->in_htcap_ie);

	/* 'in' is embedded in the ath_node allocated by arn_node_alloc() */
	kmem_free(in, sizeof (struct ath_node));
}
1765 1766
1766 1767 /*
1767 1768 * Allocate tx/rx key slots for TKIP. We allocate one slot for
1768 1769 * each key. MIC is right after the decrypt/encrypt key.
1769 1770 */
1770 1771 static uint16_t
1771 1772 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1772 1773 ieee80211_keyix *rxkeyix)
1773 1774 {
1774 1775 uint16_t i, keyix;
1775 1776
1776 1777 ASSERT(!sc->sc_splitmic);
1777 1778 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1778 1779 uint8_t b = sc->sc_keymap[i];
1779 1780 if (b == 0xff)
1780 1781 continue;
1781 1782 for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1782 1783 keyix++, b >>= 1) {
1783 1784 if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1784 1785 /* full pair unavailable */
1785 1786 continue;
1786 1787 }
1787 1788 set_bit(keyix, sc->sc_keymap);
1788 1789 set_bit(keyix+64, sc->sc_keymap);
1789 1790 ARN_DBG((ARN_DBG_KEYCACHE,
1790 1791 "arn_key_alloc_pair(): key pair %u,%u\n",
1791 1792 keyix, keyix+64));
1792 1793 *txkeyix = *rxkeyix = keyix;
1793 1794 return (1);
1794 1795 }
1795 1796 }
1796 1797 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1797 1798 " out of pair space\n"));
1798 1799
1799 1800 return (0);
1800 1801 }
1801 1802
1802 1803 /*
1803 1804 * Allocate tx/rx key slots for TKIP. We allocate two slots for
1804 1805 * each key, one for decrypt/encrypt and the other for the MIC.
1805 1806 */
1806 1807 static int
1807 1808 arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1808 1809 ieee80211_keyix *rxkeyix)
1809 1810 {
1810 1811 uint16_t i, keyix;
1811 1812
1812 1813 ASSERT(sc->sc_splitmic);
1813 1814 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1814 1815 uint8_t b = sc->sc_keymap[i];
1815 1816 if (b != 0xff) {
1816 1817 /*
1817 1818 * One or more slots in this byte are free.
1818 1819 */
1819 1820 keyix = i*NBBY;
1820 1821 while (b & 1) {
1821 1822 again:
1822 1823 keyix++;
1823 1824 b >>= 1;
1824 1825 }
1825 1826 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1826 1827 if (is_set(keyix+32, sc->sc_keymap) ||
1827 1828 is_set(keyix+64, sc->sc_keymap) ||
1828 1829 is_set(keyix+32+64, sc->sc_keymap)) {
1829 1830 /* full pair unavailable */
1830 1831 if (keyix == (i+1)*NBBY) {
1831 1832 /* no slots were appropriate, advance */
1832 1833 continue;
1833 1834 }
1834 1835 goto again;
1835 1836 }
1836 1837 set_bit(keyix, sc->sc_keymap);
1837 1838 set_bit(keyix+64, sc->sc_keymap);
1838 1839 set_bit(keyix+32, sc->sc_keymap);
1839 1840 set_bit(keyix+32+64, sc->sc_keymap);
1840 1841 ARN_DBG((ARN_DBG_KEYCACHE,
1841 1842 "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
1842 1843 keyix, keyix+64,
1843 1844 keyix+32, keyix+32+64));
1844 1845 *txkeyix = *rxkeyix = keyix;
1845 1846 return (1);
1846 1847 }
1847 1848 }
1848 1849 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
1849 1850 " out of pair space\n"));
1850 1851
1851 1852 return (0);
1852 1853 }
1853 1854 /*
1854 1855 * Allocate a single key cache slot.
1855 1856 */
1856 1857 static int
1857 1858 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1858 1859 ieee80211_keyix *rxkeyix)
1859 1860 {
1860 1861 uint16_t i, keyix;
1861 1862
1862 1863 /* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1863 1864 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1864 1865 uint8_t b = sc->sc_keymap[i];
1865 1866
1866 1867 if (b != 0xff) {
1867 1868 /*
1868 1869 * One or more slots are free.
1869 1870 */
1870 1871 keyix = i*NBBY;
1871 1872 while (b & 1)
1872 1873 keyix++, b >>= 1;
1873 1874 set_bit(keyix, sc->sc_keymap);
1874 1875 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1875 1876 "key %u\n", keyix));
1876 1877 *txkeyix = *rxkeyix = keyix;
1877 1878 return (1);
1878 1879 }
1879 1880 }
1880 1881 return (0);
1881 1882 }
1882 1883
1883 1884 /*
1884 1885 * Allocate one or more key cache slots for a unicast key. The
1885 1886 * key itself is needed only to identify the cipher. For hardware
1886 1887 * TKIP with split cipher+MIC keys we allocate two key cache slot
1887 1888 * pairs so that we can setup separate TX and RX MIC keys. Note
1888 1889 * that the MIC key for a TKIP key at slot i is assumed by the
1889 1890 * hardware to be at slot i+64. This limits TKIP keys to the first
1890 1891 * 64 entries.
1891 1892 */
1892 1893 /* ARGSUSED */
1893 1894 int
1894 1895 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1895 1896 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1896 1897 {
1897 1898 struct arn_softc *sc = (struct arn_softc *)ic;
1898 1899
1899 1900 /*
1900 1901 * We allocate two pair for TKIP when using the h/w to do
1901 1902 * the MIC. For everything else, including software crypto,
1902 1903 * we allocate a single entry. Note that s/w crypto requires
1903 1904 * a pass-through slot on the 5211 and 5212. The 5210 does
1904 1905 * not support pass-through cache entries and we map all
1905 1906 * those requests to slot 0.
1906 1907 */
1907 1908 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1908 1909 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1909 1910 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1910 1911 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1911 1912 if (sc->sc_splitmic)
1912 1913 return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1913 1914 else
1914 1915 return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1915 1916 } else {
1916 1917 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1917 1918 }
1918 1919 }
1919 1920
1920 1921 /*
1921 1922 * Delete an entry in the key cache allocated by ath_key_alloc.
1922 1923 */
1923 1924 int
1924 1925 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1925 1926 {
1926 1927 struct arn_softc *sc = (struct arn_softc *)ic;
1927 1928 struct ath_hal *ah = sc->sc_ah;
1928 1929 const struct ieee80211_cipher *cip = k->wk_cipher;
1929 1930 ieee80211_keyix keyix = k->wk_keyix;
1930 1931
1931 1932 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1932 1933 " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1933 1934
1934 1935 (void) ath9k_hw_keyreset(ah, keyix);
1935 1936 /*
1936 1937 * Handle split tx/rx keying required for TKIP with h/w MIC.
1937 1938 */
1938 1939 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1939 1940 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1940 1941 (void) ath9k_hw_keyreset(ah, keyix+32); /* RX key */
1941 1942
1942 1943 if (keyix >= IEEE80211_WEP_NKID) {
1943 1944 /*
1944 1945 * Don't touch keymap entries for global keys so
1945 1946 * they are never considered for dynamic allocation.
1946 1947 */
1947 1948 clr_bit(keyix, sc->sc_keymap);
1948 1949 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1949 1950 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1950 1951 /*
1951 1952 * If splitmic is true +64 is TX key MIC,
1952 1953 * else +64 is RX key + RX key MIC.
1953 1954 */
1954 1955 clr_bit(keyix+64, sc->sc_keymap);
1955 1956 if (sc->sc_splitmic) {
1956 1957 /* Rx key */
1957 1958 clr_bit(keyix+32, sc->sc_keymap);
1958 1959 /* RX key MIC */
1959 1960 clr_bit(keyix+32+64, sc->sc_keymap);
1960 1961 }
1961 1962 }
1962 1963 }
1963 1964 return (1);
1964 1965 }
1965 1966
1966 1967 /*
1967 1968 * Set a TKIP key into the hardware. This handles the
1968 1969 * potential distribution of key state to multiple key
1969 1970 * cache slots for TKIP.
1970 1971 */
1971 1972 static int
1972 1973 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1973 1974 struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1974 1975 {
1975 1976 uint8_t *key_rxmic = NULL;
1976 1977 uint8_t *key_txmic = NULL;
1977 1978 uint8_t *key = (uint8_t *)&(k->wk_key[0]);
1978 1979 struct ath_hal *ah = sc->sc_ah;
1979 1980
1980 1981 key_txmic = key + 16;
1981 1982 key_rxmic = key + 24;
1982 1983
1983 1984 if (mac == NULL) {
1984 1985 /* Group key installation */
1985 1986 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1986 1987 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1987 1988 mac, B_FALSE));
1988 1989 }
1989 1990 if (!sc->sc_splitmic) {
1990 1991 /*
1991 1992 * data key goes at first index,
1992 1993 * the hal handles the MIC keys at index+64.
1993 1994 */
1994 1995 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1995 1996 (void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1996 1997 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1997 1998 mac, B_FALSE));
1998 1999 }
1999 2000 /*
2000 2001 * TX key goes at first index, RX key at +32.
2001 2002 * The hal handles the MIC keys at index+64.
2002 2003 */
2003 2004 (void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
2004 2005 if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
2005 2006 B_FALSE))) {
2006 2007 /* Txmic entry failed. No need to proceed further */
2007 2008 ARN_DBG((ARN_DBG_KEYCACHE,
2008 2009 "%s Setting TX MIC Key Failed\n", __func__));
2009 2010 return (0);
2010 2011 }
2011 2012
2012 2013 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
2013 2014
2014 2015 /* XXX delete tx key on failure? */
2015 2016 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
2016 2017
2017 2018 }
2018 2019
2019 2020 int
2020 2021 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2021 2022 const uint8_t mac[IEEE80211_ADDR_LEN])
2022 2023 {
2023 2024 struct arn_softc *sc = (struct arn_softc *)ic;
2024 2025 const struct ieee80211_cipher *cip = k->wk_cipher;
2025 2026 struct ath9k_keyval hk;
2026 2027
2027 2028 /* cipher table */
2028 2029 static const uint8_t ciphermap[] = {
2029 2030 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2030 2031 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2031 2032 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2032 2033 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
2033 2034 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2034 2035 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2035 2036 };
2036 2037
2037 2038 bzero(&hk, sizeof (hk));
2038 2039
2039 2040 /*
2040 2041 * Software crypto uses a "clear key" so non-crypto
2041 2042 * state kept in the key cache are maintainedd so that
2042 2043 * rx frames have an entry to match.
2043 2044 */
2044 2045 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2045 2046 ASSERT(cip->ic_cipher < 6);
2046 2047 hk.kv_type = ciphermap[cip->ic_cipher];
2047 2048 hk.kv_len = k->wk_keylen;
2048 2049 bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2049 2050 } else {
2050 2051 hk.kv_type = ATH9K_CIPHER_CLR;
2051 2052 }
2052 2053
2053 2054 if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2054 2055 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2055 2056 return (arn_keyset_tkip(sc, k, &hk, mac));
2056 2057 } else {
2057 2058 return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2058 2059 k->wk_keyix, &hk, mac, B_FALSE));
2059 2060 }
2060 2061 }
2061 2062
2062 2063 /*
2063 2064 * Enable/Disable short slot timing
2064 2065 */
2065 2066 void
2066 2067 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2067 2068 {
2068 2069 struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2069 2070
2070 2071 if (onoff)
2071 2072 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2072 2073 else
2073 2074 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2074 2075 }
2075 2076
/*
 * Bring the interface up: program the current channel, reset the
 * hardware, start the receive engine, and set up the interrupt mask.
 * Caller must hold the softc lock.  Returns 0 or an errno (EINVAL
 * for a bad channel, EIO for hardware failures).
 */
static int
arn_open(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_channel *curchan = ic->ic_curchan;
	struct ath9k_channel *init_channel;
	int error = 0, pos, status;

	ARN_LOCK_ASSERT(sc);

	/* Map the net80211 channel onto our hal channel table. */
	pos = arn_get_channel(sc, curchan);
	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		goto error;
	}

	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	if (sc->sc_curmode == ATH9K_MODE_11A) {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
	} else {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
	}

	init_channel = &sc->sc_ah->ah_channels[pos];

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
	    sc->tx_chan_width, sc->sc_tx_chainmask,
	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
	    B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: unable to reset hardware; hal status %u "
		    "(freq %u flags 0x%x)\n", __func__, status,
		    init_channel->channel, init_channel->channelFlags));

		error = EIO;
		goto error;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	arn_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (arn_startrecv(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "arn: "
		    "%s: unable to start recv logic\n", __func__));
		error = EIO;
		goto error;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;
#endif

/*
 * NOTE(review): the block below tests ATH9K_HW_CAP_HT but is guarded
 * by ARN_ATH9K_HW_CAP_GTT — looks like a copy/paste of the guard
 * above; confirm whether an HT/CST-specific define was intended.
 */
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;
#endif

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
#ifdef ARN_ATH9K_INT_MIB
	if (ath9k_hw_phycounters(sc->sc_ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
#endif
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
#ifdef ARN_ATH9K_INT_TIM
	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
#endif
	/* Switch the phy mode to match the channel we landed on. */
	if (arn_chan2mode(init_channel) != sc->sc_curmode)
		arn_setcurmode(sc, arn_chan2mode(init_channel));
	ARN_DBG((ARN_DBG_INIT, "arn: "
	    "%s: current mode after arn_setcurmode is %d\n",
	    __func__, sc->sc_curmode));

	sc->sc_isrunning = 1;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

	return (0);

error:
	return (error);
}
2200 2201
/*
 * Bring the interface down: transition net80211 to INIT, mask all
 * interrupts, and drain/stop the tx and rx paths.  Caller must hold
 * the softc lock.
 */
static void
arn_close(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;

	ARN_LOCK_ASSERT(sc);

	/* Nothing to do if we never got running. */
	if (!sc->sc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	/* Drop the lock across the net80211 calls. */
	ARN_UNLOCK(sc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ARN_LOCK(sc);

	/*
	 * make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	} else {
		/* Hardware already gone; just forget the rx list. */
		sc->sc_rxlink = NULL;
	}

	sc->sc_isrunning = 0;
}
2238 2239
/*
 * MAC callback functions
 */
/*
 * GLDv3 mc_getstat(9E) entry point: report driver and net80211
 * statistics.  Returns 0 on success, ENOTSUP for unknown stats;
 * net80211-maintained stats are delegated to ieee80211_stat().
 */
static int
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	ARN_LOCK(sc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		in = ic->ic_bss;
		rs = &in->in_rates;
		/* tx rate is in 500kb/s units; convert to bits/sec */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* Not tracked by this driver; let net80211 answer. */
		ARN_UNLOCK(sc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ARN_UNLOCK(sc);
		return (ENOTSUP);
	}
	ARN_UNLOCK(sc);

	return (0);
}
2310 2311
/*
 * GLDv3 mc_start(9E) entry point: (re)initialize the hardware and
 * clear the invalid flag.  Returns 0 or an errno from arn_open().
 */
int
arn_m_start(void *arg)
{
	struct arn_softc *sc = arg;
	int err = 0;

	ARN_LOCK(sc);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */

	arn_close(sc);

	if ((err = arn_open(sc)) != 0) {
		ARN_UNLOCK(sc);
		return (err);
	}

	/* H/W is ready now */
	sc->sc_flags &= ~SC_OP_INVALID;

	ARN_UNLOCK(sc);

	return (0);
}
2338 2339
/*
 * GLDv3 mc_stop(9E) entry point: bring the interface down and put
 * the hardware to sleep; marks the softc invalid until restarted.
 */
static void
arn_m_stop(void *arg)
{
	struct arn_softc *sc = arg;

	ARN_LOCK(sc);
	arn_close(sc);

	/* disable HAL and put h/w to sleep */
	(void) ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	/* XXX: hardware will not be ready in suspend state */
	sc->sc_flags |= SC_OP_INVALID;
	ARN_UNLOCK(sc);
}
2355 2356
2356 2357 static int
2357 2358 arn_m_promisc(void *arg, boolean_t on)
2358 2359 {
2359 2360 struct arn_softc *sc = arg;
2360 2361 struct ath_hal *ah = sc->sc_ah;
2361 2362 uint32_t rfilt;
2362 2363
2363 2364 ARN_LOCK(sc);
2364 2365
2365 2366 rfilt = ath9k_hw_getrxfilter(ah);
2366 2367 if (on)
2367 2368 rfilt |= ATH9K_RX_FILTER_PROM;
2368 2369 else
2369 2370 rfilt &= ~ATH9K_RX_FILTER_PROM;
2370 2371 sc->sc_promisc = on;
2371 2372 ath9k_hw_setrxfilter(ah, rfilt);
2372 2373
2373 2374 ARN_UNLOCK(sc);
2374 2375
2375 2376 return (0);
2376 2377 }
2377 2378
2378 2379 static int
2379 2380 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2380 2381 {
2381 2382 struct arn_softc *sc = arg;
2382 2383 struct ath_hal *ah = sc->sc_ah;
2383 2384 uint32_t val, index, bit;
2384 2385 uint8_t pos;
2385 2386 uint32_t *mfilt = sc->sc_mcast_hash;
2386 2387
2387 2388 ARN_LOCK(sc);
2388 2389
2389 2390 /* calculate XOR of eight 6bit values */
2390 2391 val = ARN_LE_READ_32(mca + 0);
2391 2392 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2392 2393 val = ARN_LE_READ_32(mca + 3);
2393 2394 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2394 2395 pos &= 0x3f;
2395 2396 index = pos / 32;
2396 2397 bit = 1 << (pos % 32);
2397 2398
2398 2399 if (add) { /* enable multicast */
2399 2400 sc->sc_mcast_refs[pos]++;
2400 2401 mfilt[index] |= bit;
2401 2402 } else { /* disable multicast */
2402 2403 if (--sc->sc_mcast_refs[pos] == 0)
2403 2404 mfilt[index] &= ~bit;
2404 2405 }
2405 2406 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2406 2407
2407 2408 ARN_UNLOCK(sc);
2408 2409 return (0);
2409 2410 }
2410 2411
2411 2412 static int
2412 2413 arn_m_unicst(void *arg, const uint8_t *macaddr)
2413 2414 {
2414 2415 struct arn_softc *sc = arg;
2415 2416 struct ath_hal *ah = sc->sc_ah;
2416 2417 ieee80211com_t *ic = (ieee80211com_t *)sc;
2417 2418
2418 2419 ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
2419 2420 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2420 2421 macaddr[0], macaddr[1], macaddr[2],
2421 2422 macaddr[3], macaddr[4], macaddr[5]));
2422 2423
2423 2424 ARN_LOCK(sc);
2424 2425 IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2425 2426 (void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2426 2427 (void) arn_reset(ic);
2427 2428 ARN_UNLOCK(sc);
2428 2429 return (0);
2429 2430 }
2430 2431
2431 2432 static mblk_t *
2432 2433 arn_m_tx(void *arg, mblk_t *mp)
2433 2434 {
2434 2435 struct arn_softc *sc = arg;
2435 2436 int error = 0;
2436 2437 mblk_t *next;
2437 2438 ieee80211com_t *ic = (ieee80211com_t *)sc;
2438 2439
2439 2440 /*
2440 2441 * No data frames go out unless we're associated; this
2441 2442 * should not happen as the 802.11 layer does not enable
2442 2443 * the xmit queue until we enter the RUN state.
2443 2444 */
2444 2445 if (ic->ic_state != IEEE80211_S_RUN) {
2445 2446 ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2446 2447 "discard, state %u\n", ic->ic_state));
2447 2448 sc->sc_stats.ast_tx_discard++;
2448 2449 freemsgchain(mp);
2449 2450 return (NULL);
2450 2451 }
2451 2452
2452 2453 while (mp != NULL) {
2453 2454 next = mp->b_next;
2454 2455 mp->b_next = NULL;
2455 2456 error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2456 2457 if (error != 0) {
2457 2458 mp->b_next = next;
2458 2459 if (error == ENOMEM) {
2459 2460 break;
2460 2461 } else {
2461 2462 freemsgchain(mp);
2462 2463 return (NULL);
2463 2464 }
2464 2465 }
2465 2466 mp = next;
2466 2467 }
2467 2468
2468 2469 return (mp);
2469 2470 }
2470 2471
2471 2472 static void
2472 2473 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2473 2474 {
2474 2475 struct arn_softc *sc = arg;
2475 2476 int32_t err;
2476 2477
2477 2478 err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2478 2479
2479 2480 ARN_LOCK(sc);
2480 2481 if (err == ENETRESET) {
2481 2482 if (!(sc->sc_flags & SC_OP_INVALID)) {
2482 2483 ARN_UNLOCK(sc);
2483 2484
2484 2485 (void) arn_m_start(sc);
2485 2486
2486 2487 (void) ieee80211_new_state(&sc->sc_isc,
2487 2488 IEEE80211_S_SCAN, -1);
2488 2489 ARN_LOCK(sc);
2489 2490 }
2490 2491 }
2491 2492 ARN_UNLOCK(sc);
2492 2493 }
2493 2494
2494 2495 static int
2495 2496 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2496 2497 uint_t wldp_length, const void *wldp_buf)
2497 2498 {
2498 2499 struct arn_softc *sc = arg;
2499 2500 int err;
2500 2501
2501 2502 err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2502 2503 wldp_length, wldp_buf);
2503 2504
2504 2505 ARN_LOCK(sc);
2505 2506
2506 2507 if (err == ENETRESET) {
2507 2508 if (!(sc->sc_flags & SC_OP_INVALID)) {
2508 2509 ARN_UNLOCK(sc);
2509 2510 (void) arn_m_start(sc);
2510 2511 (void) ieee80211_new_state(&sc->sc_isc,
2511 2512 IEEE80211_S_SCAN, -1);
2512 2513 ARN_LOCK(sc);
2513 2514 }
2514 2515 err = 0;
2515 2516 }
2516 2517
2517 2518 ARN_UNLOCK(sc);
2518 2519
2519 2520 return (err);
2520 2521 }
2521 2522
2522 2523 /* ARGSUSED */
2523 2524 static int
2524 2525 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2525 2526 uint_t wldp_length, void *wldp_buf)
2526 2527 {
2527 2528 struct arn_softc *sc = arg;
2528 2529 int err = 0;
2529 2530
2530 2531 err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2531 2532 wldp_length, wldp_buf);
2532 2533
2533 2534 return (err);
2534 2535 }
2535 2536
/*
 * GLDv3 mc_propinfo(9E) entry point: forward property metadata
 * queries to net80211.
 */
static void
arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    mac_prop_info_handle_t prh)
{
	struct arn_softc *sc = arg;

	ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
}
2544 2545
2545 2546 /* return bus cachesize in 4B word units */
2546 2547 static void
2547 2548 arn_pci_config_cachesize(struct arn_softc *sc)
2548 2549 {
2549 2550 uint8_t csz;
2550 2551
2551 2552 /*
2552 2553 * Cache line size is used to size and align various
2553 2554 * structures used to communicate with the hardware.
2554 2555 */
2555 2556 csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2556 2557 if (csz == 0) {
2557 2558 /*
2558 2559 * We must have this setup properly for rx buffer
2559 2560 * DMA to work so force a reasonable value here if it
2560 2561 * comes up zero.
2561 2562 */
2562 2563 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2563 2564 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2564 2565 csz);
2565 2566 }
2566 2567 sc->sc_cachelsz = csz << 2;
2567 2568 }
2568 2569
2569 2570 static int
2570 2571 arn_pci_setup(struct arn_softc *sc)
2571 2572 {
2572 2573 uint16_t command;
2573 2574
2574 2575 /*
2575 2576 * Enable memory mapping and bus mastering
2576 2577 */
2577 2578 ASSERT(sc != NULL);
2578 2579 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2579 2580 command |= PCI_COMM_MAE | PCI_COMM_ME;
2580 2581 pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2581 2582 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2582 2583 if ((command & PCI_COMM_MAE) == 0) {
2583 2584 arn_problem("arn: arn_pci_setup(): "
2584 2585 "failed to enable memory mapping\n");
2585 2586 return (EIO);
2586 2587 }
2587 2588 if ((command & PCI_COMM_ME) == 0) {
2588 2589 arn_problem("arn: arn_pci_setup(): "
2589 2590 "failed to enable bus mastering\n");
2590 2591 return (EIO);
2591 2592 }
2592 2593 ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2593 2594 "set command reg to 0x%x \n", command));
2594 2595
2595 2596 return (0);
2596 2597 }
2597 2598
2598 2599 static void
2599 2600 arn_get_hw_encap(struct arn_softc *sc)
2600 2601 {
2601 2602 ieee80211com_t *ic;
2602 2603 struct ath_hal *ah;
2603 2604
2604 2605 ic = (ieee80211com_t *)sc;
2605 2606 ah = sc->sc_ah;
2606 2607
2607 2608 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2608 2609 ATH9K_CIPHER_AES_CCM, NULL))
2609 2610 ic->ic_caps |= IEEE80211_C_AES_CCM;
2610 2611 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2611 2612 ATH9K_CIPHER_AES_OCB, NULL))
2612 2613 ic->ic_caps |= IEEE80211_C_AES;
2613 2614 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2614 2615 ATH9K_CIPHER_TKIP, NULL))
2615 2616 ic->ic_caps |= IEEE80211_C_TKIP;
2616 2617 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2617 2618 ATH9K_CIPHER_WEP, NULL))
2618 2619 ic->ic_caps |= IEEE80211_C_WEP;
2619 2620 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2620 2621 ATH9K_CIPHER_MIC, NULL))
2621 2622 ic->ic_caps |= IEEE80211_C_TKIPMIC;
2622 2623 }
2623 2624
2624 2625 static void
2625 2626 arn_setup_ht_cap(struct arn_softc *sc)
2626 2627 {
2627 2628 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
2628 2629 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
2629 2630
2630 2631 /* LINTED E_FUNC_SET_NOT_USED */
2631 2632 uint8_t tx_streams;
2632 2633 uint8_t rx_streams;
2633 2634
2634 2635 arn_ht_conf *ht_info = &sc->sc_ht_conf;
2635 2636
2636 2637 ht_info->ht_supported = B_TRUE;
2637 2638
↓ open down ↓ |
2606 lines elided |
↑ open up ↑ |
2638 2639 /* Todo: IEEE80211_HTCAP_SMPS */
2639 2640 ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
2640 2641 IEEE80211_HTCAP_SHORTGI40 |
2641 2642 IEEE80211_HTCAP_DSSSCCK40;
2642 2643
2643 2644 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
2644 2645 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
2645 2646
2646 2647 /* set up supported mcs set */
2647 2648 (void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
2648 - tx_streams =
2649 - !(sc->sc_ah->ah_caps.tx_chainmask &
2650 - (sc->sc_ah->ah_caps.tx_chainmask - 1)) ? 1 : 2;
2651 - rx_streams =
2652 - !(sc->sc_ah->ah_caps.rx_chainmask &
2653 - (sc->sc_ah->ah_caps.rx_chainmask - 1)) ? 1 : 2;
2649 + tx_streams = ISP2(sc->sc_ah->ah_caps.tx_chainmask) ? 1 : 2;
2650 + rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;
2654 2651
2655 2652 ht_info->rx_mcs_mask[0] = 0xff;
2656 2653 if (rx_streams >= 2)
2657 2654 ht_info->rx_mcs_mask[1] = 0xff;
2658 2655 }
2659 2656
2660 2657 /* xxx should be used for ht rate set negotiating ? */
2661 2658 static void
2662 2659 arn_overwrite_11n_rateset(struct arn_softc *sc)
2663 2660 {
2664 2661 uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
2665 2662 int mcs_idx, mcs_count = 0;
2666 2663 int i, j;
2667 2664
2668 2665 (void) memset(&ieee80211_rateset_11n, 0,
2669 2666 sizeof (ieee80211_rateset_11n));
2670 2667 for (i = 0; i < 10; i++) {
2671 2668 for (j = 0; j < 8; j++) {
2672 2669 if (ht_rs[i] & (1 << j)) {
2673 2670 mcs_idx = i * 8 + j;
2674 2671 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
2675 2672 break;
2676 2673 }
2677 2674
2678 2675 ieee80211_rateset_11n.rs_rates[mcs_idx] =
2679 2676 (uint8_t)mcs_idx;
2680 2677 mcs_count++;
2681 2678 }
2682 2679 }
2683 2680 }
2684 2681
2685 2682 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
2686 2683
2687 2684 ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
2688 2685 "MCS rate set supported by this station is as follows:\n"));
2689 2686
2690 2687 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
2691 2688 ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
2692 2689 i, ieee80211_rateset_11n.rs_rates[i]));
2693 2690 }
2694 2691
2695 2692 }
2696 2693
/*
 * Update WME parameters for a transmit queue.
 */
/*
 * Pull the WME parameters for access category `ac' out of net80211
 * and program them into the matching hal tx queue.
 * Returns 1 on success, 0 on failure (note: NOT an errno).
 */
static int
arn_tx_queue_update(struct arn_softc *sc, int ac)
{
/*
 * NOTE(review): macro arguments are unparenthesized; safe as used
 * below (simple struct fields only) but fragile for other callers.
 */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_txq *txq;
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;

	/* Start from the queue's current hal settings. */
	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);

	/*
	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
	 */

	/* xxx should update these flags here? */
#if 0
	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
	    TXQ_FLAG_TXERRINT_ENABLE |
	    TXQ_FLAG_TXDESCINT_ENABLE |
	    TXQ_FLAG_TXURNINT_ENABLE;
#endif

	/* CW min/max arrive as log2 exponents; expand to values. */
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_readyTime = 0;
	/* TXOP limit is in 32us units; convert to microseconds. */
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	ARN_DBG((ARN_DBG_INIT,
	    "%s:"
	    "Q%u"
	    "qflags 0x%x"
	    "aifs %u"
	    "cwmin %u"
	    "cwmax %u"
	    "burstTime %u\n",
	    __func__,
	    txq->axq_qnum,
	    qi.tqi_qflags,
	    qi.tqi_aifs,
	    qi.tqi_cwmin,
	    qi.tqi_cwmax,
	    qi.tqi_burstTime));

	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
		arn_problem("unable to update hardware queue "
		    "parameters for %s traffic!\n",
		    ieee80211_wme_acnames[ac]);
		return (0);
	} else {
		/* push to H/W */
		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
		return (1);
	}

#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
2770 2767
2771 2768 /* Update WME parameters */
2772 2769 static int
2773 2770 arn_wme_update(ieee80211com_t *ic)
2774 2771 {
2775 2772 struct arn_softc *sc = (struct arn_softc *)ic;
2776 2773
2777 2774 /* updateing */
2778 2775 return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2779 2776 !arn_tx_queue_update(sc, WME_AC_BK) ||
2780 2777 !arn_tx_queue_update(sc, WME_AC_VI) ||
2781 2778 !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2782 2779 }
2783 2780
2784 2781 /*
2785 2782 * Update tx/rx chainmask. For legacy association,
2786 2783 * hard code chainmask to 1x1, for 11n association, use
2787 2784 * the chainmask configuration.
2788 2785 */
2789 2786 void
2790 2787 arn_update_chainmask(struct arn_softc *sc)
2791 2788 {
2792 2789 boolean_t is_ht = B_FALSE;
2793 2790 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2794 2791
2795 2792 is_ht = sc->sc_ht_conf.ht_supported;
2796 2793 if (is_ht) {
2797 2794 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2798 2795 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2799 2796 } else {
2800 2797 sc->sc_tx_chainmask = 1;
2801 2798 sc->sc_rx_chainmask = 1;
2802 2799 }
2803 2800
2804 2801 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2805 2802 "tx_chainmask = %d, rx_chainmask = %d\n",
2806 2803 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2807 2804 }
2808 2805
/*
 * DDI_RESUME handler: re-enable PCI access and restart the chip if
 * the interface was running before suspend.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
arn_resume(dev_info_t *devinfo)
{
	struct arn_softc *sc;
	int ret = DDI_SUCCESS;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	if (sc == NULL) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "failed to get soft state\n"));
		return (DDI_FAILURE);
	}

	ARN_LOCK(sc);
	/*
	 * Set up config space command register(s). Refuse
	 * to resume on failure.
	 */
	if (arn_pci_setup(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "ath_pci_setup() failed\n"));
		ARN_UNLOCK(sc);
		return (DDI_FAILURE);
	}

	/* Only restart the chip if it was running before suspend. */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ret = arn_open(sc);
	ARN_UNLOCK(sc);

	return (ret);
}
2840 2837
2841 2838 static int
2842 2839 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2843 2840 {
2844 2841 struct arn_softc *sc;
2845 2842 int instance;
2846 2843 int status;
2847 2844 int32_t err;
2848 2845 uint16_t vendor_id;
2849 2846 uint16_t device_id;
2850 2847 uint32_t i;
2851 2848 uint32_t val;
2852 2849 char strbuf[32];
2853 2850 ieee80211com_t *ic;
2854 2851 struct ath_hal *ah;
2855 2852 wifi_data_t wd = { 0 };
2856 2853 mac_register_t *macp;
2857 2854
2858 2855 switch (cmd) {
2859 2856 case DDI_ATTACH:
2860 2857 break;
2861 2858 case DDI_RESUME:
2862 2859 return (arn_resume(devinfo));
2863 2860 default:
2864 2861 return (DDI_FAILURE);
2865 2862 }
2866 2863
2867 2864 instance = ddi_get_instance(devinfo);
2868 2865 if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2869 2866 ARN_DBG((ARN_DBG_ATTACH, "arn: "
2870 2867 "%s: Unable to alloc softstate\n", __func__));
2871 2868 return (DDI_FAILURE);
2872 2869 }
2873 2870
2874 2871 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2875 2872 ic = (ieee80211com_t *)sc;
2876 2873 sc->sc_dev = devinfo;
2877 2874
2878 2875 mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2879 2876 mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2880 2877 mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2881 2878 mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2882 2879 mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2883 2880 #ifdef ARN_IBSS
2884 2881 mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2885 2882 #endif
2886 2883
2887 2884 sc->sc_flags |= SC_OP_INVALID;
2888 2885
2889 2886 err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2890 2887 if (err != DDI_SUCCESS) {
2891 2888 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2892 2889 "pci_config_setup() failed"));
2893 2890 goto attach_fail0;
2894 2891 }
2895 2892
2896 2893 if (arn_pci_setup(sc) != 0)
2897 2894 goto attach_fail1;
2898 2895
2899 2896 /* Cache line size set up */
2900 2897 arn_pci_config_cachesize(sc);
2901 2898
2902 2899 vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2903 2900 device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2904 2901 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2905 2902 "device id 0x%x, cache size %d\n",
2906 2903 vendor_id, device_id,
2907 2904 pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2908 2905
2909 2906 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2910 2907 val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2911 2908 if ((val & 0x0000ff00) != 0)
2912 2909 pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2913 2910
2914 2911 err = ddi_regs_map_setup(devinfo, 1,
2915 2912 &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2916 2913 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2917 2914 "regs map1 = %x err=%d\n", sc->mem, err));
2918 2915 if (err != DDI_SUCCESS) {
2919 2916 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2920 2917 "ddi_regs_map_setup() failed"));
2921 2918 goto attach_fail1;
2922 2919 }
2923 2920
2924 2921 ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2925 2922 if (ah == NULL) {
2926 2923 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2927 2924 "unable to attach hw: H/W status %u\n",
2928 2925 status));
2929 2926 goto attach_fail2;
2930 2927 }
2931 2928 sc->sc_ah = ah;
2932 2929
2933 2930 ath9k_hw_getmac(ah, ic->ic_macaddr);
2934 2931
2935 2932 /* Get the hardware key cache size. */
2936 2933 sc->sc_keymax = ah->ah_caps.keycache_size;
2937 2934 if (sc->sc_keymax > ATH_KEYMAX) {
2938 2935 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2939 2936 "Warning, using only %u entries in %u key cache\n",
2940 2937 ATH_KEYMAX, sc->sc_keymax));
2941 2938 sc->sc_keymax = ATH_KEYMAX;
2942 2939 }
2943 2940
2944 2941 /*
2945 2942 * Reset the key cache since some parts do not
2946 2943 * reset the contents on initial power up.
2947 2944 */
2948 2945 for (i = 0; i < sc->sc_keymax; i++)
2949 2946 (void) ath9k_hw_keyreset(ah, (uint16_t)i);
2950 2947 /*
2951 2948 * Mark key cache slots associated with global keys
2952 2949 * as in use. If we knew TKIP was not to be used we
2953 2950 * could leave the +32, +64, and +32+64 slots free.
2954 2951 * XXX only for splitmic.
2955 2952 */
2956 2953 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2957 2954 set_bit(i, sc->sc_keymap);
2958 2955 set_bit(i + 32, sc->sc_keymap);
2959 2956 set_bit(i + 64, sc->sc_keymap);
2960 2957 set_bit(i + 32 + 64, sc->sc_keymap);
2961 2958 }
2962 2959
2963 2960 /* Collect the channel list using the default country code */
2964 2961 err = arn_setup_channels(sc);
2965 2962 if (err == EINVAL) {
2966 2963 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2967 2964 "ERR:arn_setup_channels\n"));
2968 2965 goto attach_fail3;
2969 2966 }
2970 2967
2971 2968 /* default to STA mode */
2972 2969 sc->sc_ah->ah_opmode = ATH9K_M_STA;
2973 2970
2974 2971 /* Setup rate tables */
2975 2972 arn_rate_attach(sc);
2976 2973 arn_setup_rates(sc, IEEE80211_MODE_11A);
2977 2974 arn_setup_rates(sc, IEEE80211_MODE_11B);
2978 2975 arn_setup_rates(sc, IEEE80211_MODE_11G);
2979 2976
2980 2977 /* Setup current mode here */
2981 2978 arn_setcurmode(sc, ATH9K_MODE_11G);
2982 2979
2983 2980 /* 802.11g features */
2984 2981 if (sc->sc_have11g)
2985 2982 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2986 2983 IEEE80211_C_SHSLOT; /* short slot time */
2987 2984
2988 2985 /* Temp workaround */
2989 2986 sc->sc_mrretry = 1;
2990 2987 sc->sc_config.ath_aggr_prot = 0;
2991 2988
2992 2989 /* Setup tx/rx descriptors */
2993 2990 err = arn_desc_alloc(devinfo, sc);
2994 2991 if (err != DDI_SUCCESS) {
2995 2992 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2996 2993 "failed to allocate descriptors: %d\n", err));
2997 2994 goto attach_fail3;
2998 2995 }
2999 2996
3000 2997 if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
3001 2998 TASKQ_DEFAULTPRI, 0)) == NULL) {
3002 2999 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3003 3000 "ERR:ddi_taskq_create\n"));
3004 3001 goto attach_fail4;
3005 3002 }
3006 3003
3007 3004 /*
3008 3005 * Allocate hardware transmit queues: one queue for
3009 3006 * beacon frames and one data queue for each QoS
3010 3007 * priority. Note that the hal handles reseting
3011 3008 * these queues at the needed time.
3012 3009 */
3013 3010 #ifdef ARN_IBSS
3014 3011 sc->sc_beaconq = arn_beaconq_setup(ah);
3015 3012 if (sc->sc_beaconq == (-1)) {
3016 3013 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3017 3014 "unable to setup a beacon xmit queue\n"));
3018 3015 goto attach_fail4;
3019 3016 }
3020 3017 #endif
3021 3018 #ifdef ARN_HOSTAP
3022 3019 sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3023 3020 if (sc->sc_cabq == NULL) {
3024 3021 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3025 3022 "unable to setup CAB xmit queue\n"));
3026 3023 goto attach_fail4;
3027 3024 }
3028 3025
3029 3026 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3030 3027 ath_cabq_update(sc);
3031 3028 #endif
3032 3029
3033 3030 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3034 3031 sc->sc_haltype2q[i] = -1;
3035 3032
3036 3033 /* Setup data queues */
3037 3034 /* NB: ensure BK queue is the lowest priority h/w queue */
3038 3035 if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3039 3036 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3040 3037 "unable to setup xmit queue for BK traffic\n"));
3041 3038 goto attach_fail4;
3042 3039 }
3043 3040 if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3044 3041 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3045 3042 "unable to setup xmit queue for BE traffic\n"));
3046 3043 goto attach_fail4;
3047 3044 }
3048 3045 if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3049 3046 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3050 3047 "unable to setup xmit queue for VI traffic\n"));
3051 3048 goto attach_fail4;
3052 3049 }
3053 3050 if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3054 3051 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3055 3052 "unable to setup xmit queue for VO traffic\n"));
3056 3053 goto attach_fail4;
3057 3054 }
3058 3055
3059 3056 /*
3060 3057 * Initializes the noise floor to a reasonable default value.
3061 3058 * Later on this will be updated during ANI processing.
3062 3059 */
3063 3060
3064 3061 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3065 3062
3066 3063
3067 3064 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3068 3065 ATH9K_CIPHER_TKIP, NULL)) {
3069 3066 /*
3070 3067 * Whether we should enable h/w TKIP MIC.
3071 3068 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3072 3069 * report WMM capable, so it's always safe to turn on
3073 3070 * TKIP MIC in this case.
3074 3071 */
3075 3072 (void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3076 3073 0, 1, NULL);
3077 3074 }
3078 3075
3079 3076 /* Get cipher releated capability information */
3080 3077 arn_get_hw_encap(sc);
3081 3078
3082 3079 /*
3083 3080 * Check whether the separate key cache entries
3084 3081 * are required to handle both tx+rx MIC keys.
3085 3082 * With split mic keys the number of stations is limited
3086 3083 * to 27 otherwise 59.
3087 3084 */
3088 3085 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3089 3086 ATH9K_CIPHER_TKIP, NULL) &&
3090 3087 ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3091 3088 ATH9K_CIPHER_MIC, NULL) &&
3092 3089 ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3093 3090 0, NULL))
3094 3091 sc->sc_splitmic = 1;
3095 3092
3096 3093 /* turn on mcast key search if possible */
3097 3094 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3098 3095 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3099 3096 1, NULL);
3100 3097
3101 3098 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3102 3099 sc->sc_config.txpowlimit_override = 0;
3103 3100
3104 3101 /* 11n Capabilities */
3105 3102 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3106 3103 sc->sc_flags |= SC_OP_TXAGGR;
3107 3104 sc->sc_flags |= SC_OP_RXAGGR;
3108 3105 arn_setup_ht_cap(sc);
3109 3106 arn_overwrite_11n_rateset(sc);
3110 3107 }
3111 3108
3112 3109 sc->sc_tx_chainmask = 1;
3113 3110 sc->sc_rx_chainmask = 1;
3114 3111 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3115 3112 "tx_chainmask = %d, rx_chainmask = %d\n",
3116 3113 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3117 3114
3118 3115 /* arn_update_chainmask(sc); */
3119 3116
3120 3117 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3121 3118 sc->sc_defant = ath9k_hw_getdefantenna(ah);
3122 3119
3123 3120 ath9k_hw_getmac(ah, sc->sc_myaddr);
3124 3121 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3125 3122 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3126 3123 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3127 3124 (void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3128 3125 }
3129 3126
3130 3127 /* set default value to short slot time */
3131 3128 sc->sc_slottime = ATH9K_SLOT_TIME_9;
3132 3129 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3133 3130
3134 3131 /* initialize beacon slots */
3135 3132 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3136 3133 sc->sc_bslot[i] = ATH_IF_ID_ANY;
3137 3134
3138 3135 /* Save MISC configurations */
3139 3136 sc->sc_config.swBeaconProcess = 1;
3140 3137
3141 3138 /* Support QoS/WME */
3142 3139 ic->ic_caps |= IEEE80211_C_WME;
3143 3140 ic->ic_wme.wme_update = arn_wme_update;
3144 3141
3145 3142 /* Support 802.11n/HT */
3146 3143 if (sc->sc_ht_conf.ht_supported) {
3147 3144 ic->ic_htcaps =
3148 3145 IEEE80211_HTCAP_CHWIDTH40 |
3149 3146 IEEE80211_HTCAP_SHORTGI40 |
3150 3147 IEEE80211_HTCAP_DSSSCCK40 |
3151 3148 IEEE80211_HTCAP_MAXAMSDU_7935 |
3152 3149 IEEE80211_HTC_HT |
3153 3150 IEEE80211_HTC_AMSDU |
3154 3151 IEEE80211_HTCAP_RXSTBC_2STREAM;
3155 3152
3156 3153 #ifdef ARN_TX_AGGREGATION
3157 3154 ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3158 3155 #endif
3159 3156 }
3160 3157
3161 3158 /* Header padding requested by driver */
3162 3159 ic->ic_flags |= IEEE80211_F_DATAPAD;
3163 3160 /* Support WPA/WPA2 */
3164 3161 ic->ic_caps |= IEEE80211_C_WPA;
3165 3162 #if 0
3166 3163 ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3167 3164 ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3168 3165 #endif
3169 3166 ic->ic_phytype = IEEE80211_T_HT;
3170 3167 ic->ic_opmode = IEEE80211_M_STA;
3171 3168 ic->ic_state = IEEE80211_S_INIT;
3172 3169 ic->ic_maxrssi = ARN_MAX_RSSI;
3173 3170 ic->ic_set_shortslot = arn_set_shortslot;
3174 3171 ic->ic_xmit = arn_tx;
3175 3172 ieee80211_attach(ic);
3176 3173
3177 3174 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3178 3175 "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3179 3176
3180 3177 /* different instance has different WPA door */
3181 3178 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3182 3179 ddi_driver_name(devinfo),
3183 3180 ddi_get_instance(devinfo));
3184 3181
3185 3182 if (sc->sc_ht_conf.ht_supported) {
3186 3183 sc->sc_recv_action = ic->ic_recv_action;
3187 3184 ic->ic_recv_action = arn_ampdu_recv_action;
3188 3185 // sc->sc_send_action = ic->ic_send_action;
3189 3186 // ic->ic_send_action = arn_ampdu_send_action;
3190 3187
3191 3188 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3192 3189 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3193 3190 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3194 3191 }
3195 3192
3196 3193 /* Override 80211 default routines */
3197 3194 sc->sc_newstate = ic->ic_newstate;
3198 3195 ic->ic_newstate = arn_newstate;
3199 3196 #ifdef ARN_IBSS
3200 3197 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3201 3198 ic->ic_recv_mgmt = arn_recv_mgmt;
3202 3199 #endif
3203 3200 ic->ic_watchdog = arn_watchdog;
3204 3201 ic->ic_node_alloc = arn_node_alloc;
3205 3202 ic->ic_node_free = arn_node_free;
3206 3203 ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3207 3204 ic->ic_crypto.cs_key_delete = arn_key_delete;
3208 3205 ic->ic_crypto.cs_key_set = arn_key_set;
3209 3206
3210 3207 ieee80211_media_init(ic);
3211 3208
3212 3209 /*
3213 3210 * initialize default tx key
3214 3211 */
3215 3212 ic->ic_def_txkey = 0;
3216 3213
3217 3214 sc->sc_rx_pend = 0;
3218 3215 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3219 3216 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3220 3217 &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3221 3218 if (err != DDI_SUCCESS) {
3222 3219 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3223 3220 "ddi_add_softintr() failed....\n"));
3224 3221 goto attach_fail5;
3225 3222 }
3226 3223
3227 3224 if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3228 3225 != DDI_SUCCESS) {
3229 3226 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3230 3227 "Can not get iblock cookie for INT\n"));
3231 3228 goto attach_fail6;
3232 3229 }
3233 3230
3234 3231 if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3235 3232 (caddr_t)sc) != DDI_SUCCESS) {
3236 3233 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3237 3234 "Can not set intr for ARN driver\n"));
3238 3235 goto attach_fail6;
3239 3236 }
3240 3237
3241 3238 /*
3242 3239 * Provide initial settings for the WiFi plugin; whenever this
3243 3240 * information changes, we need to call mac_plugindata_update()
3244 3241 */
3245 3242 wd.wd_opmode = ic->ic_opmode;
3246 3243 wd.wd_secalloc = WIFI_SEC_NONE;
3247 3244 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3248 3245
3249 3246 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3250 3247 "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3251 3248 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3252 3249 wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3253 3250 wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3254 3251
3255 3252 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3256 3253 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3257 3254 "MAC version mismatch\n"));
3258 3255 goto attach_fail7;
3259 3256 }
3260 3257
3261 3258 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
3262 3259 macp->m_driver = sc;
3263 3260 macp->m_dip = devinfo;
3264 3261 macp->m_src_addr = ic->ic_macaddr;
3265 3262 macp->m_callbacks = &arn_m_callbacks;
3266 3263 macp->m_min_sdu = 0;
3267 3264 macp->m_max_sdu = IEEE80211_MTU;
3268 3265 macp->m_pdata = &wd;
3269 3266 macp->m_pdata_size = sizeof (wd);
3270 3267
3271 3268 err = mac_register(macp, &ic->ic_mach);
3272 3269 mac_free(macp);
3273 3270 if (err != 0) {
3274 3271 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3275 3272 "mac_register err %x\n", err));
3276 3273 goto attach_fail7;
3277 3274 }
3278 3275
3279 3276 /* Create minor node of type DDI_NT_NET_WIFI */
3280 3277 (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3281 3278 ARN_NODENAME, instance);
3282 3279 err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3283 3280 instance + 1, DDI_NT_NET_WIFI, 0);
3284 3281 if (err != DDI_SUCCESS)
3285 3282 ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3286 3283 "Create minor node failed - %d\n", err));
3287 3284
3288 3285 /* Notify link is down now */
3289 3286 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3290 3287
3291 3288 sc->sc_promisc = B_FALSE;
3292 3289 bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3293 3290 bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3294 3291
3295 3292 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3296 3293 "Atheros AR%s MAC/BB Rev:%x "
3297 3294 "AR%s RF Rev:%x: mem=0x%lx\n",
3298 3295 arn_mac_bb_name(ah->ah_macVersion),
3299 3296 ah->ah_macRev,
3300 3297 arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3301 3298 ah->ah_phyRev,
3302 3299 (unsigned long)sc->mem));
3303 3300
3304 3301 /* XXX: hardware will not be ready until arn_open() being called */
3305 3302 sc->sc_flags |= SC_OP_INVALID;
3306 3303 sc->sc_isrunning = 0;
3307 3304
3308 3305 return (DDI_SUCCESS);
3309 3306
3310 3307 attach_fail7:
3311 3308 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3312 3309 attach_fail6:
3313 3310 ddi_remove_softintr(sc->sc_softint_id);
3314 3311 attach_fail5:
3315 3312 (void) ieee80211_detach(ic);
3316 3313 attach_fail4:
3317 3314 arn_desc_free(sc);
3318 3315 if (sc->sc_tq)
3319 3316 ddi_taskq_destroy(sc->sc_tq);
3320 3317 attach_fail3:
3321 3318 ath9k_hw_detach(ah);
3322 3319 attach_fail2:
3323 3320 ddi_regs_map_free(&sc->sc_io_handle);
3324 3321 attach_fail1:
3325 3322 pci_config_teardown(&sc->sc_cfg_handle);
3326 3323 attach_fail0:
3327 3324 sc->sc_flags |= SC_OP_INVALID;
3328 3325 /* cleanup tx queues */
3329 3326 mutex_destroy(&sc->sc_txbuflock);
3330 3327 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3331 3328 if (ARN_TXQ_SETUP(sc, i)) {
3332 3329 /* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3333 3330 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3334 3331 }
3335 3332 }
3336 3333 mutex_destroy(&sc->sc_rxbuflock);
3337 3334 mutex_destroy(&sc->sc_serial_rw);
3338 3335 mutex_destroy(&sc->sc_genlock);
3339 3336 mutex_destroy(&sc->sc_resched_lock);
3340 3337 #ifdef ARN_IBSS
3341 3338 mutex_destroy(&sc->sc_bcbuflock);
3342 3339 #endif
3343 3340
3344 3341 ddi_soft_state_free(arn_soft_state_p, instance);
3345 3342
3346 3343 return (DDI_FAILURE);
3347 3344
3348 3345 }
3349 3346
/*
 * Suspend transmit/receive for powerdown (DDI_SUSPEND).
 *
 * Closes the interface under the softc lock so the hardware is idle
 * before the system powers down; the matching resume path is expected
 * to reopen it.  Always returns DDI_SUCCESS.
 */
static int
arn_suspend(struct arn_softc *sc)
{
	ARN_LOCK(sc);
	arn_close(sc);		/* quiesce hardware and mark interface closed */
	ARN_UNLOCK(sc);

	return (DDI_SUCCESS);
}
3362 3359
/*
 * arn_detach(9E): detach or suspend the device.
 *
 * DDI_SUSPEND is delegated to arn_suspend().  DDI_DETACH tears down
 * everything arn_attach() set up, largely in reverse order; the
 * 802.11/descriptor/HAL ordering constraints are documented inline
 * below.  Returns DDI_FAILURE if the MAC layer still has clients.
 */
static int32_t
arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct arn_softc *sc;
	int i;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (arn_suspend(sc));

	default:
		return (DDI_FAILURE);
	}

	/* Refuse to detach while the MAC layer still has active clients */
	if (mac_disable(sc->sc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	/* Stop the periodic scan and calibration timers */
	arn_stop_scantimer(sc);
	arn_stop_caltimer(sc);

	/* disable interrupts */
	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(sc->sc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
	ddi_remove_softintr(sc->sc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 * insure callbacks into the driver to delete global
	 * key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 * the 802.11 layer as we'll get called back to reclaim
	 * node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 * it last
	 */
	ieee80211_detach(&sc->sc_isc);

	arn_desc_free(sc);

	ddi_taskq_destroy(sc->sc_tq);

	/* Wake the chip before HAL detach, unless attach never completed */
	if (!(sc->sc_flags & SC_OP_INVALID))
		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	mutex_destroy(&sc->sc_txbuflock);
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
		}
	}

	ath9k_hw_detach(sc->sc_ah);

	/* free io handle */
	ddi_regs_map_free(&sc->sc_io_handle);
	pci_config_teardown(&sc->sc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&sc->sc_genlock);
	mutex_destroy(&sc->sc_serial_rw);
	mutex_destroy(&sc->sc_rxbuflock);
	mutex_destroy(&sc->sc_resched_lock);
#ifdef ARN_IBSS
	mutex_destroy(&sc->sc_bcbuflock);
#endif

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
3450 3447
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.  No locks are taken and nothing is freed; the hardware is
 * simply silenced (interrupts, TX, RX, then PHY).
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int32_t
arn_quiesce(dev_info_t *devinfo)
{
	struct arn_softc *sc;
	int i;
	struct ath_hal *ah;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));

	/* Nothing to quiesce if attach never got as far as the HAL */
	if (sc == NULL || (ah = sc->sc_ah) == NULL)
		return (DDI_FAILURE);

	/*
	 * Disable interrupts
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	/*
	 * Disable TX HW
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i))
			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
	}

	/*
	 * Disable RX HW
	 */
	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	(void) ath9k_hw_stopdmarecv(ah);
	/* busy-wait 3ms; presumably lets in-flight RX DMA settle — confirm */
	drv_usecwait(3000);

	/*
	 * Power down HW
	 */
	(void) ath9k_hw_phy_disable(ah);

	return (DDI_SUCCESS);
}
3501 3498
/*
 * Device operations: no cb_ops of interest (STREAMS-style nulldev/nodev
 * stubs); attach/detach/quiesce are the real entry points.  mac_init_ops()
 * in _init() fills in the networking pieces.
 */
DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
    nodev, NULL, D_MP, NULL, arn_quiesce);
3504 3501
/* Loadable-module linkage for mod_install()/mod_remove() */
static struct modldrv arn_modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	"arn-Atheros 9000 series driver:2.0", /* short description */
	&arn_dev_ops /* driver specific ops */
};

/* Single driver module, no other linkage elements */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&arn_modldrv, NULL
};
3514 3511
/*
 * _info(9E): report module information (e.g. for modinfo(1M)).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3520 3517
3521 3518 int
3522 3519 _init(void)
3523 3520 {
3524 3521 int status;
3525 3522
3526 3523 status = ddi_soft_state_init
3527 3524 (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3528 3525 if (status != 0)
3529 3526 return (status);
3530 3527
3531 3528 mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3532 3529 mac_init_ops(&arn_dev_ops, "arn");
3533 3530 status = mod_install(&modlinkage);
3534 3531 if (status != 0) {
3535 3532 mac_fini_ops(&arn_dev_ops);
3536 3533 mutex_destroy(&arn_loglock);
3537 3534 ddi_soft_state_fini(&arn_soft_state_p);
3538 3535 }
3539 3536
3540 3537 return (status);
3541 3538 }
3542 3539
3543 3540 int
3544 3541 _fini(void)
3545 3542 {
3546 3543 int status;
3547 3544
3548 3545 status = mod_remove(&modlinkage);
3549 3546 if (status == 0) {
3550 3547 mac_fini_ops(&arn_dev_ops);
3551 3548 mutex_destroy(&arn_loglock);
3552 3549 ddi_soft_state_fini(&arn_soft_state_p);
3553 3550 }
3554 3551 return (status);
3555 3552 }
↓ open down ↓ |
892 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX