Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
+++ new/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
1 1 /*
2 2 * O.S : Solaris
3 3 * FILE NAME : arcmsr.c
4 4 * BY : Erich Chen, C.L. Huang
5 5 * Description: SCSI RAID Device Driver for
6 6 * ARECA RAID Host adapter
7 7 *
8 8 * Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
9 9 * Copyright (C) 2002,2010 Erich Chen
10 10 * Web site: www.areca.com.tw
11 11 * E-mail: erich@areca.com.tw; ching2048@areca.com.tw
12 12 *
13 13 * Redistribution and use in source and binary forms, with or without
14 14 * modification, are permitted provided that the following conditions
15 15 * are met:
16 16 * 1. Redistributions of source code must retain the above copyright
17 17 * notice, this list of conditions and the following disclaimer.
18 18 * 2. Redistributions in binary form must reproduce the above copyright
19 19 * notice, this list of conditions and the following disclaimer in the
20 20 * documentation and/or other materials provided with the distribution.
21 21 * 3. The party using or redistributing the source code and binary forms
22 22 * agrees to the disclaimer below and the terms and conditions set forth
23 23 * herein.
24 24 *
25 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 35 * SUCH DAMAGE.
36 36 *
37 37 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
38 38 * Use is subject to license terms.
39 39 *
40 40 */
41 41 /*
42 42 * This file and its contents are supplied under the terms of the
43 43 * Common Development and Distribution License ("CDDL"), version 1.0.
44 44 * You may only use this file in accordance with the terms of version
45 45 * 1.0 of the CDDL.
46 46 *
47 47 * A full copy of the text of the CDDL should have accompanied this
48 48 * source. A copy of the CDDL is also available via the Internet at
49 49 * http://www.illumos.org/license/CDDL.
50 50 */
51 51 /*
52 52 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
53 53 */
54 54 #include <sys/types.h>
55 55 #include <sys/ddidmareq.h>
56 56 #include <sys/scsi/scsi.h>
57 57 #include <sys/ddi.h>
58 58 #include <sys/sunddi.h>
59 59 #include <sys/file.h>
60 60 #include <sys/disp.h>
61 61 #include <sys/signal.h>
62 62 #include <sys/debug.h>
63 63 #include <sys/pci.h>
64 64 #include <sys/policy.h>
65 65 #include <sys/atomic.h>
66 66 #include "arcmsr.h"
67 67
68 68 static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
69 69 static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
70 70 int mode, cred_t *credp, int *rvalp);
71 71 static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
72 72 static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
73 73 static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
74 74 static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
75 75 static int arcmsr_tran_reset(struct scsi_address *ap, int level);
76 76 static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
77 77 static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
78 78 int whom);
79 79 static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
80 80 dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
81 81 struct scsi_device *sd);
82 82 static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
83 83 static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
84 84 struct scsi_pkt *pkt);
85 85 static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
86 86 struct scsi_pkt *pkt);
87 87 static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
88 88 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
89 89 int tgtlen, int flags, int (*callback)(), caddr_t arg);
90 90 static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
91 91 dev_info_t **dipp);
92 92
93 93 static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
94 94 dev_info_t **ldip);
95 95 static uint8_t arcmsr_abort_host_command(struct ACB *acb);
96 96 static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
97 97 static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
98 98 static int arcmsr_initialize(struct ACB *acb);
99 99 static int arcmsr_dma_alloc(struct ACB *acb,
100 100 struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
101 101 static int arcmsr_dma_move(struct ACB *acb,
102 102 struct scsi_pkt *pkt, struct buf *bp);
103 103 static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
104 104 static void arcmsr_hbc_message_isr(struct ACB *acb);
105 105 static void arcmsr_pcidev_disattach(struct ACB *acb);
106 106 static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
107 107 static void arcmsr_iop_init(struct ACB *acb);
108 108 static void arcmsr_iop_parking(struct ACB *acb);
109 109 /*PRINTFLIKE3*/
110 110 static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
111 111 /*PRINTFLIKE2*/
112 112 static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
113 113 static void arcmsr_mutex_init(struct ACB *acb);
114 114 static void arcmsr_remove_intr(struct ACB *acb);
115 115 static void arcmsr_ccbs_timeout(void* arg);
116 116 static void arcmsr_devMap_monitor(void* arg);
117 117 static void arcmsr_pcidev_disattach(struct ACB *acb);
118 118 static void arcmsr_iop_message_read(struct ACB *acb);
119 119 static void arcmsr_free_ccb(struct CCB *ccb);
120 120 static void arcmsr_post_ioctldata2iop(struct ACB *acb);
121 121 static void arcmsr_report_sense_info(struct CCB *ccb);
122 122 static void arcmsr_init_list_head(struct list_head *list);
123 123 static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
124 124 static void arcmsr_done4abort_postqueue(struct ACB *acb);
125 125 static void arcmsr_list_add_tail(kmutex_t *list_lock,
126 126 struct list_head *new_one, struct list_head *head);
127 127 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
128 128 static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
129 129 static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
130 130 static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
131 131 static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
132 132 static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
133 133 static uint8_t arcmsr_iop_reset(struct ACB *acb);
134 134 static uint32_t arcmsr_disable_allintr(struct ACB *acb);
135 135 static uint32_t arcmsr_iop_confirm(struct ACB *acb);
136 136 static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
137 137 static void arcmsr_flush_hba_cache(struct ACB *acb);
138 138 static void arcmsr_flush_hbb_cache(struct ACB *acb);
139 139 static void arcmsr_flush_hbc_cache(struct ACB *acb);
140 140 static void arcmsr_stop_hba_bgrb(struct ACB *acb);
141 141 static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
142 142 static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
143 143 static void arcmsr_start_hba_bgrb(struct ACB *acb);
144 144 static void arcmsr_start_hbb_bgrb(struct ACB *acb);
145 145 static void arcmsr_start_hbc_bgrb(struct ACB *acb);
146 146 static void arcmsr_mutex_destroy(struct ACB *acb);
147 147 static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
148 148 static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
149 149 static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
150 150 static void arcmsr_build_ccb(struct CCB *ccb);
151 151 static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
152 152 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
153 153 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
154 154 static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
155 155 uint8_t lun);
156 156 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);
157 157
158 158 static int arcmsr_add_intr(struct ACB *, int);
159 159
/* Per-instance soft-state anchor, initialized by ddi_soft_state_init(). */
static void *arcmsr_soft_state = NULL;
161 161
/*
 * DMA attributes used for data-transfer (scatter/gather) mappings.
 */
static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffffffffffffull,	/* high DMA address range */
	0x00ffffff,		/* DMA counter counter upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address+length". The Intel IOP331 can not use
	 * segments over the 4G boundary due to segment boundary restrictions
	 */
	0xffffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
181 181
182 182
/*
 * DMA attributes for the CCB pool: a single contiguous (sgllen == 1)
 * allocation that must reside below 4G.
 */
static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter counter upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
197 197
198 198
/*
 * Character/block device entry points.  Only ioctl(9E) is implemented
 * here; open/close are delegated to the SCSA framework.
 */
static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
	D_MP,
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};
219 219
/* Driver operations vector handed to mod_driverops via arcmsr_modldrv. */
static struct dev_ops arcmsr_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* reference count */
	nodev,		/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	arcmsr_attach,	/* attach */
	arcmsr_detach,	/* detach */
	arcmsr_reset,	/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,	/* driver operations */
	NULL,		/* bus operations */
	NULL		/* power */
};
233 233
/* Module linkage: a single driver module registered with modctl. */
static struct modldrv arcmsr_modldrv = {
	&mod_driverops,		/* Type of module. This is a driver. */
	"ARECA RAID Controller", /* module name, from arcmsr.h */
	&arcmsr_ops,		/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};
245 245
246 246
247 247 int
248 248 _init(void)
249 249 {
250 250 int ret;
251 251
252 252 ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
253 253 if (ret != 0) {
254 254 return (ret);
255 255 }
256 256 if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
257 257 ddi_soft_state_fini(&arcmsr_soft_state);
258 258 return (ret);
259 259 }
260 260
261 261 if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
262 262 scsi_hba_fini(&arcmsr_modlinkage);
263 263 if (arcmsr_soft_state != NULL) {
264 264 ddi_soft_state_fini(&arcmsr_soft_state);
265 265 }
266 266 }
267 267 return (ret);
268 268 }
269 269
270 270
271 271 int
272 272 _fini(void)
273 273 {
274 274 int ret;
275 275
276 276 ret = mod_remove(&arcmsr_modlinkage);
277 277 if (ret == 0) {
278 278 /* if ret = 0 , said driver can remove */
279 279 scsi_hba_fini(&arcmsr_modlinkage);
280 280 if (arcmsr_soft_state != NULL) {
281 281 ddi_soft_state_fini(&arcmsr_soft_state);
282 282 }
283 283 }
284 284 return (ret);
285 285 }
286 286
287 287
/*
 * _info(9E): report module information through the module linkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&arcmsr_modlinkage, modinfop));
}
293 293
294 294
295 295 /*
296 296 * Function: arcmsr_attach(9E)
297 297 * Description: Set up all device state and allocate data structures,
298 298 * mutexes, condition variables, etc. for device operation.
299 299 * Set mt_attr property for driver to indicate MT-safety.
300 300 * Add interrupts needed.
301 301 * Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
302 302 * Output: Return DDI_SUCCESS if device is ready,
303 303 * else return DDI_FAILURE
304 304 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;

	switch (cmd) {
	case DDI_ATTACH:
		/* Full per-instance setup is done in arcmsr_do_ddi_attach(). */
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
		/*
		 * There is no hardware state to restore and no timeouts to
		 * resume, because DDI_SUSPEND is not taken with active
		 * commands or active timeouts outstanding.  We only need to
		 * re-initialize the IOP and restart the watchdog timers.
		 */
		hba_trans = ddi_get_driver_private(dev_info);
		if (hba_trans == NULL) {
			return (DDI_FAILURE);
		}
		acb = hba_trans->tran_hba_private;
		mutex_enter(&acb->acb_mutex);
		arcmsr_iop_init(acb);

		/* restart ccbs "timeout" watchdog */
		acb->timeout_count = 0;
		acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
		    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
		/* restart the device-map monitor as well */
		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
		    (caddr_t)acb,
		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
344 344
345 345 /*
346 346 * Function: arcmsr_detach(9E)
347 347 * Description: Remove all device allocation and system resources, disable
348 348 * device interrupt.
349 349 * Input: dev_info_t *dev_info
350 350 * ddi_detach_cmd_t cmd
 * Output:	Return DDI_SUCCESS if done,
 *		else return DDI_FAILURE
353 353 */
354 354 static int
355 355 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {
356 356
357 357 int instance;
358 358 struct ACB *acb;
359 359
360 360
361 361 instance = ddi_get_instance(dev_info);
362 362 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
363 363 if (acb == NULL)
364 364 return (DDI_FAILURE);
365 365
366 366 switch (cmd) {
367 367 case DDI_DETACH:
368 368 mutex_enter(&acb->acb_mutex);
369 369 if (acb->timeout_id != 0) {
370 370 mutex_exit(&acb->acb_mutex);
371 371 (void) untimeout(acb->timeout_id);
372 372 mutex_enter(&acb->acb_mutex);
373 373 acb->timeout_id = 0;
374 374 }
375 375 if (acb->timeout_sc_id != 0) {
376 376 mutex_exit(&acb->acb_mutex);
377 377 (void) untimeout(acb->timeout_sc_id);
378 378 mutex_enter(&acb->acb_mutex);
379 379 acb->timeout_sc_id = 0;
380 380 }
381 381 arcmsr_pcidev_disattach(acb);
382 382 /* Remove interrupt set up by ddi_add_intr */
383 383 arcmsr_remove_intr(acb);
384 384 /* unbind mapping object to handle */
385 385 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
386 386 /* Free ccb pool memory */
387 387 ddi_dma_mem_free(&acb->ccbs_acc_handle);
388 388 /* Free DMA handle */
389 389 ddi_dma_free_handle(&acb->ccbs_pool_handle);
390 390 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
391 391 if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
392 392 arcmsr_warn(acb, "Unable to detach instance cleanly "
393 393 "(should not happen)");
394 394 /* free scsi_hba_transport from scsi_hba_tran_alloc */
395 395 scsi_hba_tran_free(acb->scsi_hba_transport);
396 396 ddi_taskq_destroy(acb->taskq);
397 397 ddi_prop_remove_all(dev_info);
398 398 mutex_exit(&acb->acb_mutex);
399 399 arcmsr_mutex_destroy(acb);
400 400 pci_config_teardown(&acb->pci_acc_handle);
401 401 ddi_set_driver_private(dev_info, NULL);
402 402 ddi_soft_state_free(arcmsr_soft_state, instance);
403 403 return (DDI_SUCCESS);
404 404 case DDI_SUSPEND:
405 405 mutex_enter(&acb->acb_mutex);
406 406 if (acb->timeout_id != 0) {
407 407 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
408 408 mutex_exit(&acb->acb_mutex);
409 409 (void) untimeout(acb->timeout_id);
410 410 (void) untimeout(acb->timeout_sc_id);
411 411 mutex_enter(&acb->acb_mutex);
412 412 acb->timeout_id = 0;
413 413 }
414 414
415 415 if (acb->timeout_sc_id != 0) {
416 416 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
417 417 mutex_exit(&acb->acb_mutex);
418 418 (void) untimeout(acb->timeout_sc_id);
419 419 mutex_enter(&acb->acb_mutex);
420 420 acb->timeout_sc_id = 0;
421 421 }
422 422
423 423 /* disable all outbound interrupt */
424 424 (void) arcmsr_disable_allintr(acb);
425 425 /* stop adapter background rebuild */
426 426 switch (acb->adapter_type) {
427 427 case ACB_ADAPTER_TYPE_A:
428 428 arcmsr_stop_hba_bgrb(acb);
429 429 arcmsr_flush_hba_cache(acb);
430 430 break;
431 431
432 432 case ACB_ADAPTER_TYPE_B:
433 433 arcmsr_stop_hbb_bgrb(acb);
434 434 arcmsr_flush_hbb_cache(acb);
435 435 break;
436 436
437 437 case ACB_ADAPTER_TYPE_C:
438 438 arcmsr_stop_hbc_bgrb(acb);
439 439 arcmsr_flush_hbc_cache(acb);
440 440 break;
441 441 }
442 442 mutex_exit(&acb->acb_mutex);
443 443 return (DDI_SUCCESS);
444 444 default:
445 445 return (DDI_FAILURE);
446 446 }
447 447 }
448 448
449 449 static int
450 450 arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
451 451 {
452 452 struct ACB *acb;
453 453 scsi_hba_tran_t *scsi_hba_transport;
454 454 _NOTE(ARGUNUSED(cmd));
455 455
456 456 scsi_hba_transport = ddi_get_driver_private(resetdev);
457 457 if (scsi_hba_transport == NULL)
458 458 return (DDI_FAILURE);
459 459
460 460 acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
461 461 if (!acb)
462 462 return (DDI_FAILURE);
463 463
464 464 arcmsr_pcidev_disattach(acb);
465 465
466 466 return (DDI_SUCCESS);
467 467 }
468 468
/*
 * arcmsr_cb_ioctl: ioctl(9E) entry point for the Areca management
 * interface (CLI/GUI passthrough).
 *
 * The user hands in a CMD_MESSAGE_FIELD structure whose Signature must be
 * "ARCMSR".  Depending on ioctl_cmd we move message data between the
 * driver's circular rqbuffer/wqbuffer rings and the IOP, or perform a
 * control action (flush cache, park IOP, ...).  All ring manipulation is
 * serialized by acb->ioctl_mutex.  Unrecognized commands are forwarded to
 * scsi_hba_ioctl().
 *
 * Returns 0 on success, or ENXIO/EPERM errno values.
 */
static int
arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct ACB *acb;
	struct CMD_MESSAGE_FIELD *pktioctlfld;
	int retvalue = 0;
	int instance = MINOR2INST(getminor(dev));

	if (instance < 0)
		return (ENXIO);

	/* management ioctls require sys_config privilege */
	if (secpolicy_sys_config(credp, B_FALSE) != 0)
		return (EPERM);

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL)
		return (ENXIO);

	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);

	mutex_enter(&acb->ioctl_mutex);
	if (ddi_copyin((void *)arg, pktioctlfld,
	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
		retvalue = ENXIO;
		goto ioctl_out;
	}

	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
		/* validity check: reject requests without the magic tag */
		retvalue = ENXIO;
		goto ioctl_out;
	}

	switch ((unsigned int)ioctl_cmd) {
	case ARCMSR_MESSAGE_READ_RQBUFFER:
	{
		/* drain the read ring into the user's message buffer */
		uint8_t *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
		    (allxfer_len < (MSGDATABUFLEN - 1))) {
			/* copy READ QBUFFER to srb, one byte at a time */
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstidx++;
			/* if last index number set it to 0 (ring wrap) */
			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* the ring overflowed earlier; pull pending IOP data */
			struct QBUFFER *prbuffer;
			uint8_t *pQbuffer;
			uint8_t *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = (uint8_t *)prbuffer->data;
			iop_len = (int32_t)prbuffer->data_len;
			/*
			 * The ring was just drained above, so this IOP data
			 * cannot overflow it again; copy it in directly.
			 */
			while (iop_len > 0) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
				(void) memcpy(pQbuffer, iop_data, 1);
				acb->rqbuf_lastidx++;
				/* if last index number set it to 0 */
				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			/* let IOP know data has been read */
			arcmsr_iop_message_read(acb);
		}
		(void) memcpy(pktioctlfld->messagedatabuffer,
		    ver_addr, allxfer_len);
		pktioctlfld->cmdmessage.Length = allxfer_len;
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
	{
		/* stage user message data into the write ring for the IOP */
		uint8_t *ver_addr;
		int32_t my_empty_len, user_len;
		int32_t wqbuf_firstidx, wqbuf_lastidx;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);

		ptmpuserbuffer = ver_addr;
		user_len = min(pktioctlfld->cmdmessage.Length,
		    MSGDATABUFLEN);
		(void) memcpy(ptmpuserbuffer,
		    pktioctlfld->messagedatabuffer, user_len);
		/*
		 * check if the data xfer length of this request will
		 * overflow my array qbuffer
		 */
		wqbuf_lastidx = acb->wqbuf_lastidx;
		wqbuf_firstidx = acb->wqbuf_firstidx;
		if (wqbuf_lastidx != wqbuf_firstidx) {
			/* ring not empty: flush what is queued and fail */
			arcmsr_post_ioctldata2iop(acb);
			pktioctlfld->cmdmessage.ReturnCode =
			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
		} else {
			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
			    & (ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					/* copy srb data to wqbuffer */
					pQbuffer =
					    &acb->wqbuffer[acb->wqbuf_lastidx];
					(void) memcpy(pQbuffer,
					    ptmpuserbuffer, 1);
					acb->wqbuf_lastidx++;
					/* if last index number set it to 0 */
					acb->wqbuf_lastidx %=
					    ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				/* post first Qbuffer */
				if (acb->acb_flags &
				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_OK;
			} else {
				/* not enough ring space for this request */
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
			}
		}
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
	{
		/* reset the read ring to empty */
		uint8_t *pQbuffer = acb->rqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
	{
		/* reset the write ring to empty */
		uint8_t *pQbuffer = acb->wqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
	{
		/* reset both rings to empty */
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		pQbuffer = acb->rqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
		/* echo back the fixed 0x3f return code */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_3F;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;

	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;

	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_flush_hba_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_B:
			arcmsr_flush_hbb_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_C:
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		break;

	default:
		/* not one of ours: hand off to the SCSA framework */
		mutex_exit(&acb->ioctl_mutex);
		kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
		return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
		    rvalp));
	}

ioctl_out:
	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
	mutex_exit(&acb->ioctl_mutex);

	return (retvalue);
}
740 740
741 741
742 742 /*
743 743 * Function: arcmsr_tran_tgt_init
744 744 * Description: Called when initializing a target device instance. If
745 745 * no per-target initialization is required, the HBA
746 746 * may leave tran_tgt_init to NULL
747 747 * Input:
748 748 * dev_info_t *host_dev_info,
749 749 * dev_info_t *target_dev_info,
750 750 * scsi_hba_tran_t *tran,
751 751 * struct scsi_device *sd
752 752 *
753 753 * Return: DDI_SUCCESS if success, else return DDI_FAILURE
754 754 *
755 755 * entry point enables the HBA to allocate and/or initialize any per-
756 756 * target resources.
757 757 * It also enables the HBA to qualify the device's address as valid and
758 758 * supportable for that particular HBA.
759 759 * By returning DDI_FAILURE, the instance of the target driver for that
760 760 * device will not be probed or attached.
761 761 * This entry point is not required, and if none is supplied,
762 762 * the framework will attempt to probe and attach all possible instances
763 763 * of the appropriate target drivers.
764 764 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	uint16_t target;
	uint8_t lun;
	struct ACB *acb = tran->tran_hba_private;

	_NOTE(ARGUNUSED(tran, target_dev_info, host_dev_info))

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	/* Reject addresses outside the adapter's target/LUN range. */
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		return (DDI_FAILURE);
	}


	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
		/*
		 * If no persistent node exist, we don't allow .conf node
		 * to be created.
		 */
		if (arcmsr_find_child(acb, target, lun) != NULL) {
			/*
			 * NOTE(review): the inverted-looking test is
			 * intentional: ndi_merge_node() != DDI_SUCCESS
			 * appears to mean the .conf node could not be merged
			 * into the existing child (i.e. it is unique), so it
			 * is allowed to attach; a successful merge means the
			 * node duplicates an existing child and falls
			 * through to DDI_FAILURE -- confirm against
			 * ndi_merge_node(9F).
			 */
			if ((ndi_merge_node(target_dev_info,
			    arcmsr_name_node) != DDI_SUCCESS)) {
				return (DDI_SUCCESS);
			}
		}
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
798 798
799 799 /*
800 800 * Function: arcmsr_tran_getcap(9E)
 * Description: Get the capability named, and return its value.
 * Return Values: current value of capability, if defined
 *		-1 if capability is not defined
804 804 * ------------------------------------------------------
805 805 * Common Capability Strings Array
806 806 * ------------------------------------------------------
807 807 * #define SCSI_CAP_DMA_MAX 0
808 808 * #define SCSI_CAP_MSG_OUT 1
809 809 * #define SCSI_CAP_DISCONNECT 2
810 810 * #define SCSI_CAP_SYNCHRONOUS 3
811 811 * #define SCSI_CAP_WIDE_XFER 4
812 812 * #define SCSI_CAP_PARITY 5
813 813 * #define SCSI_CAP_INITIATOR_ID 6
814 814 * #define SCSI_CAP_UNTAGGED_QING 7
815 815 * #define SCSI_CAP_TAGGED_QING 8
816 816 * #define SCSI_CAP_ARQ 9
817 817 * #define SCSI_CAP_LINKED_CMDS 10 a
818 818 * #define SCSI_CAP_SECTOR_SIZE 11 b
819 819 * #define SCSI_CAP_TOTAL_SECTORS 12 c
820 820 * #define SCSI_CAP_GEOMETRY 13 d
821 821 * #define SCSI_CAP_RESET_NOTIFICATION 14 e
822 822 * #define SCSI_CAP_QFULL_RETRIES 15 f
823 823 * #define SCSI_CAP_QFULL_RETRY_INTERVAL 16 10
824 824 * #define SCSI_CAP_SCSI_VERSION 17 11
825 825 * #define SCSI_CAP_INTERCONNECT_TYPE 18 12
826 826 * #define SCSI_CAP_LUN_RESET 19 13
827 827 */
828 828 static int
829 829 arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
830 830 {
831 831 int capability = 0;
832 832 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
833 833
834 834 if (cap == NULL || whom == 0) {
835 835 return (DDI_FAILURE);
836 836 }
837 837
838 838 mutex_enter(&acb->acb_mutex);
839 839 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
840 840 mutex_exit(&acb->acb_mutex);
841 841 return (-1);
842 842 }
843 843 switch (scsi_hba_lookup_capstr(cap)) {
844 844 case SCSI_CAP_MSG_OUT:
845 845 case SCSI_CAP_DISCONNECT:
846 846 case SCSI_CAP_WIDE_XFER:
847 847 case SCSI_CAP_TAGGED_QING:
848 848 case SCSI_CAP_UNTAGGED_QING:
849 849 case SCSI_CAP_PARITY:
850 850 case SCSI_CAP_ARQ:
851 851 capability = 1;
852 852 break;
853 853 case SCSI_CAP_SECTOR_SIZE:
854 854 capability = ARCMSR_DEV_SECTOR_SIZE;
855 855 break;
856 856 case SCSI_CAP_DMA_MAX:
857 857 /* Limit to 16MB max transfer */
858 858 capability = ARCMSR_MAX_XFER_LEN;
859 859 break;
860 860 case SCSI_CAP_INITIATOR_ID:
861 861 capability = ARCMSR_SCSI_INITIATOR_ID;
862 862 break;
863 863 case SCSI_CAP_GEOMETRY:
864 864 /* head , track , cylinder */
865 865 capability = (255 << 16) | 63;
866 866 break;
867 867 default:
868 868 capability = -1;
869 869 break;
870 870 }
871 871 mutex_exit(&acb->acb_mutex);
872 872 return (capability);
873 873 }
874 874
875 875 /*
876 876 * Function: arcmsr_tran_setcap(9E)
877 877 * Description: Set the specific capability.
878 878 * Return Values: 1 - capability exists and can be set to new value
879 879 * 0 - capability could not be set to new value
880 880 * -1 - no such capability
881 881 */
882 882 static int
883 883 arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
884 884 {
885 885 _NOTE(ARGUNUSED(value))
886 886
887 887 int supported = 0;
888 888 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
889 889
890 890 if (cap == NULL || whom == 0) {
891 891 return (-1);
892 892 }
893 893
894 894 mutex_enter(&acb->acb_mutex);
895 895 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
896 896 mutex_exit(&acb->acb_mutex);
897 897 return (-1);
898 898 }
899 899 switch (supported = scsi_hba_lookup_capstr(cap)) {
900 900 case SCSI_CAP_ARQ: /* 9 auto request sense */
901 901 case SCSI_CAP_UNTAGGED_QING: /* 7 */
902 902 case SCSI_CAP_TAGGED_QING: /* 8 */
903 903 /* these are always on, and cannot be turned off */
904 904 supported = (value == 1) ? 1 : 0;
905 905 break;
906 906 case SCSI_CAP_TOTAL_SECTORS: /* c */
907 907 supported = 1;
908 908 break;
909 909 case SCSI_CAP_DISCONNECT: /* 2 */
910 910 case SCSI_CAP_WIDE_XFER: /* 4 */
911 911 case SCSI_CAP_INITIATOR_ID: /* 6 */
912 912 case SCSI_CAP_DMA_MAX: /* 0 */
913 913 case SCSI_CAP_MSG_OUT: /* 1 */
914 914 case SCSI_CAP_PARITY: /* 5 */
915 915 case SCSI_CAP_LINKED_CMDS: /* a */
916 916 case SCSI_CAP_RESET_NOTIFICATION: /* e */
917 917 case SCSI_CAP_SECTOR_SIZE: /* b */
918 918 /* these are not settable */
919 919 supported = 0;
920 920 break;
921 921 default:
922 922 supported = -1;
923 923 break;
924 924 }
925 925 mutex_exit(&acb->acb_mutex);
926 926 return (supported);
927 927 }
928 928
929 929
930 930 /*
931 931 * Function: arcmsr_tran_init_pkt
932 932 * Return Values: pointer to scsi_pkt, or NULL
933 933 * Description: simultaneously allocate both a scsi_pkt(9S) structure and
934 934 * DMA resources for that pkt.
935 935 * Called by kernel on behalf of a target driver
936 936 * calling scsi_init_pkt(9F).
937 937 * Refer to tran_init_pkt(9E) man page
938 938 * Context: Can be called from different kernel process threads.
939 939 * Can be called by interrupt thread.
940 940 * Allocates SCSI packet and DMA resources
941 941 */
static struct
scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg)
{
	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag;	/* 1 = caller reuses an existing pkt */

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	/* refuse new work while a bus reset is in flight */
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		return (NULL);
	}
	if (pkt == NULL) {
		/* First call for this command: get free CCB */
		(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == (struct CCB *)NULL) {
			return (NULL);
		}

		/*
		 * Guarantee room for auto-request-sense data; the ARQ
		 * capability is always reported as on by this driver.
		 */
		if (statuslen < sizeof (struct scsi_arq_status)) {
			statuslen = sizeof (struct scsi_arq_status);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (void *), callback, arg);
		if (pkt == NULL) {
			arcmsr_warn(acb, "scsi pkt allocation failed");
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg are needed to xfer on this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin = 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * we will still use this point for we want to fake some
		 * information in tran_start
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb (the firmware command block) */
		arcmsr_cdb = &ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		/* Context round-trips through the IOP to find this CDB */
		arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		/* Re-entry with an existing pkt (e.g. next DMA window) */
		ccb = pkt->pkt_ha_private;
		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
			if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
				return (NULL);
			}
		}

		/*
		 * you cannot update CdbLength with cmdlen here, it would
		 * cause a data compare error
		 */
		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
		old_pkt_flag = 1;
	}

	/* Second step : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * system had a lot of data trunk need to xfer, from...20 byte
		 * to 819200 byte.
		 * arcmsr_dma_alloc will get pkt_dma_handle (not null) until
		 * this lot of data trunk xfer done this mission will be done
		 * by some of continue READ or WRITE scsi command, till this
		 * lot of data trunk xfer completed.
		 * arcmsr_dma_move do the action repeatedly, and use the same
		 * ccb till this lot of data trunk xfer complete notice.
		 * when after the arcmsr_tran_init_pkt returns the solaris
		 * kernel is by your pkt_resid and its b_bcount to give you
		 * which type of scsi command descriptor to implement the
		 * length of folowing arcmsr_tran_start scsi cdb (data length)
		 *
		 * Each transfer should be aligned on a 512 byte boundary
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
			    DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources, it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				arcmsr_warn(acb, "dma allocation failure");
				if (old_pkt_flag == 0) {
					/* only free what we allocated here */
					arcmsr_warn(acb, "dma "
					    "allocation failed to free "
					    "scsi hba pkt");
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return (NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
				arcmsr_warn(acb, "dma move failed");
				return (NULL);
			}
		}
	} else {
		/* no data phase for this command */
		pkt->pkt_resid = 0;
	}
	return (pkt);
}
1081 1081
1082 1082 /*
1083 1083 * Function: arcmsr_tran_start(9E)
1084 1084 * Description: Transport the command in pktp to the target device.
1085 1085 * The command is not finished when this returns, only
1086 1086 * sent to the target; arcmsr_intr_handler will call
1087 1087 * scsi_hba_pkt_comp(pktp) when the target device has done.
1088 1088 *
1089 1089 * Input: struct scsi_address *ap, struct scsi_pkt *pktp
1090 1090 * Output: TRAN_ACCEPT if pkt is OK and not driver not busy
1091 1091 * TRAN_BUSY if driver is
1092 1092 * TRAN_BADPKT if pkt is invalid
1093 1093 */
1094 1094 static int
1095 1095 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1096 1096 {
1097 1097 struct ACB *acb;
1098 1098 struct CCB *ccb;
1099 1099 int target = ap->a_target;
1100 1100 int lun = ap->a_lun;
1101 1101
1102 1102 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1103 1103 ccb = pkt->pkt_ha_private;
1104 1104 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1105 1105
1106 1106 if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
1107 1107 (ccb->ccb_flags & DDI_DMA_CONSISTENT))
1108 1108 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1109 1109 DDI_DMA_SYNC_FORDEV);
1110 1110
1111 1111 if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
1112 1112 arcmsr_build_ccb(ccb);
1113 1113
1114 1114 if (acb->acb_flags & ACB_F_BUS_RESET) {
1115 1115 pkt->pkt_reason = CMD_RESET;
1116 1116 pkt->pkt_statistics |= STAT_BUS_RESET;
1117 1117 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1118 1118 STATE_SENT_CMD | STATE_GOT_STATUS);
1119 1119 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1120 1120 (pkt->pkt_state & STATE_XFERRED_DATA))
1121 1121 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1122 1122 0, 0, DDI_DMA_SYNC_FORCPU);
1123 1123
1124 1124 scsi_hba_pkt_comp(pkt);
1125 1125 return (TRAN_ACCEPT);
1126 1126 }
1127 1127
1128 1128 /* IMPORTANT: Target 16 is a virtual device for iop message transfer */
1129 1129 if (target == 16) {
1130 1130
1131 1131 struct buf *bp = ccb->bp;
1132 1132 uint8_t scsicmd = pkt->pkt_cdbp[0];
1133 1133
1134 1134 switch (scsicmd) {
1135 1135 case SCMD_INQUIRY: {
1136 1136 if (lun != 0) {
1137 1137 ccb->pkt->pkt_reason = CMD_TIMEOUT;
1138 1138 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1139 1139 arcmsr_ccb_complete(ccb, 0);
1140 1140 return (TRAN_ACCEPT);
1141 1141 }
1142 1142
1143 1143 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1144 1144 uint8_t inqdata[36];
1145 1145
1146 1146 /* The EVDP and pagecode is not supported */
1147 1147 if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
1148 1148 inqdata[1] = 0xFF;
1149 1149 inqdata[2] = 0x00;
1150 1150 } else {
1151 1151 /* Periph Qualifier & Periph Dev Type */
1152 1152 inqdata[0] = DTYPE_PROCESSOR;
1153 1153 /* rem media bit & Dev Type Modifier */
1154 1154 inqdata[1] = 0;
1155 1155 /* ISO, ECMA, & ANSI versions */
1156 1156 inqdata[2] = 0;
1157 1157 inqdata[3] = 0;
1158 1158 /* length of additional data */
1159 1159 inqdata[4] = 31;
1160 1160 /* Vendor Identification */
1161 1161 bcopy("Areca ", &inqdata[8], VIDLEN);
1162 1162 /* Product Identification */
1163 1163 bcopy("RAID controller ", &inqdata[16],
1164 1164 PIDLEN);
1165 1165 /* Product Revision */
1166 1166 bcopy(&inqdata[32], "R001", REVLEN);
1167 1167 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1168 1168 bp_mapin(bp);
1169 1169
1170 1170 (void) memcpy(bp->b_un.b_addr,
1171 1171 inqdata, sizeof (inqdata));
1172 1172 }
1173 1173 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1174 1174 }
1175 1175 arcmsr_ccb_complete(ccb, 0);
1176 1176 return (TRAN_ACCEPT);
1177 1177 }
1178 1178 case SCMD_WRITE_BUFFER:
1179 1179 case SCMD_READ_BUFFER: {
1180 1180 if (arcmsr_iop_message_xfer(acb, pkt)) {
1181 1181 /* error just for retry */
1182 1182 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
1183 1183 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
1184 1184 }
1185 1185 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1186 1186 arcmsr_ccb_complete(ccb, 0);
1187 1187 return (TRAN_ACCEPT);
1188 1188 }
1189 1189 default:
1190 1190 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1191 1191 arcmsr_ccb_complete(ccb, 0);
1192 1192 return (TRAN_ACCEPT);
1193 1193 }
1194 1194 }
1195 1195
1196 1196 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1197 1197 uint8_t block_cmd;
1198 1198
1199 1199 block_cmd = pkt->pkt_cdbp[0] & 0x0f;
1200 1200 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1201 1201 pkt->pkt_reason = CMD_TIMEOUT;
1202 1202 pkt->pkt_statistics |= STAT_TIMEOUT;
1203 1203 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1204 1204 STATE_SENT_CMD | STATE_GOT_STATUS);
1205 1205 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1206 1206 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1207 1207 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1208 1208 ccb->pkt_dma_offset,
1209 1209 ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
1210 1210 }
1211 1211 scsi_hba_pkt_comp(pkt);
1212 1212 return (TRAN_ACCEPT);
1213 1213 }
1214 1214 }
1215 1215 mutex_enter(&acb->postq_mutex);
1216 1216 if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
1217 1217 ccb->ccb_state = ARCMSR_CCB_RETRY;
1218 1218 mutex_exit(&acb->postq_mutex);
1219 1219 return (TRAN_BUSY);
1220 1220 } else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
1221 1221 arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
1222 1222 acb->ccboutstandingcount);
1223 1223 mutex_exit(&acb->postq_mutex);
1224 1224 return (TRAN_FATAL_ERROR);
1225 1225 }
1226 1226 mutex_exit(&acb->postq_mutex);
1227 1227 return (TRAN_ACCEPT);
1228 1228 }
1229 1229
1230 1230 /*
1231 1231 * Function name: arcmsr_tran_destroy_pkt
1232 1232 * Return Values: none
1233 1233 * Description: Called by kernel on behalf of a target driver
1234 1234 * calling scsi_destroy_pkt(9F).
1235 1235 * Refer to tran_destroy_pkt(9E) man page
1236 1236 * Context: Can be called from different kernel process threads.
1237 1237 * Can be called by interrupt thread.
1238 1238 */
1239 1239 static void
1240 1240 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1241 1241 {
1242 1242 struct CCB *ccb = pkt->pkt_ha_private;
1243 1243 ddi_dma_handle_t pkt_dma_handle = ccb->pkt_dma_handle;
1244 1244
1245 1245 if (ccb == NULL) {
1246 1246 return;
1247 1247 }
1248 1248 if (ccb->pkt != pkt) {
1249 1249 return;
1250 1250 }
1251 1251 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1252 1252 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1253 1253 if (pkt_dma_handle) {
1254 1254 (void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
1255 1255 }
1256 1256 }
1257 1257 if (pkt_dma_handle) {
1258 1258 (void) ddi_dma_free_handle(&pkt_dma_handle);
1259 1259 }
1260 1260 pkt->pkt_ha_private = NULL;
1261 1261 if (ccb) {
1262 1262 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1263 1263 if (ccb->ccb_state & ARCMSR_CCB_BACK) {
1264 1264 arcmsr_free_ccb(ccb);
1265 1265 } else {
1266 1266 ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
1267 1267 }
1268 1268 } else {
1269 1269 arcmsr_free_ccb(ccb);
1270 1270 }
1271 1271 }
1272 1272 scsi_hba_pkt_free(ap, pkt);
1273 1273 }
1274 1274
1275 1275 /*
1276 1276 * Function name: arcmsr_tran_dmafree()
1277 1277 * Return Values: none
1278 1278 * Description: free dvma resources
1279 1279 * Context: Can be called from different kernel process threads.
1280 1280 * Can be called by interrupt thread.
1281 1281 */
1282 1282 static void
1283 1283 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1284 1284 {
1285 1285 struct CCB *ccb = pkt->pkt_ha_private;
1286 1286
1287 1287 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1288 1288 return;
1289 1289 }
1290 1290 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1291 1291 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1292 1292 if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
1293 1293 arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
1294 1294 "(target %d lun %d)", ap->a_target, ap->a_lun);
1295 1295 }
1296 1296 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1297 1297 ccb->pkt_dma_handle = NULL;
1298 1298 }
1299 1299 }
1300 1300
1301 1301 /*
1302 1302 * Function name: arcmsr_tran_sync_pkt()
1303 1303 * Return Values: none
1304 1304 * Description: sync dma
1305 1305 * Context: Can be called from different kernel process threads.
1306 1306 * Can be called by interrupt thread.
1307 1307 */
1308 1308 static void
1309 1309 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1310 1310 {
1311 1311 struct CCB *ccb;
1312 1312
1313 1313 ccb = pkt->pkt_ha_private;
1314 1314 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1315 1315 return;
1316 1316 }
1317 1317 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1318 1318 if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1319 1319 (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1320 1320 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1321 1321 DDI_SUCCESS) {
1322 1322 arcmsr_warn(ccb->acb,
1323 1323 "sync pkt failed for target %d lun %d",
1324 1324 ap->a_target, ap->a_lun);
1325 1325 }
1326 1326 }
1327 1327 }
1328 1328
1329 1329
1330 1330 /*
1331 1331 * Function: arcmsr_tran_abort(9E)
1332 1332 * SCSA interface routine to abort pkt(s) in progress.
1333 1333 * Aborts the pkt specified. If NULL pkt, aborts ALL pkts.
1334 1334 * Output: Return 1 if success
1335 1335 * Return 0 if failure
1336 1336 */
static int
arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
{
	struct ACB *acb;
	int return_code;

	acb = ap->a_hba_tran->tran_hba_private;

	/*
	 * Drain all outstanding commands before attempting the abort.
	 * NOTE(review): this loop has no upper bound -- if the IOP wedges
	 * with commands outstanding, it spins forever; confirm a timeout
	 * is enforced elsewhere (e.g. the driver's timeout handler).
	 */
	while (acb->ccboutstandingcount != 0) {
		drv_usecwait(10000);
	}

	/* seek and abort under the ISR lock; NULL pkt aborts everything */
	mutex_enter(&acb->isr_mutex);
	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
	mutex_exit(&acb->isr_mutex);

	if (return_code != DDI_SUCCESS) {
		arcmsr_warn(acb, "abort command failed for target %d lun %d",
		    ap->a_target, ap->a_lun);
		/* tran_abort(9E): 0 == failure */
		return (0);
	}
	/* tran_abort(9E): 1 == success */
	return (1);
}
1360 1360
1361 1361 /*
1362 1362 * Function: arcmsr_tran_reset(9E)
1363 1363 * SCSA interface routine to perform scsi resets on either
1364 1364 * a specified target or the bus (default).
1365 1365 * Output: Return 1 if success
1366 1366 * Return 0 if failure
1367 1367 */
1368 1368 static int
1369 1369 arcmsr_tran_reset(struct scsi_address *ap, int level) {
1370 1370
1371 1371 struct ACB *acb;
1372 1372 int return_code = 1;
1373 1373 int target = ap->a_target;
1374 1374 int lun = ap->a_lun;
1375 1375
1376 1376 /* Are we in the middle of dumping core? */
1377 1377 if (ddi_in_panic())
1378 1378 return (return_code);
1379 1379
1380 1380 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1381 1381 mutex_enter(&acb->isr_mutex);
1382 1382 switch (level) {
1383 1383 case RESET_ALL: /* 0 */
1384 1384 acb->num_resets++;
1385 1385 acb->acb_flags |= ACB_F_BUS_RESET;
1386 1386 if (acb->timeout_count) {
1387 1387 if (arcmsr_iop_reset(acb) != 0) {
1388 1388 arcmsr_handle_iop_bus_hold(acb);
1389 1389 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1390 1390 }
1391 1391 }
1392 1392 acb->acb_flags &= ~ACB_F_BUS_RESET;
1393 1393 break;
1394 1394 case RESET_TARGET: /* 1 */
1395 1395 if (acb->devstate[target][lun] == ARECA_RAID_GONE)
1396 1396 return_code = 0;
1397 1397 break;
1398 1398 case RESET_BUS: /* 2 */
1399 1399 return_code = 0;
1400 1400 break;
1401 1401 case RESET_LUN: /* 3 */
1402 1402 return_code = 0;
1403 1403 break;
1404 1404 default:
1405 1405 return_code = 0;
1406 1406 }
1407 1407 mutex_exit(&acb->isr_mutex);
1408 1408 return (return_code);
1409 1409 }
1410 1410
1411 1411 static int
1412 1412 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
1413 1413 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1414 1414 {
1415 1415 struct ACB *acb;
1416 1416 int circ = 0;
1417 1417 int rval;
1418 1418 int tgt, lun;
1419 1419
1420 1420 if ((acb = ddi_get_soft_state(arcmsr_soft_state,
1421 1421 ddi_get_instance(parent))) == NULL)
1422 1422 return (NDI_FAILURE);
1423 1423
1424 1424 ndi_devi_enter(parent, &circ);
1425 1425 switch (op) {
1426 1426 case BUS_CONFIG_ONE:
1427 1427 if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
1428 1428 rval = NDI_FAILURE;
1429 1429 break;
1430 1430 }
1431 1431 if (acb->device_map[tgt] & 1 << lun) {
1432 1432 acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
1433 1433 rval = arcmsr_config_lun(acb, tgt, lun, childp);
1434 1434 }
1435 1435 break;
1436 1436
1437 1437 case BUS_CONFIG_DRIVER:
1438 1438 case BUS_CONFIG_ALL:
1439 1439 for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
1440 1440 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1441 1441 if (acb->device_map[tgt] & 1 << lun) {
1442 1442 acb->devstate[tgt][lun] =
1443 1443 ARECA_RAID_GOOD;
1444 1444 (void) arcmsr_config_lun(acb, tgt,
1445 1445 lun, NULL);
1446 1446 }
1447 1447
1448 1448 rval = NDI_SUCCESS;
1449 1449 break;
1450 1450 }
1451 1451 if (rval == NDI_SUCCESS)
1452 1452 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
1453 1453 ndi_devi_exit(parent, circ);
1454 1454 return (rval);
1455 1455 }
1456 1456
1457 1457 /*
1458 1458 * Function name: arcmsr_dma_alloc
1459 1459 * Return Values: 0 if successful, -1 if failure
1460 1460 * Description: allocate DMA resources
1461 1461 * Context: Can only be called from arcmsr_tran_init_pkt()
1462 1462 * register struct scsi_address *ap = &((pkt)->pkt_address);
1463 1463 */
1464 1464 static int
1465 1465 arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
1466 1466 struct buf *bp, int flags, int (*callback)())
1467 1467 {
1468 1468 struct CCB *ccb = pkt->pkt_ha_private;
1469 1469 int alloc_result, map_method, dma_flags;
1470 1470 int resid = 0;
1471 1471 int total_ccb_xferlen = 0;
1472 1472 int (*cb)(caddr_t);
1473 1473 uint8_t i;
1474 1474
1475 1475 /*
1476 1476 * at this point the PKT SCSI CDB is empty, and dma xfer length
1477 1477 * is bp->b_bcount
1478 1478 */
1479 1479
1480 1480 if (bp->b_flags & B_READ) {
1481 1481 ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
1482 1482 dma_flags = DDI_DMA_READ;
1483 1483 } else {
1484 1484 ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
1485 1485 dma_flags = DDI_DMA_WRITE;
1486 1486 }
1487 1487
1488 1488 if (flags & PKT_CONSISTENT) {
1489 1489 ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
1490 1490 dma_flags |= DDI_DMA_CONSISTENT;
1491 1491 }
1492 1492 if (flags & PKT_DMA_PARTIAL) {
1493 1493 dma_flags |= DDI_DMA_PARTIAL;
1494 1494 }
1495 1495
1496 1496 dma_flags |= DDI_DMA_REDZONE;
1497 1497 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1498 1498
1499 1499 alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
1500 1500 cb, 0, &ccb->pkt_dma_handle);
1501 1501 if (alloc_result != DDI_SUCCESS) {
1502 1502 arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
1503 1503 return (DDI_FAILURE);
1504 1504 }
1505 1505
1506 1506 map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
1507 1507 bp, dma_flags, cb, 0,
1508 1508 &ccb->pkt_dmacookies[0], /* SG List pointer */
1509 1509 &ccb->pkt_ncookies); /* number of sgl cookies */
1510 1510
1511 1511 switch (map_method) {
1512 1512 case DDI_DMA_PARTIAL_MAP:
1513 1513 /*
1514 1514 * When your main memory size larger then 4G
1515 1515 * DDI_DMA_PARTIAL_MAP will be touched.
1516 1516 *
1517 1517 * We've already set DDI_DMA_PARTIAL in dma_flags,
1518 1518 * so if it's now missing, there's something screwy
1519 1519 * happening. We plow on....
1520 1520 */
1521 1521
1522 1522 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
1523 1523 arcmsr_warn(acb,
1524 1524 "dma partial mapping lost ...impossible case!");
1525 1525 }
1526 1526 if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
1527 1527 DDI_FAILURE) {
1528 1528 arcmsr_warn(acb, "ddi_dma_numwin() failed");
1529 1529 }
1530 1530
1531 1531 if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1532 1532 &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1533 1533 &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
1534 1534 DDI_FAILURE) {
1535 1535 arcmsr_warn(acb, "ddi_dma_getwin failed");
1536 1536 }
1537 1537
1538 1538 i = 0;
1539 1539 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1540 1540 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1541 1541 for (;;) {
1542 1542 i++;
1543 1543 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1544 1544 (i == ccb->pkt_ncookies) ||
1545 1545 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1546 1546 break;
1547 1547 }
1548 1548 /*
1549 1549 * next cookie will be retrieved from
1550 1550 * ccb->pkt_dmacookies[i]
1551 1551 */
1552 1552 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1553 1553 &ccb->pkt_dmacookies[i]);
1554 1554 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1555 1555 }
1556 1556 ccb->pkt_cookie = i;
1557 1557 ccb->arcmsr_cdb.sgcount = i;
1558 1558 if (total_ccb_xferlen > 512) {
1559 1559 resid = total_ccb_xferlen % 512;
1560 1560 if (resid != 0) {
1561 1561 i--;
1562 1562 total_ccb_xferlen -= resid;
1563 1563 /* modify last sg length */
1564 1564 ccb->pkt_dmacookies[i].dmac_size =
1565 1565 ccb->pkt_dmacookies[i].dmac_size - resid;
1566 1566 ccb->resid_dmacookie.dmac_size = resid;
1567 1567 ccb->resid_dmacookie.dmac_laddress =
1568 1568 ccb->pkt_dmacookies[i].dmac_laddress +
1569 1569 ccb->pkt_dmacookies[i].dmac_size;
1570 1570 }
1571 1571 }
1572 1572 ccb->total_dmac_size = total_ccb_xferlen;
1573 1573 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1574 1574 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1575 1575
1576 1576 return (DDI_SUCCESS);
1577 1577
1578 1578 case DDI_DMA_MAPPED:
1579 1579 ccb->pkt_nwin = 1; /* all mapped, so only one window */
1580 1580 ccb->pkt_dma_len = 0;
1581 1581 ccb->pkt_dma_offset = 0;
1582 1582 i = 0;
1583 1583 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1584 1584 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1585 1585 for (;;) {
1586 1586 i++;
1587 1587 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1588 1588 (i == ccb->pkt_ncookies) ||
1589 1589 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1590 1590 break;
1591 1591 }
1592 1592 /*
1593 1593 * next cookie will be retrieved from
1594 1594 * ccb->pkt_dmacookies[i]
1595 1595 */
1596 1596 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1597 1597 &ccb->pkt_dmacookies[i]);
1598 1598 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1599 1599 }
1600 1600 ccb->pkt_cookie = i;
1601 1601 ccb->arcmsr_cdb.sgcount = i;
1602 1602 if (total_ccb_xferlen > 512) {
1603 1603 resid = total_ccb_xferlen % 512;
1604 1604 if (resid != 0) {
1605 1605 i--;
1606 1606 total_ccb_xferlen -= resid;
1607 1607 /* modify last sg length */
1608 1608 ccb->pkt_dmacookies[i].dmac_size =
1609 1609 ccb->pkt_dmacookies[i].dmac_size - resid;
1610 1610 ccb->resid_dmacookie.dmac_size = resid;
1611 1611 ccb->resid_dmacookie.dmac_laddress =
1612 1612 ccb->pkt_dmacookies[i].dmac_laddress +
1613 1613 ccb->pkt_dmacookies[i].dmac_size;
1614 1614 }
1615 1615 }
1616 1616 ccb->total_dmac_size = total_ccb_xferlen;
1617 1617 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1618 1618 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1619 1619 return (DDI_SUCCESS);
1620 1620
1621 1621 case DDI_DMA_NORESOURCES:
1622 1622 arcmsr_warn(acb, "dma map got 'no resources'");
1623 1623 bioerror(bp, ENOMEM);
1624 1624 break;
1625 1625
1626 1626 case DDI_DMA_NOMAPPING:
1627 1627 arcmsr_warn(acb, "dma map got 'no mapping'");
1628 1628 bioerror(bp, EFAULT);
1629 1629 break;
1630 1630
1631 1631 case DDI_DMA_TOOBIG:
1632 1632 arcmsr_warn(acb, "dma map got 'too big'");
1633 1633 bioerror(bp, EINVAL);
1634 1634 break;
1635 1635
1636 1636 case DDI_DMA_INUSE:
1637 1637 arcmsr_warn(acb, "dma map got 'in use' "
1638 1638 "(should not happen)");
1639 1639 break;
1640 1640 default:
1641 1641 arcmsr_warn(acb, "dma map failed (0x%x)", i);
1642 1642 break;
1643 1643 }
1644 1644
1645 1645 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1646 1646 ccb->pkt_dma_handle = NULL;
1647 1647 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1648 1648 return (DDI_FAILURE);
1649 1649 }
1650 1650
1651 1651
1652 1652 /*
1653 1653 * Function name: arcmsr_dma_move
1654 1654 * Return Values: 0 if successful, -1 if failure
1655 1655 * Description: move DMA resources to next DMA window
1656 1656 * Context: Can only be called from arcmsr_tran_init_pkt()
1657 1657 */
static int
arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
{
	struct CCB *ccb = pkt->pkt_ha_private;
	uint8_t i = 0;		/* index into pkt_dmacookies for this pass */
	int resid = 0;
	int total_ccb_xferlen = 0;

	/*
	 * If the previous pass trimmed a partial 512-byte tail, replay
	 * that residual cookie first so no bytes are skipped.
	 */
	if (ccb->resid_dmacookie.dmac_size != 0) {
		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_size =
		    ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_laddress =
		    ccb->resid_dmacookie.dmac_laddress;
		i++;
		ccb->resid_dmacookie.dmac_size = 0;
	}
	/*
	 * If there are no more cookies remaining in this window,
	 * move to the next window.
	 */
	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
		/*
		 * only dma map "partial" arrive here
		 */
		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
		    (ccb->pkt_nwin == 1)) {
			/* single fully-consumed window: nothing to move */
			return (DDI_SUCCESS);
		}

		/* At last window, cannot move */
		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
			arcmsr_warn(acb, "dma partial set, numwin exceeded");
			return (DDI_FAILURE);
		}
		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_getwin failed");
			return (DDI_FAILURE);
		}
		/* reset cookie pointer */
		ccb->pkt_cookie = 0;
	} else {
		/*
		 * only dma map "all" arrive here
		 * We still have more cookies in this window,
		 * get the next one
		 * access the pkt_dma_handle remain cookie record at
		 * ccb->pkt_dmacookies array
		 */
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
	}

	/* Get remaining cookies in this window, up to our maximum */
	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;

	/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
	for (;;) {
		i++;
		/* handled cookies count level indicator */
		ccb->pkt_cookie++;
		if ((i == ARCMSR_MAX_SG_ENTRIES) ||
		    (ccb->pkt_cookie == ccb->pkt_ncookies) ||
		    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
			break;
		}
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
	}

	ccb->arcmsr_cdb.sgcount = i;
	if (total_ccb_xferlen > 512) {
		/*
		 * Trim the transfer to a 512-byte multiple; the trimmed
		 * tail is parked in resid_dmacookie for the next pass.
		 */
		resid = total_ccb_xferlen % 512;
		if (resid != 0) {
			i--;
			total_ccb_xferlen -= resid;
			/* modify last sg length */
			ccb->pkt_dmacookies[i].dmac_size =
			    ccb->pkt_dmacookies[i].dmac_size - resid;
			ccb->resid_dmacookie.dmac_size = resid;
			ccb->resid_dmacookie.dmac_laddress =
			    ccb->pkt_dmacookies[i].dmac_laddress +
			    ccb->pkt_dmacookies[i].dmac_size;
		}
	}
	/* total_dmac_size accumulates across windows for pkt_resid */
	ccb->total_dmac_size += total_ccb_xferlen;
	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

	return (DDI_SUCCESS);
}
1752 1752
1753 1753
/*ARGSUSED*/
static void
arcmsr_build_ccb(struct CCB *ccb)
{
	struct scsi_pkt *pkt = ccb->pkt;
	struct ARCMSR_CDB *arcmsr_cdb;
	char *psge;			/* write cursor into the SG area */
	uint32_t address_lo, address_hi;
	int arccdbsize = 0x30;		/* fixed ARCMSR_CDB header size */
	uint8_t sgcount;

	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	psge = (char *)&arcmsr_cdb->sgu;

	/* copy the SCSI CDB into the firmware command block */
	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
	sgcount = ccb->arcmsr_cdb.sgcount;

	if (sgcount != 0) {
		int length, i;
		int cdb_sgcount = 0;
		int total_xfer_length = 0;

		/* map stor port SG list to our iop SG List. */
		for (i = 0; i < sgcount; i++) {
			/* Get physaddr of the current data pointer */
			length = ccb->pkt_dmacookies[i].dmac_size;
			total_xfer_length += length;
			address_lo =
			    dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
			address_hi =
			    dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);

			if (address_hi == 0) {
				/* below 4GB: use the compact 32-bit entry */
				struct SG32ENTRY *dma_sg;

				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
				dma_sg->address = address_lo;
				dma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				/* above 4GB: 64-bit entry, flagged in length */
				struct SG64ENTRY *dma_sg;

				dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
				dma_sg->addresshigh = address_hi;
				dma_sg->address = address_lo;
				dma_sg->length = length | IS_SG64_ADDR;
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = total_xfer_length;
		if (arccdbsize > 256) {
			/* oversized SG list: tell the IOP it spans blocks */
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
		}
	} else {
		/* no data phase */
		arcmsr_cdb->DataLength = 0;
	}

	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
}
1819 1819
/*
 * arcmsr_post_ccb - Send a protocol specific ARC send postcard to an AIOC.
 *
 * acb: per-adapter soft state
 * ccb: the prepared command (CDB + SG list already built)
 *
 * This routine posts an ARC send postcard to the request post FIFO of a
 * specific ARC adapter.  The posting mechanism differs per adapter type:
 * type A writes a queueport register, type B fills a shared-memory circular
 * queue and rings a doorbell, type C writes low (and optionally high)
 * queueport registers.  For FLAG_NOINTR packets the completion is polled.
 */
static int
arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct scsi_pkt *pkt = ccb->pkt;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint_t pkt_flags = pkt->pkt_flags;

	arcmsr_cdb = &ccb->arcmsr_cdb;

	/*
	 * Flush the CCB pool to the device before the IOP can see it.
	 * TODO: Use correct offset and size for syncing?
	 */
	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
	    DDI_FAILURE)
		return (DDI_FAILURE);

	atomic_inc_32(&acb->ccboutstandingcount);
	/* Absolute deadline consulted by the timeout machinery. */
	ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);

	ccb->ccb_state = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	{
		struct HBA_msgUnit *phbamu;

		phbamu = (struct HBA_msgUnit *)acb->pmu;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			/* Oversized SG list: tell the IOP to fetch more. */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->inbound_queueport,
			    cdb_phyaddr_pattern |
			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
		} else {
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->inbound_queueport, cdb_phyaddr_pattern);
		}
		if (pkt_flags & FLAG_NOINTR)
			arcmsr_polling_hba_ccbdone(acb, ccb);
		break;
	}

	case ACB_ADAPTER_TYPE_B:
	{
		struct HBB_msgUnit *phbbmu;
		int ending_index, index;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		index = phbbmu->postq_index;
		/* Zero the next slot so the IOP sees a clean terminator. */
		ending_index = ((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
		phbbmu->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			phbbmu->post_qbuffer[index] =
			    (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
		} else {
			phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
		}
		index++;
		/* if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		phbbmu->postq_index = index;
		/* Ring the doorbell so the IOP consumes the new entry. */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_DRV2IOP_CDB_POSTED);

		if (pkt_flags & FLAG_NOINTR)
			arcmsr_polling_hbb_ccbdone(acb, ccb);
		break;
	}

	case ACB_ADAPTER_TYPE_C:
	{
		struct HBC_msgUnit *phbcmu;
		uint32_t ccb_post_stamp, arc_cdb_size;

		phbcmu = (struct HBC_msgUnit *)acb->pmu;
		/* Hardware encodes the CDB size; cap at 0x300 bytes. */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
		    ccb->arc_cdb_size;
		/* Pack address, size-in-64-byte-units, and valid bit. */
		ccb_post_stamp = (cdb_phyaddr_pattern |
		    ((arc_cdb_size-1) >> 6) |1);
		if (acb->cdb_phyaddr_hi32) {
			/* High half first, then low triggers the post. */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_queueport_high,
			    acb->cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
		} else {
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
		}
		if (pkt_flags & FLAG_NOINTR)
			arcmsr_polling_hbc_ccbdone(acb, ccb);
		break;
	}

	}
	return (DDI_SUCCESS);
}
1925 1925
1926 1926
/*
 * arcmsr_ccb_complete - hand a finished CCB's packet back to SCSA.
 *
 * Marks the CCB done, syncs consistent DMA buffers back to the CPU when
 * data was transferred, calls the target-driver completion routine, and
 * (when flag == 1) drops the outstanding-command count that was raised
 * in arcmsr_post_ccb().
 */
static void
arcmsr_ccb_complete(struct CCB *ccb, int flag)
{
	struct ACB *acb = ccb->acb;
	struct scsi_pkt *pkt = ccb->pkt;

	/* No packet attached: nothing to hand back to SCSA. */
	if (pkt == NULL) {
		return;
	}
	ccb->ccb_state |= ARCMSR_CCB_DONE;
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS);

	/* Make DMA'd-in data visible to the CPU before the target sees it. */
	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}
	/*
	 * The completion callback must not run with isr_mutex held, so it
	 * is dropped around the call when we own it.
	 * TODO: This represents a potential race condition, and is
	 * ultimately a poor design decision.  Revisit this code
	 * and solve the mutex ownership issue correctly.
	 */
	if (mutex_owned(&acb->isr_mutex)) {
		mutex_exit(&acb->isr_mutex);
		scsi_hba_pkt_comp(pkt);
		mutex_enter(&acb->isr_mutex);
	} else {
		scsi_hba_pkt_comp(pkt);
	}
	if (flag == 1) {
		atomic_dec_32(&acb->ccboutstandingcount);
	}
}
1961 1961
1962 1962 static void
1963 1963 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 1964 {
1965 1965 int id, lun;
1966 1966
1967 1967 ccb->ccb_state |= ARCMSR_CCB_DONE;
1968 1968 id = ccb->pkt->pkt_address.a_target;
1969 1969 lun = ccb->pkt->pkt_address.a_lun;
1970 1970
1971 1971 if (!error) {
1972 1972 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973 1973 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974 1974 }
1975 1975 ccb->pkt->pkt_reason = CMD_CMPLT;
1976 1976 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977 1977 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978 1978 &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1979 1979
1980 1980 } else {
1981 1981 switch (ccb->arcmsr_cdb.DeviceStatus) {
1982 1982 case ARCMSR_DEV_SELECT_TIMEOUT:
1983 1983 if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1984 1984 arcmsr_warn(acb,
1985 1985 "target %d lun %d selection "
1986 1986 "timeout", id, lun);
1987 1987 }
1988 1988 acb->devstate[id][lun] = ARECA_RAID_GONE;
1989 1989 ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1990 1990 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1991 1991 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1992 1992 &ccb->complete_queue_pointer,
1993 1993 &acb->ccb_complete_list);
1994 1994 break;
1995 1995 case ARCMSR_DEV_ABORTED:
1996 1996 case ARCMSR_DEV_INIT_FAIL:
1997 1997 arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED'"
1998 1998 " 'ARCMSR_DEV_INIT_FAIL'");
1999 1999 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2000 2000 acb->devstate[id][lun] = ARECA_RAID_GONE;
2001 2001 ccb->pkt->pkt_reason = CMD_DEV_GONE;
2002 2002 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2003 2003 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2004 2004 &ccb->complete_queue_pointer,
2005 2005 &acb->ccb_complete_list);
2006 2006 break;
2007 2007 case SCSISTAT_CHECK_CONDITION:
2008 2008 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2009 2009 arcmsr_report_sense_info(ccb);
2010 2010 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2011 2011 &ccb->complete_queue_pointer,
2012 2012 &acb->ccb_complete_list);
2013 2013 break;
2014 2014 default:
2015 2015 arcmsr_warn(acb,
2016 2016 "target %d lun %d isr received CMD_DONE"
2017 2017 " with unknown DeviceStatus (0x%x)",
2018 2018 id, lun, ccb->arcmsr_cdb.DeviceStatus);
2019 2019 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2020 2020 acb->devstate[id][lun] = ARECA_RAID_GONE;
2021 2021 /* unknown error or crc error just for retry */
2022 2022 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2023 2023 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2024 2024 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2025 2025 &ccb->complete_queue_pointer,
2026 2026 &acb->ccb_complete_list);
2027 2027 break;
2028 2028 }
2029 2029 }
2030 2030 }
2031 2031
2032 2032
2033 2033 static void
2034 2034 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2035 2035 {
2036 2036 uint16_t ccb_state;
2037 2037
2038 2038 if (ccb->acb != acb) {
2039 2039 return;
2040 2040 }
2041 2041 if (ccb->ccb_state != ARCMSR_CCB_START) {
2042 2042 switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2043 2043 case ARCMSR_CCB_TIMEOUT:
2044 2044 ccb_state = ccb->ccb_state;
2045 2045 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2046 2046 arcmsr_free_ccb(ccb);
2047 2047 else
2048 2048 ccb->ccb_state |= ARCMSR_CCB_BACK;
2049 2049 return;
2050 2050
2051 2051 case ARCMSR_CCB_ABORTED:
2052 2052 ccb_state = ccb->ccb_state;
2053 2053 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2054 2054 arcmsr_free_ccb(ccb);
2055 2055 else
2056 2056 ccb->ccb_state |= ARCMSR_CCB_BACK;
2057 2057 return;
2058 2058 case ARCMSR_CCB_RESET:
2059 2059 ccb_state = ccb->ccb_state;
2060 2060 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2061 2061 arcmsr_free_ccb(ccb);
2062 2062 else
2063 2063 ccb->ccb_state |= ARCMSR_CCB_BACK;
2064 2064 return;
2065 2065 default:
2066 2066 return;
2067 2067 }
2068 2068 }
2069 2069 arcmsr_report_ccb_state(acb, ccb, error);
2070 2070 }
2071 2071
2072 2072 static void
2073 2073 arcmsr_report_sense_info(struct CCB *ccb)
2074 2074 {
2075 2075 struct SENSE_DATA *cdb_sensedata;
2076 2076 struct scsi_pkt *pkt = ccb->pkt;
2077 2077 struct scsi_arq_status *arq_status;
2078 2078 union scsi_cdb *cdbp;
2079 2079 uint64_t err_blkno;
2080 2080
2081 2081 cdbp = (void *)pkt->pkt_cdbp;
2082 2082 err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2083 2083
2084 2084 arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2085 2085 bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2086 2086 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2087 2087 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2088 2088 arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2089 2089 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2090 2090 arq_status->sts_rqpkt_statistics = 0;
2091 2091 arq_status->sts_rqpkt_resid = 0;
2092 2092
2093 2093 pkt->pkt_reason = CMD_CMPLT;
2094 2094 /* auto rqsense took place */
2095 2095 pkt->pkt_state |= STATE_ARQ_DONE;
2096 2096
2097 2097 cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2098 2098 if (&arq_status->sts_sensedata != NULL) {
2099 2099 if (err_blkno <= 0xfffffffful) {
2100 2100 struct scsi_extended_sense *sts_sensedata;
2101 2101
2102 2102 sts_sensedata = &arq_status->sts_sensedata;
2103 2103 sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2104 2104 /* must eq CLASS_EXTENDED_SENSE (0x07) */
2105 2105 sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2106 2106 sts_sensedata->es_valid = cdb_sensedata->Valid;
2107 2107 sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2108 2108 sts_sensedata->es_key = cdb_sensedata->SenseKey;
2109 2109 sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2110 2110 sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2111 2111 sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2112 2112 sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2113 2113 sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2114 2114 sts_sensedata->es_info_3 = (err_blkno >> 8) & 0xFF;
2115 2115 sts_sensedata->es_info_4 = err_blkno & 0xFF;
2116 2116 sts_sensedata->es_add_len =
2117 2117 cdb_sensedata->AdditionalSenseLength;
2118 2118 sts_sensedata->es_cmd_info[0] =
2119 2119 cdb_sensedata->CommandSpecificInformation[0];
2120 2120 sts_sensedata->es_cmd_info[1] =
2121 2121 cdb_sensedata->CommandSpecificInformation[1];
2122 2122 sts_sensedata->es_cmd_info[2] =
2123 2123 cdb_sensedata->CommandSpecificInformation[2];
2124 2124 sts_sensedata->es_cmd_info[3] =
2125 2125 cdb_sensedata->CommandSpecificInformation[3];
2126 2126 sts_sensedata->es_add_code =
2127 2127 cdb_sensedata->AdditionalSenseCode;
2128 2128 sts_sensedata->es_qual_code =
2129 2129 cdb_sensedata->AdditionalSenseCodeQualifier;
2130 2130 sts_sensedata->es_fru_code =
2131 2131 cdb_sensedata->FieldReplaceableUnitCode;
2132 2132 } else { /* 64-bit LBA */
2133 2133 struct scsi_descr_sense_hdr *dsp;
2134 2134 struct scsi_information_sense_descr *isd;
2135 2135
2136 2136 dsp = (struct scsi_descr_sense_hdr *)
2137 2137 &arq_status->sts_sensedata;
2138 2138 dsp->ds_class = CLASS_EXTENDED_SENSE;
2139 2139 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2140 2140 dsp->ds_key = cdb_sensedata->SenseKey;
2141 2141 dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2142 2142 dsp->ds_qual_code =
2143 2143 cdb_sensedata->AdditionalSenseCodeQualifier;
2144 2144 dsp->ds_addl_sense_length =
2145 2145 sizeof (struct scsi_information_sense_descr);
2146 2146
2147 2147 isd = (struct scsi_information_sense_descr *)(dsp+1);
2148 2148 isd->isd_descr_type = DESCR_INFORMATION;
2149 2149 isd->isd_valid = 1;
2150 2150 isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2151 2151 isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2152 2152 isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2153 2153 isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2154 2154 isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2155 2155 isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2156 2156 isd->isd_information[6] = (err_blkno >> 8) & 0xFF;
2157 2157 isd->isd_information[7] = (err_blkno) & 0xFF;
2158 2158 }
2159 2159 }
2160 2160 }
2161 2161
2162 2162
2163 2163 static int
2164 2164 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2165 2165 {
2166 2166 struct CCB *ccb;
2167 2167 uint32_t intmask_org = 0;
2168 2168 int i = 0;
2169 2169
2170 2170 acb->num_aborts++;
2171 2171
2172 2172 if (abortpkt != NULL) {
2173 2173 /*
2174 2174 * We don't support abort of a single packet. All
2175 2175 * callers in our kernel always do a global abort, so
2176 2176 * there is no point in having code to support it
2177 2177 * here.
2178 2178 */
2179 2179 return (DDI_FAILURE);
2180 2180 }
2181 2181
2182 2182 /*
2183 2183 * if abortpkt is NULL, the upper layer needs us
2184 2184 * to abort all commands
2185 2185 */
2186 2186 if (acb->ccboutstandingcount != 0) {
2187 2187 /* disable all outbound interrupt */
2188 2188 intmask_org = arcmsr_disable_allintr(acb);
2189 2189 /* clear and abort all outbound posted Q */
2190 2190 arcmsr_done4abort_postqueue(acb);
2191 2191 /* talk to iop 331 outstanding command aborted */
2192 2192 (void) arcmsr_abort_host_command(acb);
2193 2193
2194 2194 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2195 2195 ccb = acb->pccb_pool[i];
2196 2196 if (ccb->ccb_state == ARCMSR_CCB_START) {
2197 2197 /*
2198 2198 * this ccb will complete at
2199 2199 * hwinterrupt
2200 2200 */
2201 2201 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2202 2202 ccb->pkt->pkt_reason = CMD_ABORTED;
2203 2203 ccb->pkt->pkt_statistics |= STAT_ABORTED;
2204 2204 arcmsr_ccb_complete(ccb, 1);
2205 2205 }
2206 2206 }
2207 2207 /*
2208 2208 * enable outbound Post Queue, outbound
2209 2209 * doorbell Interrupt
2210 2210 */
2211 2211 arcmsr_enable_allintr(acb, intmask_org);
2212 2212 }
2213 2213 return (DDI_SUCCESS);
2214 2214 }
2215 2215
2216 2216
2217 2217 /*
2218 2218 * Autoconfiguration support
2219 2219 */
2220 2220 static int
2221 2221 arcmsr_parse_devname(char *devnm, int *tgt, int *lun) {
2222 2222
2223 2223 char devbuf[SCSI_MAXNAMELEN];
2224 2224 char *addr;
2225 2225 char *p, *tp, *lp;
2226 2226 long num;
2227 2227
2228 2228 /* Parse dev name and address */
2229 2229 (void) strlcpy(devbuf, devnm, sizeof (devbuf));
2230 2230 addr = "";
2231 2231 for (p = devbuf; *p != '\0'; p++) {
2232 2232 if (*p == '@') {
2233 2233 addr = p + 1;
2234 2234 *p = '\0';
2235 2235 } else if (*p == ':') {
2236 2236 *p = '\0';
2237 2237 break;
2238 2238 }
2239 2239 }
2240 2240
2241 2241 /* Parse target and lun */
2242 2242 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2243 2243 if (*p == ',') {
2244 2244 lp = p + 1;
2245 2245 *p = '\0';
2246 2246 break;
2247 2247 }
2248 2248 }
2249 2249 if ((tgt != NULL) && (tp != NULL)) {
2250 2250 if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2251 2251 return (-1);
2252 2252 *tgt = (int)num;
2253 2253 }
2254 2254 if ((lun != NULL) && (lp != NULL)) {
2255 2255 if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2256 2256 return (-1);
2257 2257 *lun = (int)num;
2258 2258 }
2259 2259 return (0);
2260 2260 }
2261 2261
2262 2262 static int
2263 2263 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2264 2264 {
2265 2265 int tgt, lun;
2266 2266
2267 2267 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2268 2268 -1);
2269 2269 if (tgt == -1)
2270 2270 return (DDI_FAILURE);
2271 2271 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2272 2272 -1);
2273 2273 if (lun == -1)
2274 2274 return (DDI_FAILURE);
2275 2275 (void) snprintf(name, len, "%x,%x", tgt, lun);
2276 2276 return (DDI_SUCCESS);
2277 2277 }
2278 2278
2279 2279 static dev_info_t *
2280 2280 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2281 2281 {
2282 2282 dev_info_t *child = NULL;
2283 2283 char addr[SCSI_MAXNAMELEN];
2284 2284 char tmp[SCSI_MAXNAMELEN];
2285 2285
2286 2286 (void) sprintf(addr, "%x,%x", tgt, lun);
2287 2287
2288 2288 for (child = ddi_get_child(acb->dev_info);
2289 2289 child;
2290 2290 child = ddi_get_next_sibling(child)) {
2291 2291 /* We don't care about non-persistent node */
2292 2292 if (ndi_dev_is_persistent_node(child) == 0)
2293 2293 continue;
2294 2294 if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2295 2295 DDI_SUCCESS)
2296 2296 continue;
2297 2297 if (strcmp(addr, tmp) == 0)
2298 2298 break;
2299 2299 }
2300 2300 return (child);
2301 2301 }
2302 2302
2303 2303 static int
2304 2304 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2305 2305 {
2306 2306 char *nodename = NULL;
2307 2307 char **compatible = NULL;
2308 2308 int ncompatible = 0;
2309 2309 dev_info_t *ldip = NULL;
2310 2310 int tgt = sd->sd_address.a_target;
2311 2311 int lun = sd->sd_address.a_lun;
2312 2312 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2313 2313 int rval;
2314 2314
2315 2315 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2316 2316 NULL, &nodename, &compatible, &ncompatible);
2317 2317 if (nodename == NULL) {
2318 2318 arcmsr_warn(acb, "found no comptible driver for T%dL%d",
2319 2319 tgt, lun);
2320 2320 rval = NDI_FAILURE;
2321 2321 goto finish;
2322 2322 }
2323 2323 /* Create dev node */
2324 2324 rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2325 2325 if (rval == NDI_SUCCESS) {
2326 2326 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2327 2327 DDI_PROP_SUCCESS) {
2328 2328 arcmsr_warn(acb,
2329 2329 "unable to create target property for T%dL%d",
2330 2330 tgt, lun);
2331 2331 rval = NDI_FAILURE;
2332 2332 goto finish;
2333 2333 }
2334 2334 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2335 2335 DDI_PROP_SUCCESS) {
2336 2336 arcmsr_warn(acb,
2337 2337 "unable to create lun property for T%dL%d",
2338 2338 tgt, lun);
2339 2339 rval = NDI_FAILURE;
2340 2340 goto finish;
2341 2341 }
2342 2342 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2343 2343 "compatible", compatible, ncompatible) !=
2344 2344 DDI_PROP_SUCCESS) {
2345 2345 arcmsr_warn(acb,
2346 2346 "unable to create compatible property for T%dL%d",
2347 2347 tgt, lun);
2348 2348 rval = NDI_FAILURE;
2349 2349 goto finish;
2350 2350 }
2351 2351 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2352 2352 if (rval != NDI_SUCCESS) {
2353 2353 arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2354 2354 ndi_prop_remove_all(ldip);
2355 2355 (void) ndi_devi_free(ldip);
2356 2356 } else {
2357 2357 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2358 2358 }
2359 2359 }
2360 2360 finish:
2361 2361 if (dipp)
2362 2362 *dipp = ldip;
2363 2363
2364 2364 scsi_hba_nodename_compatible_free(nodename, compatible);
2365 2365 return (rval);
2366 2366 }
2367 2367
2368 2368 static int
2369 2369 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2370 2370 {
2371 2371 struct scsi_device sd;
2372 2372 dev_info_t *child;
2373 2373 int rval;
2374 2374
2375 2375 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2376 2376 if (ldip) {
2377 2377 *ldip = child;
2378 2378 }
2379 2379 return (NDI_SUCCESS);
2380 2380 }
2381 2381 bzero(&sd, sizeof (struct scsi_device));
2382 2382 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2383 2383 sd.sd_address.a_target = tgt;
2384 2384 sd.sd_address.a_lun = lun;
2385 2385
2386 2386 rval = scsi_hba_probe(&sd, NULL);
2387 2387 if (rval == SCSIPROBE_EXISTS)
2388 2388 rval = arcmsr_config_child(acb, &sd, ldip);
2389 2389 scsi_unprobe(&sd);
2390 2390 return (rval);
2391 2391 }
2392 2392
2393 2393
/*
 * arcmsr_add_intr - allocate, register, and enable interrupts of the
 * requested type (MSI-X, MSI, or FIXED) for this adapter instance.
 *
 * Returns DDI_SUCCESS when all handlers are installed and enabled,
 * DDI_FAILURE otherwise.
 *
 * NOTE(review): on the failure paths after acb->phandle / the interrupt
 * handles have been allocated, nothing is freed here — presumably the
 * caller is expected to invoke arcmsr_remove_intr() to reclaim them;
 * confirm against the attach/teardown code before changing this.
 */
static int
arcmsr_add_intr(struct ACB *acb, int intr_type)
{
	int rc, count;
	dev_info_t *dev_info;
	const char *type_str;

	/* Human-readable type name, used only in diagnostics below. */
	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:
		type_str = "MSI";
		break;
	case DDI_INTR_TYPE_MSIX:
		type_str = "MSIX";
		break;
	case DDI_INTR_TYPE_FIXED:
		type_str = "FIXED";
		break;
	default:
		type_str = "unknown";
		break;
	}

	dev_info = acb->dev_info;
	/* Determine number of supported interrupts */
	rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		arcmsr_warn(acb,
		    "no interrupts of type %s, rc=0x%x, count=%d",
		    type_str, rc, count);
		return (DDI_FAILURE);
	}
	acb->intr_size = sizeof (ddi_intr_handle_t) * count;
	acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
	rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
	    count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
	if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
		arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
		    type_str, rc);
		return (DDI_FAILURE);
	}
	/* Fewer vectors than requested is tolerated, just noted. */
	if (acb->intr_count < count) {
		arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
		    acb->intr_count, count);
	}
	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
		arcmsr_warn(acb, "ddi_intr_get_pri failed");
		return (DDI_FAILURE);
	}
	/* This driver's locks are not spl-safe above hilevel. */
	if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
		arcmsr_warn(acb, "high level interrupt not supported");
		return (DDI_FAILURE);
	}

	for (int x = 0; x < acb->intr_count; x++) {
		if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
		    (caddr_t)acb, NULL) != DDI_SUCCESS) {
			arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
			    type_str);
			return (DDI_FAILURE);
		}
	}
	(void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI */
		(void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
	} else {
		/* Call ddi_intr_enable() for MSI non block enable */
		for (int x = 0; x < acb->intr_count; x++) {
			(void) ddi_intr_enable(acb->phandle[x]);
		}
	}
	return (DDI_SUCCESS);
}
2470 2470
2471 2471 static void
2472 2472 arcmsr_remove_intr(struct ACB *acb)
2473 2473 {
2474 2474 int x;
2475 2475
2476 2476 if (acb->phandle == NULL)
2477 2477 return;
2478 2478
2479 2479 /* Disable all interrupts */
2480 2480 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2481 2481 /* Call ddi_intr_block_disable() */
2482 2482 (void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2483 2483 } else {
2484 2484 for (x = 0; x < acb->intr_count; x++) {
2485 2485 (void) ddi_intr_disable(acb->phandle[x]);
2486 2486 }
2487 2487 }
2488 2488 /* Call ddi_intr_remove_handler() */
2489 2489 for (x = 0; x < acb->intr_count; x++) {
2490 2490 (void) ddi_intr_remove_handler(acb->phandle[x]);
2491 2491 (void) ddi_intr_free(acb->phandle[x]);
2492 2492 }
2493 2493 kmem_free(acb->phandle, acb->intr_size);
2494 2494 acb->phandle = NULL;
2495 2495 }
2496 2496
2497 2497 static void
2498 2498 arcmsr_mutex_init(struct ACB *acb)
2499 2499 {
2500 2500 mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2501 2501 mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2502 2502 mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2503 2503 mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2504 2504 mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2505 2505 }
2506 2506
2507 2507 static void
2508 2508 arcmsr_mutex_destroy(struct ACB *acb)
2509 2509 {
2510 2510 mutex_destroy(&acb->isr_mutex);
2511 2511 mutex_destroy(&acb->acb_mutex);
2512 2512 mutex_destroy(&acb->postq_mutex);
2513 2513 mutex_destroy(&acb->workingQ_mutex);
2514 2514 mutex_destroy(&acb->ioctl_mutex);
2515 2515 }
2516 2516
2517 2517 static int
2518 2518 arcmsr_initialize(struct ACB *acb)
2519 2519 {
2520 2520 struct CCB *pccb_tmp;
2521 2521 size_t allocated_length;
2522 2522 uint16_t wval;
2523 2523 uint_t intmask_org, count;
2524 2524 caddr_t arcmsr_ccbs_area;
2525 2525 uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2526 2526 int32_t dma_sync_size;
2527 2527 int i, id, lun, instance;
2528 2528
2529 2529 instance = ddi_get_instance(acb->dev_info);
2530 2530 wlval = pci_config_get32(acb->pci_acc_handle, 0);
2531 2531 wval = (uint16_t)((wlval >> 16) & 0xffff);
2532 2532 realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2533 2533 switch (wval) {
2534 2534 case PCI_DEVICE_ID_ARECA_1880:
2535 2535 case PCI_DEVICE_ID_ARECA_1882:
2536 2536 {
2537 2537 uint32_t *iop_mu_regs_map0;
2538 2538
2539 2539 acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2540 2540 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2541 2541 if (ddi_regs_map_setup(acb->dev_info, 2,
2542 2542 (caddr_t *)&iop_mu_regs_map0, 0,
2543 2543 sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2544 2544 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2545 2545 arcmsr_warn(acb, "unable to map registers");
2546 2546 return (DDI_FAILURE);
2547 2547 }
2548 2548
2549 2549 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2550 2550 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2551 2551 DDI_SUCCESS) {
2552 2552 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2553 2553 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2554 2554 return (DDI_FAILURE);
2555 2555 }
2556 2556
2557 2557 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2558 2558 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2559 2559 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2560 2560 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2561 2561 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2562 2562 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2563 2563 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2564 2564 return (DDI_FAILURE);
2565 2565 }
2566 2566
2567 2567 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2568 2568 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2569 2569 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2570 2570 &count) != DDI_DMA_MAPPED) {
2571 2571 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2572 2572 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2573 2573 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2574 2574 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2575 2575 return (DDI_FAILURE);
2576 2576 }
2577 2577 bzero(arcmsr_ccbs_area, dma_sync_size);
2578 2578 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2579 2579 - PtrToNum(arcmsr_ccbs_area));
2580 2580 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2581 2581 /* ioport base */
2582 2582 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2583 2583 break;
2584 2584 }
2585 2585
2586 2586 case PCI_DEVICE_ID_ARECA_1201:
2587 2587 {
2588 2588 uint32_t *iop_mu_regs_map0;
2589 2589 uint32_t *iop_mu_regs_map1;
2590 2590 struct HBB_msgUnit *phbbmu;
2591 2591
2592 2592 acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2593 2593 dma_sync_size =
2594 2594 (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2595 2595 sizeof (struct HBB_msgUnit);
2596 2596 /* Allocate memory for the ccb */
2597 2597 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2598 2598 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2599 2599 DDI_SUCCESS) {
2600 2600 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2601 2601 return (DDI_FAILURE);
2602 2602 }
2603 2603
2604 2604 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2605 2605 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2606 2606 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2607 2607 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2608 2608 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2609 2609 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2610 2610 return (DDI_FAILURE);
2611 2611 }
2612 2612
2613 2613 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2614 2614 (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2615 2615 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2616 2616 NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2617 2617 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2618 2618 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2619 2619 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2620 2620 return (DDI_FAILURE);
2621 2621 }
2622 2622 bzero(arcmsr_ccbs_area, dma_sync_size);
2623 2623 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2624 2624 - PtrToNum(arcmsr_ccbs_area));
2625 2625 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2626 2626 acb->pmu = (struct msgUnit *)
2627 2627 NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2628 2628 (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2629 2629 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2630 2630
2631 2631 /* setup device register */
2632 2632 if (ddi_regs_map_setup(acb->dev_info, 1,
2633 2633 (caddr_t *)&iop_mu_regs_map0, 0,
2634 2634 sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2635 2635 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2636 2636 arcmsr_warn(acb, "unable to map base0 registers");
2637 2637 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2638 2638 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2639 2639 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2640 2640 return (DDI_FAILURE);
2641 2641 }
2642 2642
2643 2643 /* ARCMSR_DRV2IOP_DOORBELL */
2644 2644 phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2645 2645 if (ddi_regs_map_setup(acb->dev_info, 2,
2646 2646 (caddr_t *)&iop_mu_regs_map1, 0,
2647 2647 sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2648 2648 &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2649 2649 arcmsr_warn(acb, "unable to map base1 registers");
2650 2650 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2651 2651 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2652 2652 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2653 2653 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2654 2654 return (DDI_FAILURE);
2655 2655 }
2656 2656
2657 2657 /* ARCMSR_MSGCODE_RWBUFFER */
2658 2658 phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2659 2659 break;
2660 2660 }
2661 2661
2662 2662 case PCI_DEVICE_ID_ARECA_1110:
2663 2663 case PCI_DEVICE_ID_ARECA_1120:
2664 2664 case PCI_DEVICE_ID_ARECA_1130:
2665 2665 case PCI_DEVICE_ID_ARECA_1160:
2666 2666 case PCI_DEVICE_ID_ARECA_1170:
2667 2667 case PCI_DEVICE_ID_ARECA_1210:
2668 2668 case PCI_DEVICE_ID_ARECA_1220:
2669 2669 case PCI_DEVICE_ID_ARECA_1230:
2670 2670 case PCI_DEVICE_ID_ARECA_1231:
2671 2671 case PCI_DEVICE_ID_ARECA_1260:
2672 2672 case PCI_DEVICE_ID_ARECA_1261:
2673 2673 case PCI_DEVICE_ID_ARECA_1270:
2674 2674 case PCI_DEVICE_ID_ARECA_1280:
2675 2675 case PCI_DEVICE_ID_ARECA_1212:
2676 2676 case PCI_DEVICE_ID_ARECA_1222:
2677 2677 case PCI_DEVICE_ID_ARECA_1380:
2678 2678 case PCI_DEVICE_ID_ARECA_1381:
2679 2679 case PCI_DEVICE_ID_ARECA_1680:
2680 2680 case PCI_DEVICE_ID_ARECA_1681:
2681 2681 {
2682 2682 uint32_t *iop_mu_regs_map0;
2683 2683
2684 2684 acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2685 2685 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2686 2686 if (ddi_regs_map_setup(acb->dev_info, 1,
2687 2687 (caddr_t *)&iop_mu_regs_map0, 0,
2688 2688 sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2689 2689 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2690 2690 arcmsr_warn(acb, "unable to map registers");
2691 2691 return (DDI_FAILURE);
2692 2692 }
2693 2693
2694 2694 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2695 2695 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2696 2696 DDI_SUCCESS) {
2697 2697 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2698 2698 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2699 2699 return (DDI_FAILURE);
2700 2700 }
2701 2701
2702 2702 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2703 2703 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2704 2704 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2705 2705 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2706 2706 arcmsr_warn(acb, "ddi_dma_mem_alloc failed", instance);
2707 2707 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2708 2708 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2709 2709 return (DDI_FAILURE);
2710 2710 }
2711 2711
2712 2712 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2713 2713 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2714 2714 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2715 2715 &count) != DDI_DMA_MAPPED) {
2716 2716 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2717 2717 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2718 2718 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2719 2719 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2720 2720 return (DDI_FAILURE);
2721 2721 }
2722 2722 bzero(arcmsr_ccbs_area, dma_sync_size);
2723 2723 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2724 2724 - PtrToNum(arcmsr_ccbs_area));
2725 2725 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2726 2726 /* ioport base */
2727 2727 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2728 2728 break;
2729 2729 }
2730 2730
2731 2731 default:
2732 2732 arcmsr_warn(acb, "Unknown RAID adapter type!");
2733 2733 return (DDI_FAILURE);
2734 2734 }
2735 2735 arcmsr_init_list_head(&acb->ccb_complete_list);
2736 2736 /* here we can not access pci configuration again */
2737 2737 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2738 2738 ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2739 2739 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2740 2740 /* physical address of acb->pccb_pool */
2741 2741 cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2742 2742
2743 2743 pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2744 2744
2745 2745 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2746 2746 pccb_tmp->cdb_phyaddr_pattern =
2747 2747 (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2748 2748 cdb_phyaddr : (cdb_phyaddr >> 5);
2749 2749 pccb_tmp->acb = acb;
2750 2750 acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2751 2751 cdb_phyaddr = cdb_phyaddr + realccb_size;
2752 2752 pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2753 2753 realccb_size);
2754 2754 }
2755 2755 acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2756 2756
2757 2757 /* disable all outbound interrupt */
2758 2758 intmask_org = arcmsr_disable_allintr(acb);
2759 2759
2760 2760 if (!arcmsr_iop_confirm(acb)) {
2761 2761 arcmsr_warn(acb, "arcmsr_iop_confirm error", instance);
2762 2762 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2763 2763 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2764 2764 return (DDI_FAILURE);
2765 2765 }
2766 2766
2767 2767 for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2768 2768 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2769 2769 acb->devstate[id][lun] = ARECA_RAID_GONE;
2770 2770 }
2771 2771 }
2772 2772
2773 2773 /* enable outbound Post Queue, outbound doorbell Interrupt */
2774 2774 arcmsr_enable_allintr(acb, intmask_org);
2775 2775
2776 2776 return (0);
2777 2777 }
2778 2778
2779 2779 static int
2780 2780 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2781 2781 {
2782 2782 scsi_hba_tran_t *hba_trans;
2783 2783 ddi_device_acc_attr_t dev_acc_attr;
2784 2784 struct ACB *acb;
2785 2785 uint16_t wval;
2786 2786 int raid6 = 1;
2787 2787 char *type;
2788 2788 int intr_types;
2789 2789
2790 2790
2791 2791 /*
2792 2792 * Soft State Structure
2793 2793 * The driver should allocate the per-device-instance
2794 2794 * soft state structure, being careful to clean up properly if
2795 2795 * an error occurs. Allocate data structure.
2796 2796 */
2797 2797 if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2798 2798 arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2799 2799 return (DDI_FAILURE);
2800 2800 }
2801 2801
2802 2802 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2803 2803 ASSERT(acb);
2804 2804
2805 2805 arcmsr_mutex_init(acb);
2806 2806
2807 2807 /* acb is already zalloc()d so we don't need to bzero() it */
2808 2808 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2809 2809 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2810 2810 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2811 2811
2812 2812 acb->dev_info = dev_info;
2813 2813 acb->dev_acc_attr = dev_acc_attr;
2814 2814
2815 2815 /*
2816 2816 * The driver, if providing DMA, should also check that its hardware is
2817 2817 * installed in a DMA-capable slot
2818 2818 */
2819 2819 if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2820 2820 arcmsr_warn(acb, "hardware is not installed in"
2821 2821 " a DMA-capable slot");
2822 2822 goto error_level_0;
2823 2823 }
2824 2824 if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2825 2825 arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2826 2826 goto error_level_0;
2827 2827 }
2828 2828
2829 2829 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2830 2830 if (wval != PCI_VENDOR_ID_ARECA) {
2831 2831 arcmsr_warn(acb,
2832 2832 "'vendorid (0x%04x) does not match 0x%04x "
2833 2833 "(PCI_VENDOR_ID_ARECA)",
2834 2834 wval, PCI_VENDOR_ID_ARECA);
2835 2835 goto error_level_0;
2836 2836 }
2837 2837
2838 2838 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2839 2839 switch (wval) {
2840 2840 case PCI_DEVICE_ID_ARECA_1110:
2841 2841 case PCI_DEVICE_ID_ARECA_1210:
2842 2842 case PCI_DEVICE_ID_ARECA_1201:
2843 2843 raid6 = 0;
2844 2844 /*FALLTHRU*/
2845 2845 case PCI_DEVICE_ID_ARECA_1120:
2846 2846 case PCI_DEVICE_ID_ARECA_1130:
2847 2847 case PCI_DEVICE_ID_ARECA_1160:
2848 2848 case PCI_DEVICE_ID_ARECA_1170:
2849 2849 case PCI_DEVICE_ID_ARECA_1220:
2850 2850 case PCI_DEVICE_ID_ARECA_1230:
2851 2851 case PCI_DEVICE_ID_ARECA_1260:
2852 2852 case PCI_DEVICE_ID_ARECA_1270:
2853 2853 case PCI_DEVICE_ID_ARECA_1280:
2854 2854 type = "SATA 3G";
2855 2855 break;
2856 2856 case PCI_DEVICE_ID_ARECA_1380:
2857 2857 case PCI_DEVICE_ID_ARECA_1381:
2858 2858 case PCI_DEVICE_ID_ARECA_1680:
2859 2859 case PCI_DEVICE_ID_ARECA_1681:
2860 2860 type = "SAS 3G";
2861 2861 break;
2862 2862 case PCI_DEVICE_ID_ARECA_1880:
2863 2863 type = "SAS 6G";
2864 2864 break;
2865 2865 default:
2866 2866 type = "X-TYPE";
2867 2867 arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2868 2868 goto error_level_0;
2869 2869 }
2870 2870
2871 2871 arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2872 2872 type, raid6 ? " (RAID6 capable)" : "");
2873 2873
2874 2874 /* we disable iop interrupt here */
2875 2875 if (arcmsr_initialize(acb) == DDI_FAILURE) {
2876 2876 arcmsr_warn(acb, "arcmsr_initialize failed");
2877 2877 goto error_level_1;
2878 2878 }
2879 2879
2880 2880 /* Allocate a transport structure */
2881 2881 hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2882 2882 if (hba_trans == NULL) {
2883 2883 arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2884 2884 goto error_level_2;
2885 2885 }
2886 2886 acb->scsi_hba_transport = hba_trans;
2887 2887 acb->dev_info = dev_info;
2888 2888 /* init scsi host adapter transport entry */
2889 2889 hba_trans->tran_hba_private = acb;
2890 2890 hba_trans->tran_tgt_private = NULL;
2891 2891 /*
2892 2892 * If no per-target initialization is required, the HBA can leave
2893 2893 * tran_tgt_init set to NULL.
2894 2894 */
2895 2895 hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2896 2896 hba_trans->tran_tgt_probe = scsi_hba_probe;
2897 2897 hba_trans->tran_tgt_free = NULL;
2898 2898 hba_trans->tran_start = arcmsr_tran_start;
2899 2899 hba_trans->tran_abort = arcmsr_tran_abort;
2900 2900 hba_trans->tran_reset = arcmsr_tran_reset;
2901 2901 hba_trans->tran_getcap = arcmsr_tran_getcap;
2902 2902 hba_trans->tran_setcap = arcmsr_tran_setcap;
2903 2903 hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2904 2904 hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2905 2905 hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2906 2906 hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2907 2907
2908 2908 hba_trans->tran_reset_notify = NULL;
2909 2909 hba_trans->tran_get_bus_addr = NULL;
2910 2910 hba_trans->tran_get_name = NULL;
2911 2911 hba_trans->tran_quiesce = NULL;
2912 2912 hba_trans->tran_unquiesce = NULL;
2913 2913 hba_trans->tran_bus_reset = NULL;
2914 2914 hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2915 2915 hba_trans->tran_add_eventcall = NULL;
2916 2916 hba_trans->tran_get_eventcookie = NULL;
2917 2917 hba_trans->tran_post_event = NULL;
2918 2918 hba_trans->tran_remove_eventcall = NULL;
2919 2919
2920 2920 /* iop init and enable interrupt here */
2921 2921 arcmsr_iop_init(acb);
2922 2922
2923 2923 /* Get supported interrupt types */
2924 2924 if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2925 2925 DDI_SUCCESS) {
2926 2926 arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2927 2927 goto error_level_3;
2928 2928 }
2929 2929 if (intr_types & DDI_INTR_TYPE_FIXED) {
2930 2930 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2931 2931 goto error_level_5;
2932 2932 } else if (intr_types & DDI_INTR_TYPE_MSI) {
2933 2933 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2934 2934 goto error_level_5;
2935 2935 }
2936 2936
2937 2937 /*
2938 2938 * The driver should attach this instance of the device, and
2939 2939 * perform error cleanup if necessary
2940 2940 */
2941 2941 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 2942 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 2943 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 2944 goto error_level_5;
2945 2945 }
2946 2946
2947 2947 /* Create a taskq for dealing with dr events */
2948 2948 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 2949 TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 2950 arcmsr_warn(acb, "ddi_taskq_create failed");
2951 2951 goto error_level_8;
2952 2952 }
2953 2953
2954 2954 acb->timeout_count = 0;
2955 2955 /* active ccbs "timeout" watchdog */
2956 2956 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 2957 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2958 2958 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 2959 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2960 2960
2961 2961 /* report device info */
2962 2962 ddi_report_dev(dev_info);
2963 2963
2964 2964 return (DDI_SUCCESS);
2965 2965
2966 2966 error_level_8:
2967 2967
2968 2968 error_level_7:
2969 2969 error_level_6:
2970 2970 (void) scsi_hba_detach(dev_info);
2971 2971
2972 2972 error_level_5:
2973 2973 arcmsr_remove_intr(acb);
2974 2974
2975 2975 error_level_3:
2976 2976 error_level_4:
2977 2977 if (acb->scsi_hba_transport)
2978 2978 scsi_hba_tran_free(acb->scsi_hba_transport);
2979 2979
2980 2980 error_level_2:
2981 2981 if (acb->ccbs_acc_handle)
2982 2982 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2983 2983 if (acb->ccbs_pool_handle)
2984 2984 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2985 2985
2986 2986 error_level_1:
2987 2987 if (acb->pci_acc_handle)
2988 2988 pci_config_teardown(&acb->pci_acc_handle);
2989 2989 arcmsr_mutex_destroy(acb);
2990 2990 ddi_soft_state_free(arcmsr_soft_state, instance);
2991 2991
2992 2992 error_level_0:
2993 2993 return (DDI_FAILURE);
2994 2994 }
2995 2995
2996 2996
2997 2997 static void
2998 2998 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2999 2999 {
3000 3000 char buf[256];
3001 3001
3002 3002 if (acb != NULL) {
3003 3003 (void) snprintf(buf, sizeof (buf), "%s%d: %s",
3004 3004 ddi_driver_name(acb->dev_info),
3005 3005 ddi_get_instance(acb->dev_info), fmt);
3006 3006 fmt = buf;
3007 3007 }
3008 3008 vcmn_err(level, fmt, ap);
3009 3009 }
3010 3010
/*
 * Varargs front end for arcmsr_vlog() at a caller-chosen severity.
 */
static void
arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	arcmsr_vlog(acb, level, fmt, args);
	va_end(args);
}
3020 3020
3021 3021 static void
3022 3022 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3023 3023 {
3024 3024 va_list ap;
3025 3025
3026 3026 va_start(ap, fmt);
3027 3027 arcmsr_vlog(acb, CE_WARN, fmt, ap);
3028 3028 va_end(ap);
3029 3029 }
3030 3030
3031 3031 static void
3032 3032 arcmsr_init_list_head(struct list_head *list)
3033 3033 {
3034 3034 list->next = list;
3035 3035 list->prev = list;
3036 3036 }
3037 3037
3038 3038 static void
3039 3039 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3040 3040 {
3041 3041 next->prev = prev;
3042 3042 prev->next = next;
3043 3043 }
3044 3044
3045 3045 static void
3046 3046 arcmsr_x_list_add(struct list_head *new_one, struct list_head *prev,
3047 3047 struct list_head *next)
3048 3048 {
3049 3049 next->prev = new_one;
3050 3050 new_one->next = next;
3051 3051 new_one->prev = prev;
3052 3052 prev->next = new_one;
3053 3053 }
3054 3054
3055 3055 static void
3056 3056 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3057 3057 struct list_head *head)
3058 3058 {
3059 3059 mutex_enter(list_lock);
3060 3060 arcmsr_x_list_add(new_one, head->prev, head);
3061 3061 mutex_exit(list_lock);
3062 3062 }
3063 3063
3064 3064 static struct list_head *
3065 3065 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3066 3066 {
3067 3067 struct list_head *one = NULL;
3068 3068
3069 3069 mutex_enter(list_lock);
3070 3070 if (head->next == head) {
3071 3071 mutex_exit(list_lock);
3072 3072 return (NULL);
3073 3073 }
3074 3074 one = head->next;
3075 3075 arcmsr_x_list_del(one->prev, one->next);
3076 3076 arcmsr_init_list_head(one);
3077 3077 mutex_exit(list_lock);
3078 3078 return (one);
3079 3079 }
3080 3080
3081 3081 static struct CCB *
3082 3082 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3083 3083 {
3084 3084 struct list_head *first_complete_ccb_list = NULL;
3085 3085 struct CCB *ccb;
3086 3086
3087 3087 first_complete_ccb_list =
3088 3088 arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3089 3089 &acb->ccb_complete_list);
3090 3090 if (first_complete_ccb_list == NULL) {
3091 3091 return (NULL);
3092 3092 }
3093 3093 ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3094 3094 offsetof(struct CCB, complete_queue_pointer));
3095 3095 return (ccb);
3096 3096 }
3097 3097
3098 3098 static struct CCB *
3099 3099 arcmsr_get_freeccb(struct ACB *acb)
3100 3100 {
3101 3101 struct CCB *ccb;
3102 3102 int ccb_get_index, ccb_put_index;
3103 3103
3104 3104 mutex_enter(&acb->workingQ_mutex);
3105 3105 ccb_put_index = acb->ccb_put_index;
3106 3106 ccb_get_index = acb->ccb_get_index;
3107 3107 ccb = acb->ccbworkingQ[ccb_get_index];
3108 3108 ccb_get_index++;
3109 3109 if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3110 3110 ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3111 3111 if (ccb_put_index != ccb_get_index) {
3112 3112 acb->ccb_get_index = ccb_get_index;
3113 3113 arcmsr_init_list_head(&ccb->complete_queue_pointer);
3114 3114 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3115 3115 } else {
3116 3116 ccb = NULL;
3117 3117 }
3118 3118 mutex_exit(&acb->workingQ_mutex);
3119 3119 return (ccb);
3120 3120 }
3121 3121
3122 3122
3123 3123 static void
3124 3124 arcmsr_free_ccb(struct CCB *ccb)
3125 3125 {
3126 3126 struct ACB *acb = ccb->acb;
3127 3127
3128 3128 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3129 3129 return;
3130 3130 }
3131 3131 mutex_enter(&acb->workingQ_mutex);
3132 3132 ccb->ccb_state = ARCMSR_CCB_FREE;
3133 3133 ccb->pkt = NULL;
3134 3134 ccb->pkt_dma_handle = NULL;
3135 3135 ccb->ccb_flags = 0;
3136 3136 acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3137 3137 acb->ccb_put_index++;
3138 3138 if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3139 3139 acb->ccb_put_index =
3140 3140 acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3141 3141 mutex_exit(&acb->workingQ_mutex);
3142 3142 }
3143 3143
3144 3144
3145 3145 static void
3146 3146 arcmsr_ccbs_timeout(void* arg)
3147 3147 {
3148 3148 struct ACB *acb = (struct ACB *)arg;
3149 3149 struct CCB *ccb;
3150 3150 int i, instance, timeout_count = 0;
3151 3151 uint32_t intmask_org;
3152 3152 time_t current_time = ddi_get_time();
3153 3153
3154 3154 intmask_org = arcmsr_disable_allintr(acb);
3155 3155 mutex_enter(&acb->isr_mutex);
3156 3156 if (acb->ccboutstandingcount != 0) {
3157 3157 /* check each ccb */
3158 3158 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 3159 DDI_DMA_SYNC_FORKERNEL);
3160 3160 if (i != DDI_SUCCESS) {
3161 3161 if ((acb->timeout_id != 0) &&
3162 3162 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 3163 /* do pkt timeout check each 60 secs */
3164 3164 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 3165 (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3166 3166 drv_usectohz(1000000)));
3167 3167 }
3168 3168 mutex_exit(&acb->isr_mutex);
3169 3169 arcmsr_enable_allintr(acb, intmask_org);
3170 3170 return;
3171 3171 }
3172 3172 instance = ddi_get_instance(acb->dev_info);
3173 3173 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3174 3174 ccb = acb->pccb_pool[i];
3175 3175 if (ccb->acb != acb) {
3176 3176 break;
3177 3177 }
3178 3178 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3179 3179 continue;
3180 3180 }
3181 3181 if (ccb->pkt == NULL) {
3182 3182 continue;
3183 3183 }
3184 3184 if (ccb->pkt->pkt_time == 0) {
3185 3185 continue;
3186 3186 }
3187 3187 if (ccb->ccb_time >= current_time) {
3188 3188 continue;
3189 3189 }
3190 3190 int id = ccb->pkt->pkt_address.a_target;
3191 3191 int lun = ccb->pkt->pkt_address.a_lun;
3192 3192 if (ccb->ccb_state == ARCMSR_CCB_START) {
3193 3193 uint8_t *cdb = (uint8_t *)&ccb->arcmsr_cdb.Cdb;
3194 3194
3195 3195 timeout_count++;
3196 3196 arcmsr_warn(acb,
3197 3197 "scsi target %d lun %d cmd=0x%x "
3198 3198 "command timeout, ccb=0x%p",
3199 3199 instance, id, lun, *cdb, (void *)ccb);
3200 3200 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3201 3201 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3202 3202 ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3203 3203 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3204 3204 arcmsr_ccb_complete(ccb, 1);
3205 3205 continue;
3206 3206 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3207 3207 ARCMSR_CCB_CAN_BE_FREE) {
3208 3208 arcmsr_free_ccb(ccb);
3209 3209 }
3210 3210 }
3211 3211 }
3212 3212 if ((acb->timeout_id != 0) &&
3213 3213 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3214 3214 /* do pkt timeout check each 60 secs */
3215 3215 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3216 3216 (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3217 3217 }
3218 3218 mutex_exit(&acb->isr_mutex);
3219 3219 arcmsr_enable_allintr(acb, intmask_org);
3220 3220 }
3221 3221
3222 3222 static void
3223 3223 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3224 3224 {
3225 3225 struct CCB *ccb;
3226 3226 uint32_t intmask_org;
3227 3227 int i;
3228 3228
3229 3229 /* disable all outbound interrupts */
3230 3230 intmask_org = arcmsr_disable_allintr(acb);
3231 3231 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3232 3232 ccb = acb->pccb_pool[i];
3233 3233 if (ccb->ccb_state == ARCMSR_CCB_START) {
3234 3234 if ((target == ccb->pkt->pkt_address.a_target) &&
3235 3235 (lun == ccb->pkt->pkt_address.a_lun)) {
3236 3236 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3237 3237 ccb->pkt->pkt_reason = CMD_ABORTED;
3238 3238 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3239 3239 arcmsr_ccb_complete(ccb, 1);
3240 3240 arcmsr_log(acb, CE_NOTE,
3241 3241 "abort T%dL%d ccb", target, lun);
3242 3242 }
3243 3243 }
3244 3244 }
3245 3245 /* enable outbound Post Queue, outbound doorbell Interrupt */
3246 3246 arcmsr_enable_allintr(acb, intmask_org);
3247 3247 }
3248 3248
3249 3249 static int
3250 3250 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3251 3251 {
3252 3252 struct scsi_device sd;
3253 3253 dev_info_t *child;
3254 3254 int rval;
3255 3255
3256 3256 bzero(&sd, sizeof (struct scsi_device));
3257 3257 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3258 3258 sd.sd_address.a_target = (uint16_t)tgt;
3259 3259 sd.sd_address.a_lun = (uint8_t)lun;
3260 3260 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3261 3261 rval = scsi_hba_probe(&sd, NULL);
3262 3262 if (rval == SCSIPROBE_EXISTS) {
3263 3263 rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3264 3264 if (rval != NDI_SUCCESS) {
3265 3265 arcmsr_warn(acb, "unable to online T%dL%d",
3266 3266 tgt, lun);
3267 3267 } else {
3268 3268 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3269 3269 tgt, lun);
3270 3270 }
3271 3271 }
3272 3272 } else {
3273 3273 rval = scsi_hba_probe(&sd, NULL);
3274 3274 if (rval == SCSIPROBE_EXISTS)
3275 3275 rval = arcmsr_config_child(acb, &sd, NULL);
3276 3276 }
3277 3277 scsi_unprobe(&sd);
3278 3278 return (rval);
3279 3279 }
3280 3280
3281 3281 static void
3282 3282 arcmsr_dr_handle(struct ACB *acb)
3283 3283 {
3284 3284 char *acb_dev_map = (char *)acb->device_map;
3285 3285 char *devicemap;
3286 3286 char temp;
3287 3287 uint16_t target;
3288 3288 uint8_t lun;
3289 3289 char diff;
3290 3290 int circ = 0;
3291 3291 dev_info_t *dip;
3292 3292 ddi_acc_handle_t reg;
3293 3293
3294 3294 switch (acb->adapter_type) {
3295 3295 case ACB_ADAPTER_TYPE_A:
3296 3296 {
3297 3297 struct HBA_msgUnit *phbamu;
3298 3298
3299 3299 phbamu = (struct HBA_msgUnit *)acb->pmu;
3300 3300 devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3301 3301 reg = acb->reg_mu_acc_handle0;
3302 3302 break;
3303 3303 }
3304 3304
3305 3305 case ACB_ADAPTER_TYPE_B:
3306 3306 {
3307 3307 struct HBB_msgUnit *phbbmu;
3308 3308
3309 3309 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3310 3310 devicemap = (char *)
3311 3311 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3312 3312 reg = acb->reg_mu_acc_handle1;
3313 3313 break;
3314 3314 }
3315 3315
3316 3316 case ACB_ADAPTER_TYPE_C:
3317 3317 {
3318 3318 struct HBC_msgUnit *phbcmu;
3319 3319
3320 3320 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3321 3321 devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3322 3322 reg = acb->reg_mu_acc_handle0;
3323 3323 break;
3324 3324 }
3325 3325
3326 3326 }
3327 3327
3328 3328 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3329 3329 temp = CHIP_REG_READ8(reg, devicemap);
3330 3330 diff = (*acb_dev_map)^ temp;
3331 3331 if (diff != 0) {
3332 3332 *acb_dev_map = temp;
3333 3333 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3334 3334 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3335 3335 ndi_devi_enter(acb->dev_info, &circ);
3336 3336 acb->devstate[target][lun] =
3337 3337 ARECA_RAID_GOOD;
3338 3338 (void) arcmsr_scsi_device_probe(acb,
3339 3339 target, lun);
3340 3340 ndi_devi_exit(acb->dev_info, circ);
3341 3341 arcmsr_log(acb, CE_NOTE,
3342 3342 "T%dL%d on-line", target, lun);
3343 3343 } else if ((temp & 0x01) == 0 &&
3344 3344 (diff & 0x01) == 1) {
3345 3345 dip = arcmsr_find_child(acb, target,
3346 3346 lun);
3347 3347 if (dip != NULL) {
3348 3348 acb->devstate[target][lun] =
3349 3349 ARECA_RAID_GONE;
3350 3350 if (mutex_owned(&acb->
3351 3351 isr_mutex)) {
3352 3352 arcmsr_abort_dr_ccbs(
3353 3353 acb, target, lun);
3354 3354 (void)
3355 3355 ndi_devi_offline(
3356 3356 dip,
3357 3357 NDI_DEVI_REMOVE |
3358 3358 NDI_DEVI_OFFLINE);
3359 3359 } else {
3360 3360 mutex_enter(&acb->
3361 3361 isr_mutex);
3362 3362 arcmsr_abort_dr_ccbs(
3363 3363 acb, target, lun);
3364 3364 (void)
3365 3365 ndi_devi_offline(
3366 3366 dip,
3367 3367 NDI_DEVI_REMOVE |
3368 3368 NDI_DEVI_OFFLINE);
3369 3369 mutex_exit(&acb->
3370 3370 isr_mutex);
3371 3371 }
3372 3372 }
3373 3373 arcmsr_log(acb, CE_NOTE,
3374 3374 "T%dL%d off-line", target, lun);
3375 3375 }
3376 3376 temp >>= 1;
3377 3377 diff >>= 1;
3378 3378 }
3379 3379 }
3380 3380 devicemap++;
3381 3381 acb_dev_map++;
3382 3382 }
3383 3383 }
3384 3384
3385 3385
3386 3386 static void
3387 3387 arcmsr_devMap_monitor(void* arg)
3388 3388 {
3389 3389
3390 3390 struct ACB *acb = (struct ACB *)arg;
3391 3391 switch (acb->adapter_type) {
3392 3392 case ACB_ADAPTER_TYPE_A:
3393 3393 {
3394 3394 struct HBA_msgUnit *phbamu;
3395 3395
3396 3396 phbamu = (struct HBA_msgUnit *)acb->pmu;
3397 3397 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3398 3398 &phbamu->inbound_msgaddr0,
3399 3399 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3400 3400 break;
3401 3401 }
3402 3402
3403 3403 case ACB_ADAPTER_TYPE_B:
3404 3404 {
3405 3405 struct HBB_msgUnit *phbbmu;
3406 3406
3407 3407 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3408 3408 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3409 3409 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3410 3410 ARCMSR_MESSAGE_GET_CONFIG);
3411 3411 break;
3412 3412 }
3413 3413
3414 3414 case ACB_ADAPTER_TYPE_C:
3415 3415 {
3416 3416 struct HBC_msgUnit *phbcmu;
3417 3417
3418 3418 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3419 3419 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3420 3420 &phbcmu->inbound_msgaddr0,
3421 3421 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3422 3422 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3423 3423 &phbcmu->inbound_doorbell,
3424 3424 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3425 3425 break;
3426 3426 }
3427 3427
3428 3428 }
3429 3429
3430 3430 if ((acb->timeout_id != 0) &&
3431 3431 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3432 3432 /* do pkt timeout check each 5 secs */
3433 3433 acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3434 3434 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3435 3435 }
3436 3436 }
3437 3437
3438 3438
3439 3439 static uint32_t
3440 3440 arcmsr_disable_allintr(struct ACB *acb) {
3441 3441
3442 3442 uint32_t intmask_org;
3443 3443
3444 3444 switch (acb->adapter_type) {
3445 3445 case ACB_ADAPTER_TYPE_A:
3446 3446 {
3447 3447 struct HBA_msgUnit *phbamu;
3448 3448
3449 3449 phbamu = (struct HBA_msgUnit *)acb->pmu;
3450 3450 /* disable all outbound interrupt */
3451 3451 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3452 3452 &phbamu->outbound_intmask);
3453 3453 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3454 3454 &phbamu->outbound_intmask,
3455 3455 intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3456 3456 break;
3457 3457 }
3458 3458
3459 3459 case ACB_ADAPTER_TYPE_B:
3460 3460 {
3461 3461 struct HBB_msgUnit *phbbmu;
3462 3462
3463 3463 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3464 3464 /* disable all outbound interrupt */
3465 3465 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3466 3466 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3467 3467 /* disable all interrupts */
3468 3468 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3469 3469 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3470 3470 break;
3471 3471 }
3472 3472
3473 3473 case ACB_ADAPTER_TYPE_C:
3474 3474 {
3475 3475 struct HBC_msgUnit *phbcmu;
3476 3476
3477 3477 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3478 3478 /* disable all outbound interrupt */
3479 3479 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3480 3480 &phbcmu->host_int_mask); /* disable outbound message0 int */
3481 3481 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3482 3482 &phbcmu->host_int_mask,
3483 3483 intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3484 3484 break;
3485 3485 }
3486 3486
3487 3487 }
3488 3488 return (intmask_org);
3489 3489 }
3490 3490
3491 3491
3492 3492 static void
3493 3493 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org) {
3494 3494
3495 3495 int mask;
3496 3496
3497 3497 switch (acb->adapter_type) {
3498 3498 case ACB_ADAPTER_TYPE_A:
3499 3499 {
3500 3500 struct HBA_msgUnit *phbamu;
3501 3501
3502 3502 phbamu = (struct HBA_msgUnit *)acb->pmu;
3503 3503 /*
3504 3504 * enable outbound Post Queue, outbound doorbell message0
3505 3505 * Interrupt
3506 3506 */
3507 3507 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3508 3508 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3509 3509 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3510 3510 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3511 3511 &phbamu->outbound_intmask, intmask_org & mask);
3512 3512 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3513 3513 break;
3514 3514 }
3515 3515
3516 3516 case ACB_ADAPTER_TYPE_B:
3517 3517 {
3518 3518 struct HBB_msgUnit *phbbmu;
3519 3519
3520 3520 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3521 3521 mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3522 3522 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3523 3523 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3524 3524 /* 1=interrupt enable, 0=interrupt disable */
3525 3525 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3526 3526 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3527 3527 intmask_org | mask);
3528 3528 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3529 3529 break;
3530 3530 }
3531 3531
3532 3532 case ACB_ADAPTER_TYPE_C:
3533 3533 {
3534 3534 struct HBC_msgUnit *phbcmu;
3535 3535
3536 3536 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3537 3537 /* enable outbound Post Queue,outbound doorbell Interrupt */
3538 3538 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3539 3539 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3540 3540 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3541 3541 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3542 3542 &phbcmu->host_int_mask, intmask_org & mask);
3543 3543 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3544 3544 break;
3545 3545 }
3546 3546
3547 3547 }
3548 3548 }
3549 3549
3550 3550
3551 3551 static void
3552 3552 arcmsr_iop_parking(struct ACB *acb)
3553 3553 {
3554 3554 /* stop adapter background rebuild */
3555 3555 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3556 3556 uint32_t intmask_org;
3557 3557
3558 3558 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3559 3559 /* disable all outbound interrupt */
3560 3560 intmask_org = arcmsr_disable_allintr(acb);
3561 3561 switch (acb->adapter_type) {
3562 3562 case ACB_ADAPTER_TYPE_A:
3563 3563 arcmsr_stop_hba_bgrb(acb);
3564 3564 arcmsr_flush_hba_cache(acb);
3565 3565 break;
3566 3566
3567 3567 case ACB_ADAPTER_TYPE_B:
3568 3568 arcmsr_stop_hbb_bgrb(acb);
3569 3569 arcmsr_flush_hbb_cache(acb);
3570 3570 break;
3571 3571
3572 3572 case ACB_ADAPTER_TYPE_C:
3573 3573 arcmsr_stop_hbc_bgrb(acb);
3574 3574 arcmsr_flush_hbc_cache(acb);
3575 3575 break;
3576 3576 }
3577 3577 /*
3578 3578 * enable outbound Post Queue
3579 3579 * enable outbound doorbell Interrupt
3580 3580 */
3581 3581 arcmsr_enable_allintr(acb, intmask_org);
3582 3582 }
3583 3583 }
3584 3584
3585 3585
/*
 * Poll the type-A (HBA) adapter's outbound interrupt status register
 * until the IOP raises MESSAGE0 completion, acknowledging (clearing)
 * the interrupt before returning.  Polls in 10ms steps, 100 polls per
 * pass, for up to 20 passes (~20 seconds total).  During a system
 * panic the wait is cut short: the interrupt is cleared and TRUE is
 * returned so panic-time I/O can make progress.
 *
 * Returns TRUE when the message interrupt was seen (or on panic),
 * FALSE on timeout.
 */
static uint8_t
arcmsr_hba_wait_msgint_ready(struct ACB *acb)
{
	uint32_t i;
	uint8_t retries = 0x00;
	struct HBA_msgUnit *phbamu;


	phbamu = (struct HBA_msgUnit *)acb->pmu;

	do {
		for (i = 0; i < 100; i++) {
			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbamu->outbound_intstatus) &
			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				/* clear interrupt */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbamu->outbound_intstatus,
				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
				return (TRUE);
			}
			drv_usecwait(10000);
			if (ddi_in_panic()) {
				/* clear interrupts */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbamu->outbound_intstatus,
				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
				return (TRUE);
			}
		} /* max 1 second */
	} while (retries++ < 20); /* max 20 seconds */
	return (FALSE);
}
3619 3619
3620 3620
/*
 * Poll the type-B (HBB) adapter's iop2drv doorbell until the IOP posts
 * MESSAGE_CMD_DONE.  On completion the doorbell is cleared and an
 * end-of-interrupt handshake is written back to the IOP (extra step
 * the type-A/C variants do not need).  Polls in 10ms steps, 100 polls
 * per pass, for up to 20 passes (~20 seconds total).  During a system
 * panic the interrupt is cleared and TRUE is returned immediately.
 *
 * Returns TRUE when the message handshake completed (or on panic),
 * FALSE on timeout.
 */
static uint8_t
arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
{
	struct HBB_msgUnit *phbbmu;
	uint32_t i;
	uint8_t retries = 0x00;

	phbbmu = (struct HBB_msgUnit *)acb->pmu;

	do {
		for (i = 0; i < 100; i++) {
			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
				/* clear interrupt */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
				return (TRUE);
			}
			drv_usecwait(10000);
			if (ddi_in_panic()) {
				/* clear interrupts */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
				return (TRUE);
			}
		} /* max 1 second */
	} while (retries++ < 20); /* max 20 seconds */

	return (FALSE);
}
3660 3660
3661 3661
/*
 * Poll the type-C (HBC) adapter's outbound doorbell until the IOP
 * posts MESSAGE_CMD_DONE, then acknowledge via the dedicated
 * doorbell-clear register.  Polls in 10ms steps, 100 polls per pass,
 * for up to 20 passes (~20 seconds total).  During a system panic the
 * doorbell is cleared and TRUE is returned immediately.
 *
 * Returns TRUE when the message interrupt was seen (or on panic),
 * FALSE on timeout.
 */
static uint8_t
arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
{
	uint32_t i;
	uint8_t retries = 0x00;
	struct HBC_msgUnit *phbcmu;
	uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;


	phbcmu = (struct HBC_msgUnit *)acb->pmu;

	do {
		for (i = 0; i < 100; i++) {
			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbcmu->outbound_doorbell) &
			    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
				/* clear interrupt */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbcmu->outbound_doorbell_clear, c);
				return (TRUE);
			}
			drv_usecwait(10000);
			if (ddi_in_panic()) {
				/* clear interrupts */
				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
				    &phbcmu->outbound_doorbell_clear, c);
				return (TRUE);
			}
		} /* max 1 second */
	} while (retries++ < 20); /* max 20 seconds */
	return (FALSE);
}
3694 3694
3695 3695 static void
3696 3696 arcmsr_flush_hba_cache(struct ACB *acb) {
3697 3697
3698 3698 struct HBA_msgUnit *phbamu;
3699 3699 int retry_count = 30;
3700 3700
3701 3701 /* enlarge wait flush adapter cache time: 10 minutes */
3702 3702
3703 3703 phbamu = (struct HBA_msgUnit *)acb->pmu;
3704 3704
3705 3705 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3706 3706 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3707 3707 do {
3708 3708 if (arcmsr_hba_wait_msgint_ready(acb)) {
3709 3709 break;
3710 3710 } else {
3711 3711 retry_count--;
3712 3712 }
3713 3713 } while (retry_count != 0);
3714 3714 }
3715 3715
3716 3716
3717 3717
3718 3718 static void
3719 3719 arcmsr_flush_hbb_cache(struct ACB *acb) {
3720 3720
3721 3721 struct HBB_msgUnit *phbbmu;
3722 3722 int retry_count = 30;
3723 3723
3724 3724 /* enlarge wait flush adapter cache time: 10 minutes */
3725 3725
3726 3726 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3727 3727 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3728 3728 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3729 3729 ARCMSR_MESSAGE_FLUSH_CACHE);
3730 3730 do {
3731 3731 if (arcmsr_hbb_wait_msgint_ready(acb)) {
3732 3732 break;
3733 3733 } else {
3734 3734 retry_count--;
3735 3735 }
3736 3736 } while (retry_count != 0);
3737 3737 }
3738 3738
3739 3739
3740 3740 static void
3741 3741 arcmsr_flush_hbc_cache(struct ACB *acb)
3742 3742 {
3743 3743 struct HBC_msgUnit *phbcmu;
3744 3744 int retry_count = 30;
3745 3745
3746 3746 /* enlarge wait flush adapter cache time: 10 minutes */
3747 3747
3748 3748 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3749 3749
3750 3750 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3751 3751 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3752 3752 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3753 3753 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3754 3754 do {
3755 3755 if (arcmsr_hbc_wait_msgint_ready(acb)) {
3756 3756 break;
3757 3757 } else {
3758 3758 retry_count--;
3759 3759 }
3760 3760 } while (retry_count != 0);
3761 3761 }
3762 3762
3763 3763
3764 3764
3765 3765 static uint8_t
3766 3766 arcmsr_abort_hba_allcmd(struct ACB *acb)
3767 3767 {
3768 3768 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3769 3769
3770 3770 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3771 3771 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3772 3772
3773 3773 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3774 3774 arcmsr_warn(acb,
3775 3775 "timeout while waiting for 'abort all "
3776 3776 "outstanding commands'");
3777 3777 return (0xff);
3778 3778 }
3779 3779 return (0x00);
3780 3780 }
3781 3781
3782 3782
3783 3783
3784 3784 static uint8_t
3785 3785 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3786 3786 {
3787 3787 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3788 3788
3789 3789 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3790 3790 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3791 3791
3792 3792 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3793 3793 arcmsr_warn(acb,
3794 3794 "timeout while waiting for 'abort all "
3795 3795 "outstanding commands'");
3796 3796 return (0x00);
3797 3797 }
3798 3798 return (0x00);
3799 3799 }
3800 3800
3801 3801
3802 3802 static uint8_t
3803 3803 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3804 3804 {
3805 3805 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3806 3806
3807 3807 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3808 3808 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3809 3809 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3810 3810 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3811 3811
3812 3812 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3813 3813 arcmsr_warn(acb,
3814 3814 "timeout while waiting for 'abort all "
3815 3815 "outstanding commands'");
3816 3816 return (0xff);
3817 3817 }
3818 3818 return (0x00);
3819 3819 }
3820 3820
3821 3821
/*
 * Drain the adapter's reply (outbound post) queue as part of aborting
 * all outstanding I/O: every queued reply descriptor is translated
 * back to its CCB and completed via arcmsr_drain_donequeue(), with an
 * error flag taken from the reply's ERROR_MODE bit.  Draining is
 * bounded by ARCMSR_MAX_OUTSTANDING_CMD to avoid spinning forever on
 * a wedged queue.
 */
static void
arcmsr_done4abort_postqueue(struct ACB *acb)
{

	struct CCB *ccb;
	uint32_t flag_ccb;
	int i = 0;
	boolean_t error;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	{
		struct HBA_msgUnit *phbamu;
		uint32_t outbound_intstatus;

		phbamu = (struct HBA_msgUnit *)acb->pmu;
		/* clear and abort all outbound posted Q */
		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
		/* clear interrupt */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intstatus, outbound_intstatus);
		/* pop replies until the queue reads empty (0xFFFFFFFF) */
		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* frame must be 32 bytes aligned */
			/* the CDB is the first field of the CCB */
			ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
			/* check if command done with no error */
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
			    B_TRUE : B_FALSE;
			arcmsr_drain_donequeue(acb, ccb, error);
		}
		break;
	}

	case ACB_ADAPTER_TYPE_B:
	{
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		/* clear all outbound posted Q */
		/* clear doorbell interrupt */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
		/* type B keeps its reply FIFO in a host-memory array */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
				phbbmu->done_qbuffer[i] = 0;
				/* frame must be 32 bytes aligned */
				ccb = NumToPtr((acb->vir2phy_offset +
				    (flag_ccb << 5)));
				/* check if command done with no error */
				error =
				    (flag_ccb &
				    ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
				    B_TRUE : B_FALSE;
				arcmsr_drain_donequeue(acb, ccb, error);
			}
			phbbmu->post_qbuffer[i] = 0;
		} /* drain reply FIFO */
		phbbmu->doneq_index = 0;
		phbbmu->postq_index = 0;
		break;
	}

	case ACB_ADAPTER_TYPE_C:
	{
		struct HBC_msgUnit *phbcmu;
		uint32_t ccb_cdb_phy;

		phbcmu = (struct HBC_msgUnit *)acb->pmu;
		/* pop replies while the post-queue ISR bit stays set */
		while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbcmu->host_int_status) &
		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* need to do */
			flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbcmu->outbound_queueport_low);
			/* frame must be 32 bytes aligned */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
			    B_TRUE : B_FALSE;
			arcmsr_drain_donequeue(acb, ccb, error);
		}
		break;
	}

	}
}
/*
 * Routine Description: probe the IOP by sending GET_CONFIG and waiting
 * for the message interrupt, to verify the adapter still responds.
 * Arguments: acb - per-adapter soft state.
 * Return Value: 0 if the IOP answered, 0xFF on timeout (the bus is
 * then flagged as hung).
 */
3918 3918 static uint8_t
3919 3919 arcmsr_get_echo_from_iop(struct ACB *acb)
3920 3920 {
3921 3921 uint32_t intmask_org;
3922 3922 uint8_t rtnval = 0;
3923 3923
3924 3924 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3925 3925 struct HBA_msgUnit *phbamu;
3926 3926
3927 3927 phbamu = (struct HBA_msgUnit *)acb->pmu;
3928 3928 intmask_org = arcmsr_disable_allintr(acb);
3929 3929 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3930 3930 &phbamu->inbound_msgaddr0,
3931 3931 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3932 3932 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3933 3933 arcmsr_warn(acb, "try to get echo from iop,"
3934 3934 "... timeout ...");
3935 3935 acb->acb_flags |= ACB_F_BUS_HANG_ON;
3936 3936 rtnval = 0xFF;
3937 3937 }
3938 3938 /* enable all outbound interrupt */
3939 3939 arcmsr_enable_allintr(acb, intmask_org);
3940 3940 }
3941 3941 return (rtnval);
3942 3942 }
3943 3943
/*
 * Routine Description: reset the 80331 IOP — abort and complete all
 * outstanding commands, or, when nothing is outstanding, just verify
 * that the IOP still responds.
 * Arguments: acb - per-adapter soft state.
 * Return Value: 0 on success, 0xFF if the IOP did not respond.
 */
static uint8_t
arcmsr_iop_reset(struct ACB *acb)
{
	struct CCB *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0;
	int i = 0;

	if (acb->ccboutstandingcount > 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_allintr(acb);
		/* clear and abort all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
		    0xFF : arcmsr_abort_host_command(acb);

		/* fail back every CCB that was still started */
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->ccb_state == ARCMSR_CCB_START) {
				/* ccb->ccb_state = ARCMSR_CCB_RESET; */
				ccb->pkt->pkt_reason = CMD_RESET;
				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* everything returned: force the counter to zero */
		atomic_and_32(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_allintr(acb, intmask_org);
	} else {
		/* nothing outstanding: just check the IOP still answers */
		rtnval = arcmsr_get_echo_from_iop(acb);
	}
	return (rtnval);
}
3983 3983
3984 3984
3985 3985 static struct QBUFFER *
3986 3986 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3987 3987 {
3988 3988 struct QBUFFER *qb;
3989 3989
3990 3990 switch (acb->adapter_type) {
3991 3991 case ACB_ADAPTER_TYPE_A:
3992 3992 {
3993 3993 struct HBA_msgUnit *phbamu;
3994 3994
3995 3995 phbamu = (struct HBA_msgUnit *)acb->pmu;
3996 3996 qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3997 3997 break;
3998 3998 }
3999 3999
4000 4000 case ACB_ADAPTER_TYPE_B:
4001 4001 {
4002 4002 struct HBB_msgUnit *phbbmu;
4003 4003
4004 4004 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4005 4005 qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4006 4006 break;
4007 4007 }
4008 4008
4009 4009 case ACB_ADAPTER_TYPE_C:
4010 4010 {
4011 4011 struct HBC_msgUnit *phbcmu;
4012 4012
4013 4013 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4014 4014 qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4015 4015 break;
4016 4016 }
4017 4017
4018 4018 }
4019 4019 return (qb);
4020 4020 }
4021 4021
4022 4022
4023 4023 static struct QBUFFER *
4024 4024 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4025 4025 {
4026 4026 struct QBUFFER *qbuffer = NULL;
4027 4027
4028 4028 switch (acb->adapter_type) {
4029 4029 case ACB_ADAPTER_TYPE_A:
4030 4030 {
4031 4031 struct HBA_msgUnit *phbamu;
4032 4032
4033 4033 phbamu = (struct HBA_msgUnit *)acb->pmu;
4034 4034 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4035 4035 break;
4036 4036 }
4037 4037
4038 4038 case ACB_ADAPTER_TYPE_B:
4039 4039 {
4040 4040 struct HBB_msgUnit *phbbmu;
4041 4041
4042 4042 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4043 4043 qbuffer = (struct QBUFFER *)
4044 4044 &phbbmu->hbb_rwbuffer->message_wbuffer;
4045 4045 break;
4046 4046 }
4047 4047
4048 4048 case ACB_ADAPTER_TYPE_C:
4049 4049 {
4050 4050 struct HBC_msgUnit *phbcmu;
4051 4051
4052 4052 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4053 4053 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4054 4054 break;
4055 4055 }
4056 4056
4057 4057 }
4058 4058 return (qbuffer);
4059 4059 }
4060 4060
4061 4061
4062 4062
4063 4063 static void
4064 4064 arcmsr_iop_message_read(struct ACB *acb)
4065 4065 {
4066 4066 switch (acb->adapter_type) {
4067 4067 case ACB_ADAPTER_TYPE_A:
4068 4068 {
4069 4069 struct HBA_msgUnit *phbamu;
4070 4070
4071 4071 phbamu = (struct HBA_msgUnit *)acb->pmu;
4072 4072 /* let IOP know the data has been read */
4073 4073 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4074 4074 &phbamu->inbound_doorbell,
4075 4075 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4076 4076 break;
4077 4077 }
4078 4078
4079 4079 case ACB_ADAPTER_TYPE_B:
4080 4080 {
4081 4081 struct HBB_msgUnit *phbbmu;
4082 4082
4083 4083 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4084 4084 /* let IOP know the data has been read */
4085 4085 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4086 4086 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4087 4087 ARCMSR_DRV2IOP_DATA_READ_OK);
4088 4088 break;
4089 4089 }
4090 4090
4091 4091 case ACB_ADAPTER_TYPE_C:
4092 4092 {
4093 4093 struct HBC_msgUnit *phbcmu;
4094 4094
4095 4095 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4096 4096 /* let IOP know data has been read */
4097 4097 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4098 4098 &phbcmu->inbound_doorbell,
4099 4099 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4100 4100 break;
4101 4101 }
4102 4102
4103 4103 }
4104 4104 }
4105 4105
4106 4106
4107 4107
4108 4108 static void
4109 4109 arcmsr_iop_message_wrote(struct ACB *acb)
4110 4110 {
4111 4111 switch (acb->adapter_type) {
4112 4112 case ACB_ADAPTER_TYPE_A: {
4113 4113 struct HBA_msgUnit *phbamu;
4114 4114
4115 4115 phbamu = (struct HBA_msgUnit *)acb->pmu;
4116 4116 /*
4117 4117 * push inbound doorbell tell iop, driver data write ok
4118 4118 * and wait reply on next hwinterrupt for next Qbuffer post
4119 4119 */
4120 4120 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4121 4121 &phbamu->inbound_doorbell,
4122 4122 ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4123 4123 break;
4124 4124 }
4125 4125
4126 4126 case ACB_ADAPTER_TYPE_B:
4127 4127 {
4128 4128 struct HBB_msgUnit *phbbmu;
4129 4129
4130 4130 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4131 4131 /*
4132 4132 * push inbound doorbell tell iop, driver data was writen
4133 4133 * successfully, then await reply on next hwinterrupt for
4134 4134 * next Qbuffer post
4135 4135 */
4136 4136 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4137 4137 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4138 4138 ARCMSR_DRV2IOP_DATA_WRITE_OK);
4139 4139 break;
4140 4140 }
4141 4141
4142 4142 case ACB_ADAPTER_TYPE_C:
4143 4143 {
4144 4144 struct HBC_msgUnit *phbcmu;
4145 4145
4146 4146 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4147 4147 /*
4148 4148 * push inbound doorbell tell iop, driver data write ok
4149 4149 * and wait reply on next hwinterrupt for next Qbuffer post
4150 4150 */
4151 4151 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4152 4152 &phbcmu->inbound_doorbell,
4153 4153 ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4154 4154 break;
4155 4155 }
4156 4156
4157 4157 }
4158 4158 }
4159 4159
4160 4160
4161 4161
4162 4162 static void
4163 4163 arcmsr_post_ioctldata2iop(struct ACB *acb)
4164 4164 {
4165 4165 uint8_t *pQbuffer;
4166 4166 struct QBUFFER *pwbuffer;
4167 4167 uint8_t *iop_data;
4168 4168 int32_t allxfer_len = 0;
4169 4169
4170 4170 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4171 4171 iop_data = (uint8_t *)pwbuffer->data;
4172 4172 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4173 4173 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4174 4174 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4175 4175 (allxfer_len < 124)) {
4176 4176 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4177 4177 (void) memcpy(iop_data, pQbuffer, 1);
4178 4178 acb->wqbuf_firstidx++;
4179 4179 /* if last index number set it to 0 */
4180 4180 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4181 4181 iop_data++;
4182 4182 allxfer_len++;
4183 4183 }
4184 4184 pwbuffer->data_len = allxfer_len;
4185 4185 /*
4186 4186 * push inbound doorbell and wait reply at hwinterrupt
4187 4187 * routine for next Qbuffer post
4188 4188 */
4189 4189 arcmsr_iop_message_wrote(acb);
4190 4190 }
4191 4191 }
4192 4192
4193 4193
4194 4194
4195 4195 static void
4196 4196 arcmsr_stop_hba_bgrb(struct ACB *acb)
4197 4197 {
4198 4198 struct HBA_msgUnit *phbamu;
4199 4199
4200 4200 phbamu = (struct HBA_msgUnit *)acb->pmu;
4201 4201
4202 4202 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4203 4203 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4204 4204 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4205 4205 if (!arcmsr_hba_wait_msgint_ready(acb))
4206 4206 arcmsr_warn(acb,
4207 4207 "timeout while waiting for background rebuild completion");
4208 4208 }
4209 4209
4210 4210
4211 4211 static void
4212 4212 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4213 4213 {
4214 4214 struct HBB_msgUnit *phbbmu;
4215 4215
4216 4216 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4217 4217
4218 4218 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4219 4219 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4220 4220 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4221 4221
4222 4222 if (!arcmsr_hbb_wait_msgint_ready(acb))
4223 4223 arcmsr_warn(acb,
4224 4224 "timeout while waiting for background rebuild completion");
4225 4225 }
4226 4226
4227 4227
4228 4228 static void
4229 4229 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4230 4230 {
4231 4231 struct HBC_msgUnit *phbcmu;
4232 4232
4233 4233 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4234 4234
4235 4235 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4236 4236 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4237 4237 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4238 4238 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4239 4239 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4240 4240 if (!arcmsr_hbc_wait_msgint_ready(acb))
4241 4241 arcmsr_warn(acb,
4242 4242 "timeout while waiting for background rebuild completion");
4243 4243 }
4244 4244
4245 4245
/*
 * Handle an Areca pass-through "message" CDB.  The 4-byte control
 * code is assembled from CDB bytes 5-8 and selects an operation on
 * the driver<->IOP message queues: read the request buffer, write the
 * write buffer, clear one or both queues, flush the adapter cache, or
 * park the IOP.  User data is exchanged through the
 * struct CMD_MESSAGE_FIELD mapped at the start of the packet's buffer.
 *
 * Returns 0 on success, ARCMSR_MESSAGE_FAIL on any error (oversized
 * transfer, full write queue, unknown control code).
 */
static int
arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	struct CCB *ccb = pkt->pkt_ha_private;
	struct buf *bp = ccb->bp;
	uint8_t *pQbuffer;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	uint32_t controlcode;


	/* 4 bytes: Areca io control code */
	controlcode =
	    (uint32_t)pkt->pkt_cdbp[5] << 24 |
	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
	    (uint32_t)pkt->pkt_cdbp[8];

	/* map the buffer into kernel address space if needed */
	if (bp->b_flags & (B_PHYS | B_PAGEIO))
		bp_mapin(bp);

	buffer = bp->b_un.b_addr;
	transfer_len = bp->b_bcount;
	/* reject transfers larger than the message structure */
	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}

	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER:
	{
		unsigned long *ver_addr;
		uint8_t *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);

		/* drain the driver's receive ring into the staging buffer */
		ptmpQbuffer = (uint8_t *)ver_addr;
		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
		    (allxfer_len < (MSGDATABUFLEN - 1))) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstidx++;
			/* wrap the ring index */
			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}

		/* ring had overflowed: pull the held IOP data in now */
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER *prbuffer;
			uint8_t *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = (uint8_t *)prbuffer->data;
			iop_len = (int32_t)prbuffer->data_len;

			while (iop_len > 0) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
				(void) memcpy(pQbuffer, iop_data, 1);
				acb->rqbuf_lastidx++;
				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}

		(void) memcpy(pcmdmessagefld->messagedatabuffer,
		    (uint8_t *)ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
	{
		uint8_t *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstidx,
		    wqbuf_lastidx;
		uint8_t *ptmpuserbuffer;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);

		/* copy (clamped) user data into the staging buffer */
		ptmpuserbuffer = ver_addr;
		user_len = min(pcmdmessagefld->cmdmessage.Length,
		    MSGDATABUFLEN);
		(void) memcpy(ptmpuserbuffer,
		    pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastidx = acb->wqbuf_lastidx;
		wqbuf_firstidx = acb->wqbuf_firstidx;
		if (wqbuf_lastidx != wqbuf_firstidx) {
			/*
			 * Previous write data is still queued: flush it to
			 * the IOP and fail this request with ILLEGAL REQUEST
			 * sense so the caller retries.
			 */
			struct scsi_arq_status *arq_status;

			arcmsr_post_ioctldata2iop(acb);
			arq_status = (struct scsi_arq_status *)
			    (intptr_t)(pkt->pkt_scbp);
			bzero((caddr_t)arq_status,
			    sizeof (struct scsi_arq_status));
			arq_status->sts_rqpkt_reason = CMD_CMPLT;
			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_XFERRED_DATA | STATE_GOT_STATUS);

			arq_status->sts_rqpkt_statistics =
			    pkt->pkt_statistics;
			arq_status->sts_rqpkt_resid = 0;
			/*
			 * NOTE(review): the address of a struct member is
			 * never NULL, so this condition is always true.
			 */
			if (&arq_status->sts_sensedata != NULL) {
				struct scsi_extended_sense *sts_sensedata;

				sts_sensedata = &arq_status->sts_sensedata;

				/* has error report sensedata */
				sts_sensedata->es_code = 0x0;
				sts_sensedata->es_valid = 0x01;
				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
				/* AdditionalSenseLength */
				sts_sensedata->es_add_len = 0x0A;
				/* AdditionalSenseCode */
				sts_sensedata->es_add_code = 0x20;
			}
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			/* free space remaining in the write ring */
			my_empty_len = (wqbuf_firstidx-wqbuf_lastidx - 1) &
			    (ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer = &acb->wqbuffer[
					    acb->wqbuf_lastidx];
					(void) memcpy(pQbuffer,
					    ptmpuserbuffer, 1);
					acb->wqbuf_lastidx++;
					acb->wqbuf_lastidx %=
					    ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags &
				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* ring too full: fail with sense data */
				struct scsi_arq_status *arq_status;

				/* has error report sensedata */
				arq_status = (struct scsi_arq_status *)
				    (intptr_t)(pkt->pkt_scbp);
				bzero((caddr_t)arq_status,
				    sizeof (struct scsi_arq_status));
				arq_status->sts_rqpkt_reason = CMD_CMPLT;
				arq_status->sts_rqpkt_state =
				    (STATE_GOT_BUS |
				    STATE_GOT_TARGET |STATE_SENT_CMD |
				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
				arq_status->sts_rqpkt_statistics =
				    pkt->pkt_statistics;
				arq_status->sts_rqpkt_resid = 0;
				/* NOTE(review): always true, see above */
				if (&arq_status->sts_sensedata != NULL) {
					struct scsi_extended_sense *
					    sts_sensedata;

					sts_sensedata =
					    &arq_status->sts_sensedata;

					/* has error report sensedata */
					sts_sensedata->es_code = 0x0;
					sts_sensedata->es_valid = 0x01;
					sts_sensedata->es_key =
					    KEY_ILLEGAL_REQUEST;
					/* AdditionalSenseLength */
					sts_sensedata->es_add_len = 0x0A;
					/* AdditionalSenseCode */
					sts_sensedata->es_add_code = 0x20;
				}
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
		pQbuffer = acb->rqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
		pQbuffer = acb->wqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		pQbuffer = acb->rqbuffer;
		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		break;

	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
		pcmdmessagefld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	/*
	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
	 */
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_flush_hba_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_B:
			arcmsr_flush_hbb_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_C:
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		break;
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}

message_out:

	return (retvalue);
}
4515 4515
4516 4516
4517 4517
4518 4518
/*
 * Quiesce the adapter before detach: mask all outbound interrupts,
 * stop background rebuild, flush the controller cache, and fail back
 * every still-outstanding command with CMD_ABORTED.
 */
static void
arcmsr_pcidev_disattach(struct ACB *acb)
{
	struct CCB *ccb;
	int i = 0;

	/* disable all outbound interrupts */
	(void) arcmsr_disable_allintr(acb);
	/* stop adapter background rebuild */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_stop_hba_bgrb(acb);
		arcmsr_flush_hba_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_stop_hbb_bgrb(acb);
		arcmsr_flush_hbb_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_stop_hbc_bgrb(acb);
		arcmsr_flush_hbc_cache(acb);
		break;
	}
	/* abort all outstanding commands */
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	if (acb->ccboutstandingcount != 0) {
		/* clear and abort all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* talk to iop outstanding command aborted */
		(void) arcmsr_abort_host_command(acb);

		/* complete every CCB still marked as started */
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->ccb_state == ARCMSR_CCB_START) {
				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
				ccb->pkt->pkt_reason = CMD_ABORTED;
				ccb->pkt->pkt_statistics |= STAT_ABORTED;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}
}
4563 4563
4564 4564 /* get firmware miscellaneous data */
4565 4565 static void
4566 4566 arcmsr_get_hba_config(struct ACB *acb)
4567 4567 {
4568 4568 struct HBA_msgUnit *phbamu;
4569 4569
4570 4570 char *acb_firm_model;
4571 4571 char *acb_firm_version;
4572 4572 char *acb_device_map;
4573 4573 char *iop_firm_model;
4574 4574 char *iop_firm_version;
4575 4575 char *iop_device_map;
4576 4576 int count;
4577 4577
4578 4578 phbamu = (struct HBA_msgUnit *)acb->pmu;
4579 4579 acb_firm_model = acb->firm_model;
4580 4580 acb_firm_version = acb->firm_version;
4581 4581 acb_device_map = acb->device_map;
4582 4582 /* firm_model, 15 */
4583 4583 iop_firm_model =
4584 4584 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4585 4585 /* firm_version, 17 */
4586 4586 iop_firm_version =
4587 4587 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4588 4588
4589 4589 /* device_map, 21 */
4590 4590 iop_device_map =
4591 4591 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4592 4592
4593 4593 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4594 4594 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4595 4595
4596 4596 if (!arcmsr_hba_wait_msgint_ready(acb))
4597 4597 arcmsr_warn(acb,
4598 4598 "timeout while waiting for adapter firmware "
4599 4599 "miscellaneous data");
4600 4600
4601 4601 count = 8;
4602 4602 while (count) {
4603 4603 *acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4604 4604 iop_firm_model);
4605 4605 acb_firm_model++;
4606 4606 iop_firm_model++;
4607 4607 count--;
4608 4608 }
4609 4609
4610 4610 count = 16;
4611 4611 while (count) {
4612 4612 *acb_firm_version =
4613 4613 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4614 4614 acb_firm_version++;
4615 4615 iop_firm_version++;
4616 4616 count--;
4617 4617 }
4618 4618
4619 4619 count = 16;
4620 4620 while (count) {
4621 4621 *acb_device_map =
4622 4622 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4623 4623 acb_device_map++;
4624 4624 iop_device_map++;
4625 4625 count--;
4626 4626 }
4627 4627
4628 4628 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4629 4629 acb->firm_version);
4630 4630
4631 4631 /* firm_request_len, 1 */
4632 4632 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4633 4633 &phbamu->msgcode_rwbuffer[1]);
4634 4634 /* firm_numbers_queue, 2 */
4635 4635 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4636 4636 &phbamu->msgcode_rwbuffer[2]);
4637 4637 /* firm_sdram_size, 3 */
4638 4638 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4639 4639 &phbamu->msgcode_rwbuffer[3]);
4640 4640 /* firm_ide_channels, 4 */
4641 4641 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4642 4642 &phbamu->msgcode_rwbuffer[4]);
4643 4643 }
4644 4644
4645 4645 /* get firmware miscellaneous data */
4646 4646 static void
4647 4647 arcmsr_get_hbb_config(struct ACB *acb)
4648 4648 {
4649 4649 struct HBB_msgUnit *phbbmu;
4650 4650 char *acb_firm_model;
4651 4651 char *acb_firm_version;
4652 4652 char *acb_device_map;
4653 4653 char *iop_firm_model;
4654 4654 char *iop_firm_version;
4655 4655 char *iop_device_map;
4656 4656 int count;
4657 4657
4658 4658 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4659 4659 acb_firm_model = acb->firm_model;
4660 4660 acb_firm_version = acb->firm_version;
4661 4661 acb_device_map = acb->device_map;
4662 4662 /* firm_model, 15 */
4663 4663 iop_firm_model = (char *)
4664 4664 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4665 4665 /* firm_version, 17 */
4666 4666 iop_firm_version = (char *)
4667 4667 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4668 4668 /* device_map, 21 */
4669 4669 iop_device_map = (char *)
4670 4670 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4671 4671
4672 4672 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4673 4673 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4674 4674
4675 4675 if (!arcmsr_hbb_wait_msgint_ready(acb))
4676 4676 arcmsr_warn(acb,
4677 4677 "timeout while waiting for adapter firmware "
4678 4678 "miscellaneous data");
4679 4679
4680 4680 count = 8;
4681 4681 while (count) {
4682 4682 *acb_firm_model =
4683 4683 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4684 4684 acb_firm_model++;
4685 4685 iop_firm_model++;
4686 4686 count--;
4687 4687 }
4688 4688 count = 16;
4689 4689 while (count) {
4690 4690 *acb_firm_version =
4691 4691 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4692 4692 acb_firm_version++;
4693 4693 iop_firm_version++;
4694 4694 count--;
4695 4695 }
4696 4696 count = 16;
4697 4697 while (count) {
4698 4698 *acb_device_map =
4699 4699 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4700 4700 acb_device_map++;
4701 4701 iop_device_map++;
4702 4702 count--;
4703 4703 }
4704 4704
4705 4705 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4706 4706 acb->firm_version);
4707 4707
4708 4708 /* firm_request_len, 1 */
4709 4709 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4710 4710 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4711 4711 /* firm_numbers_queue, 2 */
4712 4712 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4713 4713 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4714 4714 /* firm_sdram_size, 3 */
4715 4715 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4716 4716 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4717 4717 /* firm_ide_channels, 4 */
4718 4718 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4719 4719 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4720 4720 }
4721 4721
4722 4722
4723 4723 /* get firmware miscellaneous data */
4724 4724 static void
4725 4725 arcmsr_get_hbc_config(struct ACB *acb)
4726 4726 {
4727 4727 struct HBC_msgUnit *phbcmu;
4728 4728
4729 4729 char *acb_firm_model;
4730 4730 char *acb_firm_version;
4731 4731 char *acb_device_map;
4732 4732 char *iop_firm_model;
4733 4733 char *iop_firm_version;
4734 4734 char *iop_device_map;
4735 4735 int count;
4736 4736
4737 4737 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4738 4738 acb_firm_model = acb->firm_model;
4739 4739 acb_firm_version = acb->firm_version;
4740 4740 acb_device_map = acb->device_map;
4741 4741 /* firm_model, 15 */
4742 4742 iop_firm_model =
4743 4743 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4744 4744 /* firm_version, 17 */
4745 4745 iop_firm_version =
4746 4746 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4747 4747 /* device_map, 21 */
4748 4748 iop_device_map =
4749 4749 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4750 4750 /* post "get config" instruction */
4751 4751 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4752 4752 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4753 4753 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4754 4754 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4755 4755 if (!arcmsr_hbc_wait_msgint_ready(acb))
4756 4756 arcmsr_warn(acb,
4757 4757 "timeout while waiting for adapter firmware "
4758 4758 "miscellaneous data");
4759 4759 count = 8;
4760 4760 while (count) {
4761 4761 *acb_firm_model =
4762 4762 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4763 4763 acb_firm_model++;
4764 4764 iop_firm_model++;
4765 4765 count--;
4766 4766 }
4767 4767
4768 4768 count = 16;
4769 4769 while (count) {
4770 4770 *acb_firm_version =
4771 4771 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4772 4772 acb_firm_version++;
4773 4773 iop_firm_version++;
4774 4774 count--;
4775 4775 }
4776 4776
4777 4777 count = 16;
4778 4778 while (count) {
4779 4779 *acb_device_map =
4780 4780 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4781 4781 acb_device_map++;
4782 4782 iop_device_map++;
4783 4783 count--;
4784 4784 }
4785 4785
4786 4786 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4787 4787 acb->firm_version);
4788 4788
4789 4789 /* firm_request_len, 1, 04-07 */
4790 4790 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4791 4791 &phbcmu->msgcode_rwbuffer[1]);
4792 4792 /* firm_numbers_queue, 2, 08-11 */
4793 4793 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4794 4794 &phbcmu->msgcode_rwbuffer[2]);
4795 4795 /* firm_sdram_size, 3, 12-15 */
4796 4796 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4797 4797 &phbcmu->msgcode_rwbuffer[3]);
4798 4798 /* firm_ide_channels, 4, 16-19 */
4799 4799 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4800 4800 &phbcmu->msgcode_rwbuffer[4]);
4801 4801 /* firm_cfg_version, 25, 100-103 */
4802 4802 acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4803 4803 &phbcmu->msgcode_rwbuffer[25]);
4804 4804 }
4805 4805
4806 4806
4807 4807 /* start background rebuild */
4808 4808 static void
4809 4809 arcmsr_start_hba_bgrb(struct ACB *acb) {
4810 4810
4811 4811 struct HBA_msgUnit *phbamu;
4812 4812
4813 4813 phbamu = (struct HBA_msgUnit *)acb->pmu;
4814 4814
4815 4815 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4816 4816 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4817 4817 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4818 4818
4819 4819 if (!arcmsr_hba_wait_msgint_ready(acb))
4820 4820 arcmsr_warn(acb,
4821 4821 "timeout while waiting for background rebuild to start");
4822 4822 }
4823 4823
4824 4824
4825 4825 static void
4826 4826 arcmsr_start_hbb_bgrb(struct ACB *acb) {
4827 4827
4828 4828 struct HBB_msgUnit *phbbmu;
4829 4829
4830 4830 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4831 4831
4832 4832 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4833 4833 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4834 4834 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4835 4835 ARCMSR_MESSAGE_START_BGRB);
4836 4836
4837 4837 if (!arcmsr_hbb_wait_msgint_ready(acb))
4838 4838 arcmsr_warn(acb,
4839 4839 "timeout while waiting for background rebuild to start");
4840 4840 }
4841 4841
4842 4842
4843 4843 static void
4844 4844 arcmsr_start_hbc_bgrb(struct ACB *acb) {
4845 4845
4846 4846 struct HBC_msgUnit *phbcmu;
4847 4847
4848 4848 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4849 4849
4850 4850 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4851 4851 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4852 4852 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4853 4853 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4854 4854 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4855 4855 if (!arcmsr_hbc_wait_msgint_ready(acb))
4856 4856 arcmsr_warn(acb,
4857 4857 "timeout while waiting for background rebuild to start");
4858 4858 }
4859 4859
4860 4860 static void
4861 4861 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4862 4862 {
4863 4863 struct HBA_msgUnit *phbamu;
4864 4864 struct CCB *ccb;
4865 4865 boolean_t error;
4866 4866 uint32_t flag_ccb, outbound_intstatus, intmask_org;
4867 4867 boolean_t poll_ccb_done = B_FALSE;
4868 4868 uint32_t poll_count = 0;
4869 4869
4870 4870
4871 4871 phbamu = (struct HBA_msgUnit *)acb->pmu;
4872 4872
4873 4873 polling_ccb_retry:
4874 4874 /* TODO: Use correct offset and size for syncing? */
4875 4875 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4876 4876 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4877 4877 return;
4878 4878 intmask_org = arcmsr_disable_allintr(acb);
4879 4879
4880 4880 for (;;) {
4881 4881 if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4882 4882 &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4883 4883 if (poll_ccb_done) {
4884 4884 /* chip FIFO no ccb for completion already */
4885 4885 break;
4886 4886 } else {
4887 4887 drv_usecwait(25000);
4888 4888 if ((poll_count > 100) && (poll_ccb != NULL)) {
4889 4889 break;
4890 4890 }
4891 4891 if (acb->ccboutstandingcount == 0) {
4892 4892 break;
4893 4893 }
4894 4894 poll_count++;
4895 4895 outbound_intstatus =
4896 4896 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4897 4897 &phbamu->outbound_intstatus) &
4898 4898 acb->outbound_int_enable;
4899 4899
4900 4900 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4901 4901 &phbamu->outbound_intstatus,
4902 4902 outbound_intstatus); /* clear interrupt */
4903 4903 }
4904 4904 }
4905 4905
4906 4906 /* frame must be 32 bytes aligned */
4907 4907 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4908 4908
4909 4909 /* check if command done with no error */
4910 4910 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4911 4911 B_TRUE : B_FALSE;
4912 4912 if (poll_ccb != NULL)
4913 4913 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4914 4914
4915 4915 if (ccb->acb != acb) {
4916 4916 arcmsr_warn(acb, "ccb got a wrong acb!");
4917 4917 continue;
4918 4918 }
4919 4919 if (ccb->ccb_state != ARCMSR_CCB_START) {
4920 4920 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4921 4921 ccb->ccb_state |= ARCMSR_CCB_BACK;
4922 4922 ccb->pkt->pkt_reason = CMD_ABORTED;
4923 4923 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4924 4924 arcmsr_ccb_complete(ccb, 1);
4925 4925 continue;
4926 4926 }
4927 4927 arcmsr_report_ccb_state(acb, ccb, error);
4928 4928 arcmsr_warn(acb,
4929 4929 "polling op got unexpected ccb command done");
4930 4930 continue;
4931 4931 }
4932 4932 arcmsr_report_ccb_state(acb, ccb, error);
4933 4933 } /* drain reply FIFO */
4934 4934 arcmsr_enable_allintr(acb, intmask_org);
4935 4935 }
4936 4936
4937 4937
4938 4938 static void
4939 4939 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4940 4940 {
4941 4941 struct HBB_msgUnit *phbbmu;
4942 4942 struct CCB *ccb;
4943 4943 uint32_t flag_ccb, intmask_org;
4944 4944 boolean_t error;
4945 4945 uint32_t poll_count = 0;
4946 4946 int index;
4947 4947 boolean_t poll_ccb_done = B_FALSE;
4948 4948
4949 4949
4950 4950 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4951 4951
4952 4952
4953 4953 polling_ccb_retry:
4954 4954 /* Use correct offset and size for syncing */
4955 4955 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4956 4956 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4957 4957 return;
4958 4958
4959 4959 intmask_org = arcmsr_disable_allintr(acb);
4960 4960
4961 4961 for (;;) {
4962 4962 index = phbbmu->doneq_index;
4963 4963 if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4964 4964 if (poll_ccb_done) {
4965 4965 /* chip FIFO no ccb for completion already */
4966 4966 break;
4967 4967 } else {
4968 4968 drv_usecwait(25000);
4969 4969 if ((poll_count > 100) && (poll_ccb != NULL))
4970 4970 break;
4971 4971 if (acb->ccboutstandingcount == 0)
4972 4972 break;
4973 4973 poll_count++;
4974 4974 /* clear doorbell interrupt */
4975 4975 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4976 4976 &phbbmu->hbb_doorbell->iop2drv_doorbell,
4977 4977 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4978 4978 }
4979 4979 }
4980 4980
4981 4981 phbbmu->done_qbuffer[index] = 0;
4982 4982 index++;
4983 4983 /* if last index number set it to 0 */
4984 4984 index %= ARCMSR_MAX_HBB_POSTQUEUE;
4985 4985 phbbmu->doneq_index = index;
4986 4986 /* check if command done with no error */
4987 4987 /* frame must be 32 bytes aligned */
4988 4988 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4989 4989
4990 4990 /* check if command done with no error */
4991 4991 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4992 4992 B_TRUE : B_FALSE;
4993 4993
4994 4994 if (poll_ccb != NULL)
4995 4995 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4996 4996 if (ccb->acb != acb) {
4997 4997 arcmsr_warn(acb, "ccb got a wrong acb!");
4998 4998 continue;
4999 4999 }
5000 5000 if (ccb->ccb_state != ARCMSR_CCB_START) {
5001 5001 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5002 5002 ccb->ccb_state |= ARCMSR_CCB_BACK;
5003 5003 ccb->pkt->pkt_reason = CMD_ABORTED;
5004 5004 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5005 5005 arcmsr_ccb_complete(ccb, 1);
5006 5006 continue;
5007 5007 }
5008 5008 arcmsr_report_ccb_state(acb, ccb, error);
5009 5009 arcmsr_warn(acb,
5010 5010 "polling op got unexpect ccb command done");
5011 5011 continue;
5012 5012 }
5013 5013 arcmsr_report_ccb_state(acb, ccb, error);
5014 5014 } /* drain reply FIFO */
5015 5015 arcmsr_enable_allintr(acb, intmask_org);
5016 5016 }
5017 5017
5018 5018
5019 5019 static void
5020 5020 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5021 5021 {
5022 5022
5023 5023 struct HBC_msgUnit *phbcmu;
5024 5024 struct CCB *ccb;
5025 5025 boolean_t error;
5026 5026 uint32_t ccb_cdb_phy;
5027 5027 uint32_t flag_ccb, intmask_org;
5028 5028 boolean_t poll_ccb_done = B_FALSE;
5029 5029 uint32_t poll_count = 0;
5030 5030
5031 5031
5032 5032 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5033 5033
5034 5034 polling_ccb_retry:
5035 5035
5036 5036 /* Use correct offset and size for syncing */
5037 5037 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5038 5038 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5039 5039 return;
5040 5040
5041 5041 intmask_org = arcmsr_disable_allintr(acb);
5042 5042
5043 5043 for (;;) {
5044 5044 if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5045 5045 &phbcmu->host_int_status) &
5046 5046 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5047 5047
5048 5048 if (poll_ccb_done) {
5049 5049 /* chip FIFO no ccb for completion already */
5050 5050 break;
5051 5051 } else {
5052 5052 drv_usecwait(25000);
5053 5053 if ((poll_count > 100) && (poll_ccb != NULL)) {
5054 5054 break;
5055 5055 }
5056 5056 if (acb->ccboutstandingcount == 0) {
5057 5057 break;
5058 5058 }
5059 5059 poll_count++;
5060 5060 }
5061 5061 }
5062 5062 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5063 5063 &phbcmu->outbound_queueport_low);
5064 5064 /* frame must be 32 bytes aligned */
5065 5065 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5066 5066 /* the CDB is the first field of the CCB */
5067 5067 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5068 5068
5069 5069 /* check if command done with no error */
5070 5070 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5071 5071 B_TRUE : B_FALSE;
5072 5072 if (poll_ccb != NULL)
5073 5073 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5074 5074
5075 5075 if (ccb->acb != acb) {
5076 5076 arcmsr_warn(acb, "ccb got a wrong acb!");
5077 5077 continue;
5078 5078 }
5079 5079 if (ccb->ccb_state != ARCMSR_CCB_START) {
5080 5080 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5081 5081 ccb->ccb_state |= ARCMSR_CCB_BACK;
5082 5082 ccb->pkt->pkt_reason = CMD_ABORTED;
5083 5083 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5084 5084 arcmsr_ccb_complete(ccb, 1);
5085 5085 continue;
5086 5086 }
5087 5087 arcmsr_report_ccb_state(acb, ccb, error);
5088 5088 arcmsr_warn(acb,
5089 5089 "polling op got unexpected ccb command done");
5090 5090 continue;
5091 5091 }
5092 5092 arcmsr_report_ccb_state(acb, ccb, error);
5093 5093 } /* drain reply FIFO */
5094 5094 arcmsr_enable_allintr(acb, intmask_org);
5095 5095 }
5096 5096
5097 5097
5098 5098 /*
5099 5099 * Function: arcmsr_hba_hardware_reset()
5100 5100 * Bug Fix for Intel IOP cause firmware hang on.
5101 5101 * and kernel panic
5102 5102 */
5103 5103 static void
5104 5104 arcmsr_hba_hardware_reset(struct ACB *acb)
5105 5105 {
5106 5106 struct HBA_msgUnit *phbamu;
5107 5107 uint8_t value[64];
5108 5108 int i;
5109 5109
5110 5110 phbamu = (struct HBA_msgUnit *)acb->pmu;
5111 5111 /* backup pci config data */
5112 5112 for (i = 0; i < 64; i++) {
5113 5113 value[i] = pci_config_get8(acb->pci_acc_handle, i);
5114 5114 }
5115 5115 /* hardware reset signal */
5116 5116 if ((PCI_DEVICE_ID_ARECA_1680 ==
5117 5117 pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5118 5118 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5119 5119 &phbamu->reserved1[0], 0x00000003);
5120 5120 } else {
5121 5121 pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5122 5122 }
5123 5123 drv_usecwait(1000000);
5124 5124 /* write back pci config data */
5125 5125 for (i = 0; i < 64; i++) {
5126 5126 pci_config_put8(acb->pci_acc_handle, i, value[i]);
5127 5127 }
5128 5128 drv_usecwait(1000000);
5129 5129 }
5130 5130
5131 5131 /*
5132 5132 * Function: arcmsr_abort_host_command
5133 5133 */
5134 5134 static uint8_t
5135 5135 arcmsr_abort_host_command(struct ACB *acb)
5136 5136 {
5137 5137 uint8_t rtnval = 0;
5138 5138
5139 5139 switch (acb->adapter_type) {
5140 5140 case ACB_ADAPTER_TYPE_A:
5141 5141 rtnval = arcmsr_abort_hba_allcmd(acb);
5142 5142 break;
5143 5143 case ACB_ADAPTER_TYPE_B:
5144 5144 rtnval = arcmsr_abort_hbb_allcmd(acb);
5145 5145 break;
5146 5146 case ACB_ADAPTER_TYPE_C:
5147 5147 rtnval = arcmsr_abort_hbc_allcmd(acb);
5148 5148 break;
5149 5149 }
5150 5150 return (rtnval);
5151 5151 }
5152 5152
5153 5153 /*
5154 5154 * Function: arcmsr_handle_iop_bus_hold
5155 5155 */
5156 5156 static void
5157 5157 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5158 5158 {
5159 5159
5160 5160 switch (acb->adapter_type) {
5161 5161 case ACB_ADAPTER_TYPE_A:
5162 5162 {
5163 5163 struct HBA_msgUnit *phbamu;
5164 5164 int retry_count = 0;
5165 5165
5166 5166 acb->timeout_count = 0;
5167 5167 phbamu = (struct HBA_msgUnit *)acb->pmu;
5168 5168 arcmsr_hba_hardware_reset(acb);
5169 5169 acb->acb_flags &= ~ACB_F_IOP_INITED;
5170 5170 sleep_again:
5171 5171 drv_usecwait(1000000);
5172 5172 if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5173 5173 &phbamu->outbound_msgaddr1) &
5174 5174 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5175 5175 if (retry_count > 60) {
5176 5176 arcmsr_warn(acb,
5177 5177 "waiting for hardware"
5178 5178 "bus reset return, RETRY TERMINATED!!");
5179 5179 return;
5180 5180 }
5181 5181 retry_count++;
5182 5182 goto sleep_again;
5183 5183 }
5184 5184 arcmsr_iop_init(acb);
5185 5185 break;
5186 5186 }
5187 5187
5188 5188 }
5189 5189 }
5190 5190
5191 5191 static void
5192 5192 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
5193 5193
5194 5194 struct QBUFFER *prbuffer;
5195 5195 uint8_t *pQbuffer;
5196 5196 uint8_t *iop_data;
5197 5197 int my_empty_len, iop_len;
5198 5198 int rqbuf_firstidx, rqbuf_lastidx;
5199 5199
5200 5200 /* check this iop data if overflow my rqbuffer */
5201 5201 rqbuf_lastidx = acb->rqbuf_lastidx;
5202 5202 rqbuf_firstidx = acb->rqbuf_firstidx;
5203 5203 prbuffer = arcmsr_get_iop_rqbuffer(acb);
5204 5204 iop_data = (uint8_t *)prbuffer->data;
5205 5205 iop_len = prbuffer->data_len;
5206 5206 my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
5207 5207 (ARCMSR_MAX_QBUFFER - 1);
5208 5208
5209 5209 if (my_empty_len >= iop_len) {
5210 5210 while (iop_len > 0) {
5211 5211 pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5212 5212 (void) memcpy(pQbuffer, iop_data, 1);
5213 5213 rqbuf_lastidx++;
5214 5214 /* if last index number set it to 0 */
5215 5215 rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5216 5216 iop_data++;
5217 5217 iop_len--;
5218 5218 }
5219 5219 acb->rqbuf_lastidx = rqbuf_lastidx;
5220 5220 arcmsr_iop_message_read(acb);
5221 5221 /* signature, let IOP know data has been read */
5222 5222 } else {
5223 5223 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5224 5224 }
5225 5225 }
5226 5226
5227 5227
5228 5228
5229 5229 static void
5230 5230 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
5231 5231
5232 5232 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5233 5233 /*
5234 5234 * check if there are any mail packages from user space program
5235 5235 * in my post bag, now is the time to send them into Areca's firmware
5236 5236 */
5237 5237
5238 5238 if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5239 5239
5240 5240 uint8_t *pQbuffer;
5241 5241 struct QBUFFER *pwbuffer;
5242 5242 uint8_t *iop_data;
5243 5243 int allxfer_len = 0;
5244 5244
5245 5245 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5246 5246 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5247 5247 iop_data = (uint8_t *)pwbuffer->data;
5248 5248
5249 5249 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5250 5250 (allxfer_len < 124)) {
5251 5251 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5252 5252 (void) memcpy(iop_data, pQbuffer, 1);
5253 5253 acb->wqbuf_firstidx++;
5254 5254 /* if last index number set it to 0 */
5255 5255 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5256 5256 iop_data++;
5257 5257 allxfer_len++;
5258 5258 }
5259 5259 pwbuffer->data_len = allxfer_len;
5260 5260 /*
5261 5261 * push inbound doorbell, tell iop driver data write ok
5262 5262 * await reply on next hwinterrupt for next Qbuffer post
5263 5263 */
5264 5264 arcmsr_iop_message_wrote(acb);
5265 5265 }
5266 5266
5267 5267 if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5268 5268 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5269 5269 }
5270 5270
5271 5271
5272 5272 static void
5273 5273 arcmsr_hba_doorbell_isr(struct ACB *acb)
5274 5274 {
5275 5275 uint32_t outbound_doorbell;
5276 5276 struct HBA_msgUnit *phbamu;
5277 5277
5278 5278 phbamu = (struct HBA_msgUnit *)acb->pmu;
5279 5279
5280 5280 /*
5281 5281 * Maybe here we need to check wrqbuffer_lock is locked or not
5282 5282 * DOORBELL: ding! dong!
5283 5283 * check if there are any mail need to pack from firmware
5284 5284 */
5285 5285
5286 5286 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5287 5287 &phbamu->outbound_doorbell);
5288 5288 /* clear doorbell interrupt */
5289 5289 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5290 5290 &phbamu->outbound_doorbell, outbound_doorbell);
5291 5291
5292 5292 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5293 5293 arcmsr_iop2drv_data_wrote_handle(acb);
5294 5294
5295 5295
5296 5296 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5297 5297 arcmsr_iop2drv_data_read_handle(acb);
5298 5298 }
5299 5299
5300 5300
5301 5301
5302 5302 static void
5303 5303 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5304 5304 {
5305 5305 uint32_t outbound_doorbell;
5306 5306 struct HBC_msgUnit *phbcmu;
5307 5307
5308 5308 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5309 5309
5310 5310 /*
5311 5311 * Maybe here we need to check wrqbuffer_lock is locked or not
5312 5312 * DOORBELL: ding! dong!
5313 5313 * check if there are any mail need to pick from firmware
5314 5314 */
5315 5315
5316 5316 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5317 5317 &phbcmu->outbound_doorbell);
5318 5318 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5319 5319 &phbcmu->outbound_doorbell_clear,
5320 5320 outbound_doorbell); /* clear interrupt */
5321 5321 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5322 5322 arcmsr_iop2drv_data_wrote_handle(acb);
5323 5323 }
5324 5324 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5325 5325 arcmsr_iop2drv_data_read_handle(acb);
5326 5326 }
5327 5327 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5328 5328 /* messenger of "driver to iop commands" */
5329 5329 arcmsr_hbc_message_isr(acb);
5330 5330 }
5331 5331 }
5332 5332
5333 5333
5334 5334 static void
5335 5335 arcmsr_hba_message_isr(struct ACB *acb)
5336 5336 {
5337 5337 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5338 5338 uint32_t *signature = (&phbamu->msgcode_rwbuffer[0]);
5339 5339 uint32_t outbound_message;
5340 5340
5341 5341 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5342 5342 &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5343 5343
5344 5344 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5345 5345 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5346 5346 if ((ddi_taskq_dispatch(acb->taskq,
5347 5347 (void (*)(void *))arcmsr_dr_handle,
5348 5348 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5349 5349 arcmsr_warn(acb, "DR task start failed");
5350 5350 }
5351 5351 }
5352 5352
5353 5353 static void
5354 5354 arcmsr_hbb_message_isr(struct ACB *acb)
5355 5355 {
5356 5356 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5357 5357 uint32_t *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5358 5358 uint32_t outbound_message;
5359 5359
5360 5360 /* clear interrupts */
5361 5361 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5362 5362 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5363 5363 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5364 5364 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5365 5365 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5366 5366 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5367 5367
5368 5368 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5369 5369 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5370 5370 if ((ddi_taskq_dispatch(acb->taskq,
5371 5371 (void (*)(void *))arcmsr_dr_handle,
5372 5372 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5373 5373 arcmsr_warn(acb, "DR task start failed");
5374 5374 }
5375 5375 }
5376 5376
5377 5377 static void
5378 5378 arcmsr_hbc_message_isr(struct ACB *acb)
5379 5379 {
5380 5380 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5381 5381 uint32_t *signature = (&phbcmu->msgcode_rwbuffer[0]);
5382 5382 uint32_t outbound_message;
5383 5383
5384 5384 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5385 5385 &phbcmu->outbound_doorbell_clear,
5386 5386 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5387 5387
5388 5388 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5389 5389 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5390 5390 if ((ddi_taskq_dispatch(acb->taskq,
5391 5391 (void (*)(void *))arcmsr_dr_handle,
5392 5392 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5393 5393 arcmsr_warn(acb, "DR task start failed");
5394 5394 }
5395 5395 }
5396 5396
5397 5397
5398 5398 static void
5399 5399 arcmsr_hba_postqueue_isr(struct ACB *acb)
5400 5400 {
5401 5401
5402 5402 struct HBA_msgUnit *phbamu;
5403 5403 struct CCB *ccb;
5404 5404 uint32_t flag_ccb;
5405 5405 boolean_t error;
5406 5406
5407 5407 phbamu = (struct HBA_msgUnit *)acb->pmu;
5408 5408
5409 5409 /* areca cdb command done */
5410 5410 /* Use correct offset and size for syncing */
5411 5411 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5412 5412 DDI_DMA_SYNC_FORKERNEL);
5413 5413
5414 5414 while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5415 5415 &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5416 5416 /* frame must be 32 bytes aligned */
5417 5417 ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5418 5418 /* check if command done with no error */
5419 5419 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5420 5420 B_TRUE : B_FALSE;
5421 5421 arcmsr_drain_donequeue(acb, ccb, error);
5422 5422 } /* drain reply FIFO */
5423 5423 }
5424 5424
5425 5425
/*
 * arcmsr_hbb_postqueue_isr: drain the type B adapter's done queue.
 *
 * The done queue is a ring of ARCMSR_MAX_HBB_POSTQUEUE slots in host
 * memory; a non-zero slot holds a completion token (flag_ccb) from
 * which the CCB address and error state are decoded.  Each completed
 * CCB is handed to arcmsr_drain_donequeue().
 */
static void
arcmsr_hbb_postqueue_isr(struct ACB *acb)
{
	struct HBB_msgUnit *phbbmu;
	struct CCB *ccb;
	uint32_t flag_ccb;
	boolean_t error;
	int index;

	phbbmu = (struct HBB_msgUnit *)acb->pmu;

	/* areca cdb command done */
	index = phbbmu->doneq_index;
	/* sync the CCB pool so the CPU sees fresh completion data */
	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
		return;
	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
		/* clear the slot so it can be reused by the IOP */
		phbbmu->done_qbuffer[index] = 0;
		/* frame must be 32 bytes aligned */

		/* the CDB is the first field of the CCB */
		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));

		/* check if command done with no error */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
		    B_TRUE : B_FALSE;
		arcmsr_drain_donequeue(acb, ccb, error);
		index++;
		/* if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		phbbmu->doneq_index = index;
	} /* drain reply FIFO */
}
5459 5459
5460 5460
/*
 * arcmsr_hbc_postqueue_isr: drain the type C adapter's outbound post
 * queue.
 *
 * Completion tokens are popped from outbound_queueport_low for as long
 * as host_int_status reports the post-queue interrupt.  To bound the
 * time spent at interrupt level, after ARCMSR_HBC_ISR_THROTTLING_LEVEL
 * completions the IOP is asked to throttle and the loop exits.
 */
static void
arcmsr_hbc_postqueue_isr(struct ACB *acb)
{

	struct HBC_msgUnit *phbcmu;
	struct CCB *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	boolean_t error;

	phbcmu = (struct HBC_msgUnit *)acb->pmu;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */
	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbcmu->host_int_status) &
	    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		/* check if command done with no error */
		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_queueport_low);
		/* frame must be 32 bytes aligned */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);

		/* the CDB is the first field of the CCB */
		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));

		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
		    B_TRUE : B_FALSE;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			/* tell the IOP to throttle the post queue */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_doorbell,
			    ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
			break;
		}
		throttling++;
	} /* drain reply FIFO */
}
5501 5501
5502 5502
5503 5503 static uint_t
5504 5504 arcmsr_handle_hba_isr(struct ACB *acb) {
5505 5505
5506 5506 uint32_t outbound_intstatus;
5507 5507 struct HBA_msgUnit *phbamu;
5508 5508
5509 5509 phbamu = (struct HBA_msgUnit *)acb->pmu;
5510 5510
5511 5511 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5512 5512 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5513 5513
5514 5514 if (outbound_intstatus == 0) /* it must be a shared irq */
5515 5515 return (DDI_INTR_UNCLAIMED);
5516 5516
5517 5517 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5518 5518 outbound_intstatus); /* clear interrupt */
5519 5519
5520 5520 /* MU doorbell interrupts */
5521 5521
5522 5522 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5523 5523 arcmsr_hba_doorbell_isr(acb);
5524 5524
5525 5525 /* MU post queue interrupts */
5526 5526 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5527 5527 arcmsr_hba_postqueue_isr(acb);
5528 5528
5529 5529 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5530 5530 arcmsr_hba_message_isr(acb);
5531 5531 }
5532 5532
5533 5533 return (DDI_INTR_CLAIMED);
5534 5534 }
5535 5535
5536 5536
5537 5537 static uint_t
5538 5538 arcmsr_handle_hbb_isr(struct ACB *acb) {
5539 5539
5540 5540 uint32_t outbound_doorbell;
5541 5541 struct HBB_msgUnit *phbbmu;
5542 5542
5543 5543
5544 5544 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5545 5545
5546 5546 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5547 5547 &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5548 5548
5549 5549 if (outbound_doorbell == 0) /* it must be a shared irq */
5550 5550 return (DDI_INTR_UNCLAIMED);
5551 5551
5552 5552 /* clear doorbell interrupt */
5553 5553 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5554 5554 &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5555 5555 /* wait a cycle */
5556 5556 (void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5557 5557 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5558 5558 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5559 5559 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5560 5560 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5561 5561
5562 5562 /* MU ioctl transfer doorbell interrupts */
5563 5563 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5564 5564 arcmsr_iop2drv_data_wrote_handle(acb);
5565 5565
5566 5566 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5567 5567 arcmsr_iop2drv_data_read_handle(acb);
5568 5568
5569 5569 /* MU post queue interrupts */
5570 5570 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5571 5571 arcmsr_hbb_postqueue_isr(acb);
5572 5572
5573 5573 /* MU message interrupt */
5574 5574
5575 5575 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5576 5576 arcmsr_hbb_message_isr(acb);
5577 5577 }
5578 5578
5579 5579 return (DDI_INTR_CLAIMED);
5580 5580 }
5581 5581
5582 5582 static uint_t
5583 5583 arcmsr_handle_hbc_isr(struct ACB *acb)
5584 5584 {
5585 5585 uint32_t host_interrupt_status;
5586 5586 struct HBC_msgUnit *phbcmu;
5587 5587
5588 5588 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5589 5589 /* check outbound intstatus */
5590 5590 host_interrupt_status=
5591 5591 CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5592 5592 if (host_interrupt_status == 0) /* it must be share irq */
5593 5593 return (DDI_INTR_UNCLAIMED);
5594 5594 /* MU ioctl transfer doorbell interrupts */
5595 5595 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5596 5596 /* messenger of "ioctl message read write" */
5597 5597 arcmsr_hbc_doorbell_isr(acb);
5598 5598 }
5599 5599 /* MU post queue interrupts */
5600 5600 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5601 5601 /* messenger of "scsi commands" */
5602 5602 arcmsr_hbc_postqueue_isr(acb);
5603 5603 }
5604 5604 return (DDI_INTR_CLAIMED);
5605 5605 }
5606 5606
5607 5607 static uint_t
5608 5608 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5609 5609 {
5610 5610 struct ACB *acb = (void *)arg;
5611 5611 struct CCB *ccb;
5612 5612 uint_t retrn = DDI_INTR_UNCLAIMED;
5613 5613 _NOTE(ARGUNUSED(arg2))
5614 5614
5615 5615 mutex_enter(&acb->isr_mutex);
5616 5616 switch (acb->adapter_type) {
5617 5617 case ACB_ADAPTER_TYPE_A:
5618 5618 retrn = arcmsr_handle_hba_isr(acb);
5619 5619 break;
5620 5620
5621 5621 case ACB_ADAPTER_TYPE_B:
5622 5622 retrn = arcmsr_handle_hbb_isr(acb);
5623 5623 break;
5624 5624
5625 5625 case ACB_ADAPTER_TYPE_C:
5626 5626 retrn = arcmsr_handle_hbc_isr(acb);
5627 5627 break;
5628 5628
5629 5629 default:
5630 5630 /* We should never be here */
5631 5631 ASSERT(0);
5632 5632 break;
5633 5633 }
5634 5634 mutex_exit(&acb->isr_mutex);
5635 5635 while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5636 5636 arcmsr_ccb_complete(ccb, 1);
5637 5637 }
5638 5638 return (retrn);
5639 5639 }
5640 5640
5641 5641
5642 5642 static void
5643 5643 arcmsr_wait_firmware_ready(struct ACB *acb) {
5644 5644
5645 5645 uint32_t firmware_state;
5646 5646
5647 5647 firmware_state = 0;
5648 5648
5649 5649 switch (acb->adapter_type) {
5650 5650 case ACB_ADAPTER_TYPE_A:
5651 5651 {
5652 5652 struct HBA_msgUnit *phbamu;
5653 5653 phbamu = (struct HBA_msgUnit *)acb->pmu;
5654 5654 do {
5655 5655 firmware_state =
5656 5656 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5657 5657 &phbamu->outbound_msgaddr1);
5658 5658 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5659 5659 == 0);
5660 5660 break;
5661 5661 }
5662 5662
5663 5663 case ACB_ADAPTER_TYPE_B:
5664 5664 {
5665 5665 struct HBB_msgUnit *phbbmu;
5666 5666 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5667 5667 do {
5668 5668 firmware_state =
5669 5669 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5670 5670 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5671 5671 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5672 5672 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5673 5673 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5674 5674 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5675 5675 break;
5676 5676 }
5677 5677
5678 5678 case ACB_ADAPTER_TYPE_C:
5679 5679 {
5680 5680 struct HBC_msgUnit *phbcmu;
5681 5681 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5682 5682 do {
5683 5683 firmware_state =
5684 5684 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5685 5685 &phbcmu->outbound_msgaddr1);
5686 5686 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5687 5687 == 0);
5688 5688 break;
5689 5689 }
5690 5690
5691 5691 }
5692 5692 }
5693 5693
/*
 * arcmsr_clear_doorbell_queue_buffer: discard any pending doorbell
 * state left over from before initialization.  For each adapter type
 * this clears the outbound doorbell interrupt and then tells the IOP
 * that the driver has read (i.e. discarded) any queued data.
 */
static void
arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu;
		uint32_t outbound_doorbell;

		phbamu = (struct HBA_msgUnit *)acb->pmu;
		/* empty doorbell Qbuffer if door bell rung */
		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_doorbell);
		/* clear doorbell interrupt (write-back acknowledges it) */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_doorbell, outbound_doorbell);
		/* let the IOP know its data has been "read" */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->inbound_doorbell,
		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		/* clear interrupt and message state */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_DRV2IOP_DATA_READ_OK);
		/* let IOP know data has been read */
		break;
	}

	case ACB_ADAPTER_TYPE_C: {
		struct HBC_msgUnit *phbcmu;
		uint32_t outbound_doorbell;

		phbcmu = (struct HBC_msgUnit *)acb->pmu;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_doorbell);
		/* clear outbound doorbell isr via the dedicated clear reg */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_doorbell_clear, outbound_doorbell);
		/* let IOP know data has been read */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbcmu->inbound_doorbell,
		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
		break;
	}

	}
}
5750 5750
5751 5751
/*
 * arcmsr_iop_confirm: tell the IOP where the driver's CCB pool lives.
 *
 * Type A and C adapters only need the upper 32 bits of the pool's DMA
 * address, and only when that value is non-zero.  Type B adapters are
 * additionally given the post/done queue window addresses and are then
 * switched into driver mode.
 *
 * Returns TRUE on success, FALSE if any message handshake times out.
 */
static uint32_t
arcmsr_iop_confirm(struct ACB *acb) {

	uint64_t cdb_phyaddr;
	uint32_t cdb_phyaddr_hi32;

	/*
	 * here we need to tell iop 331 about our freeccb.HighPart
	 * if freeccb.HighPart is non-zero
	 */
	cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
	/* split the >>32 into two 16-bit shifts (historical idiom) */
	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		if (cdb_phyaddr_hi32 != 0) {
			struct HBA_msgUnit *phbamu;

			phbamu = (struct HBA_msgUnit *)acb->pmu;
			/* "set config" signature, then the high address */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->msgcode_rwbuffer[0],
			    ARCMSR_SIGNATURE_SET_CONFIG);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->inbound_msgaddr0,
			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
			if (!arcmsr_hba_wait_msgint_ready(acb)) {
				arcmsr_warn(acb,
				    "timeout setting ccb "
				    "high physical address");
				return (FALSE);
			}
		}
		break;

	/* if adapter is type B, set window of "post command queue" */
	case ACB_ADAPTER_TYPE_B: {
		uint32_t post_queue_phyaddr;
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		/* reset both queue indices before re-homing the queues */
		phbbmu->postq_index = 0;
		phbbmu->doneq_index = 0;
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_SET_POST_WINDOW);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb, "timeout setting post command "
			    "queue window");
			return (FALSE);
		}

		/*
		 * DMA address of post_qbuffer: pool base + the CCB array
		 * (each CCB padded to a 32-byte multiple) + the offset of
		 * post_qbuffer within the message unit.
		 */
		post_queue_phyaddr = (uint32_t)cdb_phyaddr +
		    ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
		    + offsetof(struct HBB_msgUnit, post_qbuffer);
		/* driver "set config" signature */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
		    ARCMSR_SIGNATURE_SET_CONFIG);
		/* normal should be zero */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
		    cdb_phyaddr_hi32);
		/* postQ size (256+8)*4 */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
		    post_queue_phyaddr);
		/* doneQ size (256+8)*4; doneQ follows postQ by 1056 bytes */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
		    post_queue_phyaddr+1056);
		/* ccb maxQ size must be --> [(256+8)*4] */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_SET_CONFIG);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb,
			    "timeout setting command queue window");
			return (FALSE);
		}
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_START_DRIVER_MODE);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb, "timeout in 'start driver mode'");
			return (FALSE);
		}
		break;
	}

	case ACB_ADAPTER_TYPE_C:
		if (cdb_phyaddr_hi32 != 0) {
			struct HBC_msgUnit *phbcmu;

			phbcmu = (struct HBC_msgUnit *)acb->pmu;
			/* "set config" signature, then the high address */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->msgcode_rwbuffer[0],
			    ARCMSR_SIGNATURE_SET_CONFIG);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_msgaddr0,
			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
			/* ring the doorbell to deliver the message */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_doorbell,
			    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
				arcmsr_warn(acb, "'set ccb "
				    "high part physical address' timeout");
				return (FALSE);
			}
		}
		break;
	}
	return (TRUE);
}
5874 5874
5875 5875
5876 5876 /*
5877 5877 * ONLY used for Adapter type B
5878 5878 */
5879 5879 static void
5880 5880 arcmsr_enable_eoi_mode(struct ACB *acb)
5881 5881 {
5882 5882 struct HBB_msgUnit *phbbmu;
5883 5883
5884 5884 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5885 5885
5886 5886 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5887 5887 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5888 5888 ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5889 5889
5890 5890 if (!arcmsr_hbb_wait_msgint_ready(acb))
5891 5891 arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5892 5892 }
5893 5893
5894 5894 /* start background rebuild */
5895 5895 static void
5896 5896 arcmsr_iop_init(struct ACB *acb)
5897 5897 {
5898 5898 uint32_t intmask_org;
5899 5899
5900 5900 /* disable all outbound interrupt */
5901 5901 intmask_org = arcmsr_disable_allintr(acb);
5902 5902 arcmsr_wait_firmware_ready(acb);
5903 5903 (void) arcmsr_iop_confirm(acb);
5904 5904
5905 5905 /* start background rebuild */
5906 5906 switch (acb->adapter_type) {
5907 5907 case ACB_ADAPTER_TYPE_A:
5908 5908 arcmsr_get_hba_config(acb);
5909 5909 arcmsr_start_hba_bgrb(acb);
5910 5910 break;
5911 5911 case ACB_ADAPTER_TYPE_B:
5912 5912 arcmsr_get_hbb_config(acb);
5913 5913 arcmsr_start_hbb_bgrb(acb);
5914 5914 break;
5915 5915 case ACB_ADAPTER_TYPE_C:
5916 5916 arcmsr_get_hbc_config(acb);
5917 5917 arcmsr_start_hbc_bgrb(acb);
5918 5918 break;
5919 5919 }
5920 5920 /* empty doorbell Qbuffer if door bell rang */
5921 5921 arcmsr_clear_doorbell_queue_buffer(acb);
5922 5922
5923 5923 if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5924 5924 arcmsr_enable_eoi_mode(acb);
5925 5925
5926 5926 /* enable outbound Post Queue, outbound doorbell Interrupt */
5927 5927 arcmsr_enable_allintr(acb, intmask_org);
5928 5928 acb->acb_flags |= ACB_F_IOP_INITED;
5929 5929 }
↓ open down ↓ |
3961 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX