5045 use atomic_{inc,dec}_* instead of atomic_add_*
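The title refers to the illumos atomic interfaces in <sys/atomic.h>, which this file already includes. The context shown in this section contains no modified hunks; as a minimal sketch of the idiom the title names (not an actual hunk of this webrev), the change replaces atomic_add_*() calls whose delta is a constant +1 or -1 with the dedicated atomic_inc_*()/atomic_dec_*() routines. The counter below is hypothetical, standing in for a field such as the driver's fw_outstanding count.

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Hypothetical counter; stands in for a field like instance->fw_outstanding. */
    static volatile uint16_t outstanding_cmds;

    /* Old idiom: increment/decrement expressed through atomic_add_16(). */
    static void
    count_old_style(void)
    {
    	atomic_add_16(&outstanding_cmds, 1);	/* command issued */
    	atomic_add_16(&outstanding_cmds, -1);	/* command completed */
    }

    /* Idiom named by issue 5045: use the dedicated inc/dec routines. */
    static void
    count_new_style(void)
    {
    	atomic_inc_16(&outstanding_cmds);	/* command issued */
    	atomic_dec_16(&outstanding_cmds);	/* command completed */
    }

Both forms are equivalent for these counters; the inc/dec names simply state the intent directly.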
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 3 * i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18 /*
19 19 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
20 20 */
21 21
22 22
23 23 #include <sys/types.h>
24 24 #include <sys/file.h>
25 25 #include <sys/atomic.h>
26 26 #include <sys/scsi/scsi.h>
27 27 #include <sys/byteorder.h>
28 28 #include "ld_pd_map.h"
29 29 #include "mr_sas.h"
30 30 #include "fusion.h"
31 31
32 32 /*
33 33 * FMA header files
34 34 */
35 35 #include <sys/ddifm.h>
36 36 #include <sys/fm/protocol.h>
37 37 #include <sys/fm/util.h>
38 38 #include <sys/fm/io/ddi.h>
39 39
40 40
41 41 /* Pre-TB command size and TB command size. */
42 42 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
43 43 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
44 44 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
45 45 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
46 46 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
47 47 extern ddi_dma_attr_t mrsas_generic_dma_attr;
48 48 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
49 49 extern struct ddi_device_acc_attr endian_attr;
50 50 extern int debug_level_g;
51 51 extern unsigned int enable_fp;
52 52 volatile int dump_io_wait_time = 90;
53 53 extern volatile int debug_timeout_g;
54 54 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
55 55 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
56 56 extern void push_pending_mfi_pkt(struct mrsas_instance *,
57 57 struct mrsas_cmd *);
58 58 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
59 59 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
60 60
61 61 /* Local static prototypes. */
62 62 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
63 63 struct scsi_address *, struct scsi_pkt *, uchar_t *);
64 64 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
65 65 U64 start_blk, U32 num_blocks);
66 66 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
67 67 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
68 68 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
69 69 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
70 70 #ifdef PDSUPPORT
71 71 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
72 72 struct mrsas_tbolt_pd_info *, int);
73 73 #endif /* PDSUPPORT */
74 74
75 75 static int debug_tbolt_fw_faults_after_ocr_g = 0;
76 76
77 77 /*
78 78 * destroy_mfi_mpi_frame_pool
79 79 */
80 80 void
81 81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
82 82 {
83 83 int i;
84 84
85 85 struct mrsas_cmd *cmd;
86 86
87 87 /* return all mfi frames to pool */
88 88 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
89 89 cmd = instance->cmd_list[i];
90 90 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
91 91 (void) mrsas_free_dma_obj(instance,
92 92 cmd->frame_dma_obj);
93 93 }
94 94 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
95 95 }
96 96 }
97 97
98 98 /*
99 99 * destroy_mpi2_frame_pool
100 100 */
101 101 void
102 102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
103 103 {
104 104
105 105 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
106 106 (void) mrsas_free_dma_obj(instance,
107 107 instance->mpi2_frame_pool_dma_obj);
108 108 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
109 109 }
110 110 }
111 111
112 112
113 113 /*
114 114 * mrsas_tbolt_free_additional_dma_buffer
115 115 */
116 116 void
117 117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
118 118 {
119 119 int i;
120 120
121 121 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
122 122 (void) mrsas_free_dma_obj(instance,
123 123 instance->mfi_internal_dma_obj);
124 124 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
125 125 }
126 126 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
127 127 (void) mrsas_free_dma_obj(instance,
128 128 instance->mfi_evt_detail_obj);
129 129 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
130 130 }
131 131
132 132 for (i = 0; i < 2; i++) {
133 133 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
134 134 (void) mrsas_free_dma_obj(instance,
135 135 instance->ld_map_obj[i]);
136 136 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
137 137 }
138 138 }
139 139 }
140 140
141 141
142 142 /*
143 143  * free_req_rep_desc_pool
144 144 */
145 145 void
146 146 free_req_rep_desc_pool(struct mrsas_instance *instance)
147 147 {
148 148 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
149 149 (void) mrsas_free_dma_obj(instance,
150 150 instance->request_desc_dma_obj);
151 151 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
152 152 }
153 153
154 154 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
155 155 (void) mrsas_free_dma_obj(instance,
156 156 instance->reply_desc_dma_obj);
157 157 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
158 158 }
159 159
160 160
161 161 }
162 162
163 163
164 164 /*
165 165 * ThunderBolt(TB) Request Message Frame Pool
166 166 */
167 167 int
168 168 create_mpi2_frame_pool(struct mrsas_instance *instance)
169 169 {
170 170 int i = 0;
171 171 uint16_t max_cmd;
172 172 uint32_t sgl_sz;
173 173 uint32_t raid_msg_size;
174 174 uint32_t total_size;
175 175 uint32_t offset;
176 176 uint32_t io_req_base_phys;
177 177 uint8_t *io_req_base;
178 178 struct mrsas_cmd *cmd;
179 179
180 180 max_cmd = instance->max_fw_cmds;
181 181
182 182 sgl_sz = 1024;
183 183 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
184 184
185 185 /* Allocating additional 256 bytes to accommodate SMID 0. */
186 186 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
187 187 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
188 188
189 189 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
190 190 "max_cmd %x", max_cmd));
191 191
192 192 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
193 193 "request message frame pool size %x", total_size));
194 194
195 195 /*
196 196 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
197 197  * and then split the memory into 1024 commands. Each command should be
198 198  * able to contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
199 199  * within it. For reference, see the "alloc_req_rep_desc" function,
200 200  * where we allocate the request/reply descriptor queues.
201 201 */
202 202
203 203 instance->mpi2_frame_pool_dma_obj.size = total_size;
204 204 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
205 205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
206 206 0xFFFFFFFFU;
207 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
208 208 0xFFFFFFFFU;
209 209 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
210 210 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
211 211
212 212 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
213 213 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
214 214 cmn_err(CE_WARN,
215 215 "mr_sas: could not alloc mpi2 frame pool");
216 216 return (DDI_FAILURE);
217 217 }
218 218
219 219 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
220 220 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
221 221
222 222 instance->io_request_frames =
223 223 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
224 224 instance->io_request_frames_phy =
225 225 (uint32_t)
226 226 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
227 227
228 228 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
229 229 (void *)instance->io_request_frames));
230 230
231 231 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
232 232 instance->io_request_frames_phy));
233 233
234 234 io_req_base = (uint8_t *)instance->io_request_frames +
235 235 MRSAS_THUNDERBOLT_MSG_SIZE;
236 236 io_req_base_phys = instance->io_request_frames_phy +
237 237 MRSAS_THUNDERBOLT_MSG_SIZE;
238 238
239 239 con_log(CL_DLEVEL3, (CE_NOTE,
240 240 "io req_base_phys 0x%x", io_req_base_phys));
241 241
242 242 for (i = 0; i < max_cmd; i++) {
243 243 cmd = instance->cmd_list[i];
244 244
245 245 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
246 246
247 247 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
248 248 ((uint8_t *)io_req_base + offset);
249 249 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
250 250
251 251 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
252 252 (max_cmd * raid_msg_size) + i * sgl_sz);
253 253
254 254 cmd->sgl_phys_addr = (io_req_base_phys +
255 255 (max_cmd * raid_msg_size) + i * sgl_sz);
256 256
257 257 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
258 258 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
259 259 (i * SENSE_LENGTH));
260 260
261 261 cmd->sense_phys_addr1 = (io_req_base_phys +
262 262 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
263 263 (i * SENSE_LENGTH));
264 264
265 265
266 266 cmd->SMID = i + 1;
267 267
268 268 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
269 269 cmd->index, (void *)cmd->scsi_io_request));
270 270
271 271 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
272 272 cmd->index, cmd->scsi_io_request_phys_addr));
273 273
274 274 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
275 275 cmd->index, (void *)cmd->sense1));
276 276
277 277 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
278 278 cmd->index, cmd->sense_phys_addr1));
279 279
280 280 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
281 281 cmd->index, (void *)cmd->sgl));
282 282
283 283 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
284 284 cmd->index, cmd->sgl_phys_addr));
285 285 }
286 286
287 287 return (DDI_SUCCESS);
288 288
289 289 }
290 290
291 291
292 292 /*
293 293 * alloc_additional_dma_buffer for AEN
294 294 */
295 295 int
296 296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
297 297 {
298 298 uint32_t internal_buf_size = PAGESIZE*2;
299 299 int i;
300 300
301 301 /* Initialize buffer status as free */
302 302 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
303 303 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
304 304 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
305 305 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
306 306
307 307
308 308 instance->mfi_internal_dma_obj.size = internal_buf_size;
309 309 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
310 310 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
311 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
312 312 0xFFFFFFFFU;
313 313 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
314 314
315 315 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
316 316 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
317 317 cmn_err(CE_WARN,
318 318 "mr_sas: could not alloc reply queue");
319 319 return (DDI_FAILURE);
320 320 }
321 321
322 322 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
323 323
324 324 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
325 325 instance->internal_buf =
326 326 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
327 327 instance->internal_buf_dmac_add =
328 328 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
329 329 instance->internal_buf_size = internal_buf_size;
330 330
331 331 /* allocate evt_detail */
332 332 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
333 333 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
334 334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
335 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
336 336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
337 337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
338 338
339 339 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
340 340 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
341 341 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
342 342 "could not allocate data transfer buffer.");
343 343 goto fail_tbolt_additional_buff;
344 344 }
345 345
346 346 bzero(instance->mfi_evt_detail_obj.buffer,
347 347 sizeof (struct mrsas_evt_detail));
348 348
349 349 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
350 350
351 351 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
352 352 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
353 353
354 354 for (i = 0; i < 2; i++) {
355 355 /* allocate the data transfer buffer */
356 356 instance->ld_map_obj[i].size = instance->size_map_info;
357 357 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
358 358 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
359 359 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
360 360 0xFFFFFFFFU;
361 361 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
362 362 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
363 363
364 364 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
365 365 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
366 366 cmn_err(CE_WARN,
367 367 "could not allocate data transfer buffer.");
368 368 goto fail_tbolt_additional_buff;
369 369 }
370 370
371 371 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
372 372
373 373 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
374 374
375 375 instance->ld_map[i] =
376 376 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
377 377 instance->ld_map_phy[i] = (uint32_t)instance->
378 378 ld_map_obj[i].dma_cookie[0].dmac_address;
379 379
380 380 con_log(CL_DLEVEL3, (CE_NOTE,
381 381 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
382 382
383 383 con_log(CL_DLEVEL3, (CE_NOTE,
384 384 "size_map_info 0x%x", instance->size_map_info));
385 385 }
386 386
387 387 return (DDI_SUCCESS);
388 388
389 389 fail_tbolt_additional_buff:
390 390 mrsas_tbolt_free_additional_dma_buffer(instance);
391 391
392 392 return (DDI_FAILURE);
393 393 }
394 394
395 395 MRSAS_REQUEST_DESCRIPTOR_UNION *
396 396 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
397 397 {
398 398 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
399 399
400 400 if (index > instance->max_fw_cmds) {
401 401 con_log(CL_ANN1, (CE_NOTE,
402 402 "Invalid SMID 0x%x request for descriptor", index));
403 403 con_log(CL_ANN1, (CE_NOTE,
404 404 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
405 405 return (NULL);
406 406 }
407 407
408 408 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
409 409 ((char *)instance->request_message_pool +
410 410 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
411 411
412 412 con_log(CL_ANN1, (CE_NOTE,
413 413 "request descriptor : 0x%08lx", (unsigned long)req_desc));
414 414
415 415 con_log(CL_ANN1, (CE_NOTE,
416 416 "request descriptor base phy : 0x%08lx",
417 417 (unsigned long)instance->request_message_pool_phy));
418 418
419 419 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
420 420 }
421 421
422 422
423 423 /*
424 424 * Allocate Request and Reply Queue Descriptors.
425 425 */
426 426 int
427 427 alloc_req_rep_desc(struct mrsas_instance *instance)
428 428 {
429 429 uint32_t request_q_sz, reply_q_sz;
430 430 int i, max_reply_q_sz;
431 431 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
432 432
433 433 /*
434 434  * ThunderBolt(TB): there is no longer a producer-consumer mechanism.
435 435  * Once we get an interrupt we are supposed to scan through the list of
436 436  * reply descriptors and process them accordingly. We need to allocate
437 437  * memory for 1024 reply descriptors.
438 438 */
439 439
440 440 /* Allocate Reply Descriptors */
441 441 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
442 442 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
443 443
444 444 /* reply queue size should be multiple of 16 */
445 445 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
446 446
447 447 reply_q_sz = 8 * max_reply_q_sz;
448 448
449 449
450 450 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
451 451 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
452 452
453 453 instance->reply_desc_dma_obj.size = reply_q_sz;
454 454 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
455 455 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
456 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
457 457 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
458 458 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
459 459
460 460 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
461 461 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
462 462 cmn_err(CE_WARN,
463 463 "mr_sas: could not alloc reply queue");
464 464 return (DDI_FAILURE);
465 465 }
466 466
467 467 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
468 468 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
469 469
470 470 /* virtual address of reply queue */
471 471 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
472 472 instance->reply_desc_dma_obj.buffer);
473 473
474 474 instance->reply_q_depth = max_reply_q_sz;
475 475
476 476 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
477 477 instance->reply_q_depth));
478 478
479 479 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
480 480 (void *)instance->reply_frame_pool));
481 481
482 482 /* initializing reply address to 0xFFFFFFFF */
483 483 reply_desc = instance->reply_frame_pool;
484 484
485 485 for (i = 0; i < instance->reply_q_depth; i++) {
486 486 reply_desc->Words = (uint64_t)~0;
487 487 reply_desc++;
488 488 }
489 489
490 490
491 491 instance->reply_frame_pool_phy =
492 492 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
493 493
494 494 con_log(CL_ANN1, (CE_NOTE,
495 495 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
496 496
497 497
498 498 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
499 499 reply_q_sz);
500 500
501 501 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
502 502 instance->reply_pool_limit_phy));
503 503
504 504
505 505 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
506 506 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 507
508 508 /* Allocate Request Descriptors */
509 509 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
510 510 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
511 511
512 512 request_q_sz = 8 *
513 513 (instance->max_fw_cmds);
514 514
515 515 instance->request_desc_dma_obj.size = request_q_sz;
516 516 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
517 517 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
518 518 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
519 519 0xFFFFFFFFU;
520 520 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
521 521 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
522 522
523 523 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
524 524 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
525 525 cmn_err(CE_WARN,
526 526 "mr_sas: could not alloc request queue desc");
527 527 goto fail_undo_reply_queue;
528 528 }
529 529
530 530 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
531 531 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
532 532
533 533 /* virtual address of request queue desc */
534 534 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
535 535 (instance->request_desc_dma_obj.buffer);
536 536
537 537 instance->request_message_pool_phy =
538 538 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
539 539
540 540 return (DDI_SUCCESS);
541 541
542 542 fail_undo_reply_queue:
543 543 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
544 544 (void) mrsas_free_dma_obj(instance,
545 545 instance->reply_desc_dma_obj);
546 546 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
547 547 }
548 548
549 549 return (DDI_FAILURE);
550 550 }
551 551
552 552 /*
553 553 * mrsas_alloc_cmd_pool_tbolt
554 554 *
555 555 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
556 556 * routine
557 557 */
558 558 int
559 559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
560 560 {
561 561 int i;
562 562 int count;
563 563 uint32_t max_cmd;
564 564 uint32_t reserve_cmd;
565 565 size_t sz;
566 566
567 567 struct mrsas_cmd *cmd;
568 568
569 569 max_cmd = instance->max_fw_cmds;
570 570 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
571 571 "max_cmd %x", max_cmd));
572 572
573 573
574 574 sz = sizeof (struct mrsas_cmd *) * max_cmd;
575 575
576 576 /*
577 577 * instance->cmd_list is an array of struct mrsas_cmd pointers.
578 578 * Allocate the dynamic array first and then allocate individual
579 579 * commands.
580 580 */
581 581 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
582 582
583 583 /* create a frame pool and assign one frame to each cmd */
584 584 for (count = 0; count < max_cmd; count++) {
585 585 instance->cmd_list[count] =
586 586 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
587 587 }
588 588
589 589 /* add all the commands to command pool */
590 590
591 591 INIT_LIST_HEAD(&instance->cmd_pool_list);
592 592 INIT_LIST_HEAD(&instance->cmd_pend_list);
593 593 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
594 594
595 595 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
596 596
597 597 /* cmd index 0 reserved for IOC INIT */
598 598 for (i = 1; i < reserve_cmd; i++) {
599 599 cmd = instance->cmd_list[i];
600 600 cmd->index = i;
601 601 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
602 602 }
603 603
604 604
605 605 for (i = reserve_cmd; i < max_cmd; i++) {
606 606 cmd = instance->cmd_list[i];
607 607 cmd->index = i;
608 608 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
609 609 }
610 610
611 611 return (DDI_SUCCESS);
612 612
613 613 mrsas_undo_cmds:
614 614 if (count > 0) {
615 615 /* free each cmd */
616 616 for (i = 0; i < count; i++) {
617 617 if (instance->cmd_list[i] != NULL) {
618 618 kmem_free(instance->cmd_list[i],
619 619 sizeof (struct mrsas_cmd));
620 620 }
621 621 instance->cmd_list[i] = NULL;
622 622 }
623 623 }
624 624
625 625 mrsas_undo_cmd_list:
626 626 if (instance->cmd_list != NULL)
627 627 kmem_free(instance->cmd_list, sz);
628 628 instance->cmd_list = NULL;
629 629
630 630 return (DDI_FAILURE);
631 631 }
632 632
633 633
634 634 /*
635 635 * free_space_for_mpi2
636 636 */
637 637 void
638 638 free_space_for_mpi2(struct mrsas_instance *instance)
639 639 {
640 640 /* already freed */
641 641 if (instance->cmd_list == NULL) {
642 642 return;
643 643 }
644 644
645 645 /* First free the additional DMA buffer */
646 646 mrsas_tbolt_free_additional_dma_buffer(instance);
647 647
648 648 /* Free the request/reply descriptor pool */
649 649 free_req_rep_desc_pool(instance);
650 650
651 651 /* Free the MPI message pool */
652 652 destroy_mpi2_frame_pool(instance);
653 653
654 654 /* Free the MFI frame pool */
655 655 destroy_mfi_frame_pool(instance);
656 656
657 657 /* Free all the commands in the cmd_list */
658 658 /* Free the cmd_list buffer itself */
659 659 mrsas_free_cmd_pool(instance);
660 660 }
661 661
662 662
663 663 /*
664 664 * ThunderBolt(TB) memory allocations for commands/messages/frames.
665 665 */
666 666 int
667 667 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 668 {
669 669 /* Allocate command pool (memory for cmd_list & individual commands) */
670 670 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 671 cmn_err(CE_WARN, "Error creating cmd pool");
672 672 return (DDI_FAILURE);
673 673 }
674 674
675 675 /* Initialize single reply size and Message size */
676 676 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 677 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 678
679 679 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 680 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 681 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 682 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 683 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 684
685 685 /* Reduce SG count by 1 to take care of group cmds feature in FW */
686 686 instance->max_num_sge = (instance->max_sge_in_main_msg +
687 687 instance->max_sge_in_chain - 2);
688 688 instance->chain_offset_mpt_msg =
689 689 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 690 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 691 sizeof (MPI2_SGE_IO_UNION)) / 16;
692 692 instance->reply_read_index = 0;
693 693
694 694
695 695 /* Allocate Request and Reply descriptors Array */
696 696 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
697 697 if (alloc_req_rep_desc(instance)) {
698 698 cmn_err(CE_WARN,
699 699 "Error, allocating memory for descriptor-pool");
700 700 goto mpi2_undo_cmd_pool;
701 701 }
702 702 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 703 instance->request_message_pool_phy));
704 704
705 705
706 706 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 707 if (create_mfi_frame_pool(instance)) {
708 708 cmn_err(CE_WARN,
709 709 "Error, allocating memory for MFI frame-pool");
710 710 goto mpi2_undo_descripter_pool;
711 711 }
712 712
713 713
714 714 /* Allocate MPI2 Message pool */
715 715 /*
716 716  * Make sure the buffer is aligned to 256 for the raid message packet;
717 717 * create a io request pool and assign one frame to each cmd
718 718 */
719 719
720 720 if (create_mpi2_frame_pool(instance)) {
721 721 cmn_err(CE_WARN,
722 722 "Error, allocating memory for MPI2 Message-pool");
723 723 goto mpi2_undo_mfi_frame_pool;
724 724 }
725 725
726 726 #ifdef DEBUG
727 727 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 728 instance->max_sge_in_main_msg));
729 729 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 730 instance->max_sge_in_chain));
731 731 con_log(CL_ANN1, (CE_CONT,
732 732 "[max_sge]0x%x", instance->max_num_sge));
733 733 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 734 instance->chain_offset_mpt_msg));
735 735 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 736 instance->chain_offset_io_req));
737 737 #endif
738 738
739 739
740 740 /* Allocate additional dma buffer */
741 741 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 742 cmn_err(CE_WARN,
743 743 "Error, allocating tbolt additional DMA buffer");
744 744 goto mpi2_undo_message_pool;
745 745 }
746 746
747 747 return (DDI_SUCCESS);
748 748
749 749 mpi2_undo_message_pool:
750 750 destroy_mpi2_frame_pool(instance);
751 751
752 752 mpi2_undo_mfi_frame_pool:
753 753 destroy_mfi_frame_pool(instance);
754 754
755 755 mpi2_undo_descripter_pool:
756 756 free_req_rep_desc_pool(instance);
757 757
758 758 mpi2_undo_cmd_pool:
759 759 mrsas_free_cmd_pool(instance);
760 760
761 761 return (DDI_FAILURE);
762 762 }
763 763
764 764
765 765 /*
766 766 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767 767 */
768 768 int
769 769 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
770 770 {
771 771
772 772 /*
773 773 * Reduce the max supported cmds by 1. This is to ensure that the
774 774 * reply_q_sz (1 more than the max cmd that driver may send)
775 775 * does not exceed max cmds that the FW can support
776 776 */
777 777
778 778 if (instance->max_fw_cmds > 1008) {
779 779 instance->max_fw_cmds = 1008;
780 780 instance->max_fw_cmds = instance->max_fw_cmds-1;
781 781 }
782 782
783 783 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 784 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 785
786 786
787 787 /* create a pool of commands */
788 788 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 789 cmn_err(CE_WARN,
790 790 " alloc_space_for_mpi2() failed.");
791 791
792 792 return (DDI_FAILURE);
793 793 }
794 794
795 795 /* Send ioc init message */
796 796 /* NOTE: the issue_init call does FMA checking already. */
797 797 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
798 798 cmn_err(CE_WARN,
799 799 " mrsas_issue_init_mpi2() failed.");
800 800
801 801 goto fail_init_fusion;
802 802 }
803 803
804 804 instance->unroll.alloc_space_mpi2 = 1;
805 805
806 806 con_log(CL_ANN, (CE_NOTE,
807 807 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
808 808
809 809 return (DDI_SUCCESS);
810 810
811 811 fail_init_fusion:
812 812 free_space_for_mpi2(instance);
813 813
814 814 return (DDI_FAILURE);
815 815 }
816 816
817 817
818 818
819 819 /*
820 820 * init_mpi2
821 821 */
822 822 int
823 823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
824 824 {
825 825 dma_obj_t init2_dma_obj;
826 826 int ret_val = DDI_SUCCESS;
827 827
828 828 /* allocate DMA buffer for IOC INIT message */
829 829 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
830 830 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
831 831 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
832 832 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
833 833 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
834 834 init2_dma_obj.dma_attr.dma_attr_align = 256;
835 835
836 836 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
837 837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
838 838 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
839 839 "could not allocate data transfer buffer.");
840 840 return (DDI_FAILURE);
841 841 }
842 842 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
843 843
844 844 con_log(CL_ANN1, (CE_NOTE,
845 845 "mrsas_issue_init_mpi2 _phys adr: %x",
846 846 init2_dma_obj.dma_cookie[0].dmac_address));
847 847
848 848
849 849 /* Initialize and send ioc init message */
850 850 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
851 851 if (ret_val == DDI_FAILURE) {
852 852 con_log(CL_ANN1, (CE_WARN,
853 853 "mrsas_issue_init_mpi2: Failed"));
854 854 goto fail_init_mpi2;
855 855 }
856 856
857 857 /* free IOC init DMA buffer */
858 858 if (mrsas_free_dma_obj(instance, init2_dma_obj)
859 859 != DDI_SUCCESS) {
860 860 con_log(CL_ANN1, (CE_WARN,
861 861 "mrsas_issue_init_mpi2: Free Failed"));
862 862 return (DDI_FAILURE);
863 863 }
864 864
865 865 /* Get/Check and sync ld_map info */
866 866 instance->map_id = 0;
867 867 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
868 868 (void) mrsas_tbolt_sync_map_info(instance);
869 869
870 870
871 871 /* No mrsas_cmd to send, so send NULL. */
872 872 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
873 873 goto fail_init_mpi2;
874 874
875 875 con_log(CL_ANN, (CE_NOTE,
876 876 "mrsas_issue_init_mpi2: SUCCESSFUL"));
877 877
878 878 return (DDI_SUCCESS);
879 879
880 880 fail_init_mpi2:
881 881 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
882 882
883 883 return (DDI_FAILURE);
884 884 }
885 885
886 886 static int
887 887 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
888 888 {
889 889 int numbytes;
890 890 uint16_t flags;
891 891 struct mrsas_init_frame2 *mfiFrameInit2;
892 892 struct mrsas_header *frame_hdr;
893 893 Mpi2IOCInitRequest_t *init;
894 894 struct mrsas_cmd *cmd = NULL;
895 895 struct mrsas_drv_ver drv_ver_info;
896 896 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
897 897
898 898 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
899 899
900 900
901 901 #ifdef DEBUG
902 902 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
903 903 (int)sizeof (*mfiFrameInit2)));
904 904 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
905 905 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
906 906 (int)sizeof (struct mrsas_init_frame2)));
907 907 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
908 908 (int)sizeof (Mpi2IOCInitRequest_t)));
909 909 #endif
910 910
911 911 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
912 912 numbytes = sizeof (*init);
913 913 bzero(init, numbytes);
914 914
915 915 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
916 916 MPI2_FUNCTION_IOC_INIT);
917 917
918 918 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
919 919 MPI2_WHOINIT_HOST_DRIVER);
920 920
921 921 /* set MsgVersion and HeaderVersion host driver was built with */
922 922 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
923 923 MPI2_VERSION);
924 924
925 925 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
926 926 MPI2_HEADER_VERSION);
927 927
928 928 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
929 929 instance->raid_io_msg_size / 4);
930 930
931 931 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
932 932 0);
933 933
934 934 ddi_put16(mpi2_dma_obj->acc_handle,
935 935 &init->ReplyDescriptorPostQueueDepth,
936 936 instance->reply_q_depth);
937 937 /*
938 938 * These addresses are set using the DMA cookie addresses from when the
939 939 * memory was allocated. Sense buffer hi address should be 0.
940 940 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
941 941 */
942 942
943 943 ddi_put32(mpi2_dma_obj->acc_handle,
944 944 &init->SenseBufferAddressHigh, 0);
945 945
946 946 ddi_put64(mpi2_dma_obj->acc_handle,
947 947 (uint64_t *)&init->SystemRequestFrameBaseAddress,
948 948 instance->io_request_frames_phy);
949 949
950 950 ddi_put64(mpi2_dma_obj->acc_handle,
951 951 &init->ReplyDescriptorPostQueueAddress,
952 952 instance->reply_frame_pool_phy);
953 953
954 954 ddi_put64(mpi2_dma_obj->acc_handle,
955 955 &init->ReplyFreeQueueAddress, 0);
956 956
957 957 cmd = instance->cmd_list[0];
958 958 if (cmd == NULL) {
959 959 return (DDI_FAILURE);
960 960 }
961 961 cmd->retry_count_for_ocr = 0;
962 962 cmd->pkt = NULL;
963 963 cmd->drv_pkt_time = 0;
964 964
965 965 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
966 966 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
967 967
968 968 frame_hdr = &cmd->frame->hdr;
969 969
970 970 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
971 971 MFI_CMD_STATUS_POLL_MODE);
972 972
973 973 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
974 974
975 975 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
976 976
977 977 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
978 978
979 979 con_log(CL_ANN, (CE_CONT,
980 980 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
981 981
982 982 /* Init the MFI Header */
983 983 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
984 984 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
985 985
986 986 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
987 987
988 988 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
989 989 &mfiFrameInit2->cmd_status,
990 990 MFI_STAT_INVALID_STATUS);
991 991
992 992 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
993 993
994 994 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 995 &mfiFrameInit2->queue_info_new_phys_addr_lo,
996 996 mpi2_dma_obj->dma_cookie[0].dmac_address);
997 997
998 998 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 999 &mfiFrameInit2->data_xfer_len,
1000 1000 sizeof (Mpi2IOCInitRequest_t));
1001 1001
1002 1002 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1003 1003 (int)init->ReplyDescriptorPostQueueAddress));
1004 1004
1005 1005 /* fill driver version information */
1006 1006 fill_up_drv_ver(&drv_ver_info);
1007 1007
1008 1008 /* allocate the driver version data transfer buffer */
1009 1009 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1010 1010 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1011 1011 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1012 1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1013 1013 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1014 1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1015 1015
1016 1016 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1017 1017 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1018 1018 cmn_err(CE_WARN,
1019 1019 "fusion init: Could not allocate driver version buffer.");
1020 1020 return (DDI_FAILURE);
1021 1021 }
1022 1022 /* copy driver version to dma buffer */
1023 1023 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1024 1024 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1025 1025 (uint8_t *)drv_ver_info.drv_ver,
1026 1026 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1027 1027 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1028 1028
1029 1029 /* send driver version physical address to firmware */
1030 1030 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1031 1031 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1032 1032
1033 1033 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1034 1034 mfiFrameInit2->queue_info_new_phys_addr_lo,
1035 1035 (int)sizeof (Mpi2IOCInitRequest_t)));
1036 1036
1037 1037 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1038 1038
1039 1039 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1040 1040 cmd->scsi_io_request_phys_addr,
1041 1041 (int)sizeof (struct mrsas_init_frame2)));
1042 1042
1043 1043 /* disable interrupts before sending INIT2 frame */
1044 1044 instance->func_ptr->disable_intr(instance);
1045 1045
1046 1046 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1047 1047 instance->request_message_pool;
1048 1048 req_desc->Words = cmd->scsi_io_request_phys_addr;
1049 1049 req_desc->MFAIo.RequestFlags =
1050 1050 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1051 1051
1052 1052 cmd->request_desc = req_desc;
1053 1053
1054 1054 /* issue the init frame */
1055 1055 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1056 1056
1057 1057 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1058 1058 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1059 1059 frame_hdr->cmd_status));
1060 1060
1061 1061 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1062 1062 &mfiFrameInit2->cmd_status) == 0) {
1063 1063 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1064 1064 } else {
1065 1065 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1066 1066 mrsas_dump_reply_desc(instance);
1067 1067 goto fail_ioc_init;
1068 1068 }
1069 1069
1070 1070 mrsas_dump_reply_desc(instance);
1071 1071
1072 1072 instance->unroll.verBuff = 1;
1073 1073
1074 1074 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1075 1075
1076 1076 return (DDI_SUCCESS);
1077 1077
1078 1078
1079 1079 fail_ioc_init:
1080 1080
1081 1081 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1082 1082
1083 1083 return (DDI_FAILURE);
1084 1084 }
1085 1085
1086 1086 int
1087 1087 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1088 1088 {
1089 1089 int i;
1090 1090 uint32_t wait_time = dump_io_wait_time;
1091 1091 for (i = 0; i < wait_time; i++) {
1092 1092 /*
1093 1093 * Check For Outstanding poll Commands
1094 1094 * except ldsync command and aen command
1095 1095 */
1096 1096 if (instance->fw_outstanding <= 2) {
1097 1097 break;
1098 1098 }
1099 1099 drv_usecwait(10*MILLISEC);
1100 1100 /* complete commands from reply queue */
1101 1101 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1102 1102 }
1103 1103 if (instance->fw_outstanding > 2) {
1104 1104 return (1);
1105 1105 }
1106 1106 return (0);
1107 1107 }
1108 1108 /*
1109 1109 * scsi_pkt handling
1110 1110 *
1111 1111 * Visible to the external world via the transport structure.
1112 1112 */
1113 1113
1114 1114 int
1115 1115 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1116 1116 {
1117 1117 struct mrsas_instance *instance = ADDR2MR(ap);
1118 1118 struct scsa_cmd *acmd = PKT2CMD(pkt);
1119 1119 struct mrsas_cmd *cmd = NULL;
1120 1120 uchar_t cmd_done = 0;
1121 1121
1122 1122 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1123 1123 if (instance->deadadapter == 1) {
1124 1124 cmn_err(CE_WARN,
1125 1125 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1126 1126 "for IO, as the HBA doesn't take any more IOs");
1127 1127 if (pkt) {
1128 1128 pkt->pkt_reason = CMD_DEV_GONE;
1129 1129 pkt->pkt_statistics = STAT_DISCON;
1130 1130 }
1131 1131 return (TRAN_FATAL_ERROR);
1132 1132 }
1133 1133 if (instance->adapterresetinprogress) {
1134 1134 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1135 1135 "returning mfi_pkt and setting TRAN_BUSY\n"));
1136 1136 return (TRAN_BUSY);
1137 1137 }
1138 1138 (void) mrsas_tbolt_prepare_pkt(acmd);
1139 1139
1140 1140 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1141 1141
1142 1142 /*
1143 1143 * Check if the command is already completed by the mrsas_build_cmd()
1144 1144  * routine, in which case the busy_flag would be clear, scb would be
1145 1145  * NULL, and an appropriate reason would be provided in the pkt_reason field.
1146 1146 */
1147 1147 if (cmd_done) {
1148 1148 pkt->pkt_reason = CMD_CMPLT;
1149 1149 pkt->pkt_scbp[0] = STATUS_GOOD;
1150 1150 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1151 1151 | STATE_SENT_CMD;
1152 1152 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1153 1153 (*pkt->pkt_comp)(pkt);
1154 1154 }
1155 1155
1156 1156 return (TRAN_ACCEPT);
1157 1157 }
1158 1158
1159 1159 if (cmd == NULL) {
1160 1160 return (TRAN_BUSY);
1161 1161 }
1162 1162
1163 1163
1164 1164 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1165 1165 if (instance->fw_outstanding > instance->max_fw_cmds) {
1166 1166 cmn_err(CE_WARN,
1167 1167 "Command Queue Full... Returning BUSY");
1168 1168 return_raid_msg_pkt(instance, cmd);
1169 1169 return (TRAN_BUSY);
1170 1170 }
1171 1171
1172 1172 /* Synchronize the Cmd frame for the controller */
1173 1173 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1174 1174 DDI_DMA_SYNC_FORDEV);
1175 1175
1176 1176 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1177 1177 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1178 1178 cmd->index, cmd->SMID));
1179 1179
1180 1180 instance->func_ptr->issue_cmd(cmd, instance);
1181 1181 } else {
1182 1182 instance->func_ptr->issue_cmd(cmd, instance);
1183 1183 (void) wait_for_outstanding_poll_io(instance);
1184 1184 (void) mrsas_common_check(instance, cmd);
1185 1185 }
1186 1186
1187 1187 return (TRAN_ACCEPT);
1188 1188 }
1189 1189
1190 1190 /*
1191 1191 * prepare the pkt:
1192 1192 * the pkt may have been resubmitted or just reused so
1193 1193 * initialize some fields and do some checks.
1194 1194 */
1195 1195 static int
1196 1196 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1197 1197 {
1198 1198 struct scsi_pkt *pkt = CMD2PKT(acmd);
1199 1199
1200 1200
1201 1201 /*
1202 1202 * Reinitialize some fields that need it; the packet may
1203 1203 * have been resubmitted
1204 1204 */
1205 1205 pkt->pkt_reason = CMD_CMPLT;
1206 1206 pkt->pkt_state = 0;
1207 1207 pkt->pkt_statistics = 0;
1208 1208 pkt->pkt_resid = 0;
1209 1209
1210 1210 /*
1211 1211 * zero status byte.
1212 1212 */
1213 1213 *(pkt->pkt_scbp) = 0;
1214 1214
1215 1215 return (0);
1216 1216 }
1217 1217
1218 1218
1219 1219 int
1220 1220 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1221 1221 struct scsa_cmd *acmd,
1222 1222 struct mrsas_cmd *cmd,
1223 1223 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1224 1224 uint32_t *datalen)
1225 1225 {
1226 1226 uint32_t MaxSGEs;
1227 1227 int sg_to_process;
1228 1228 uint32_t i, j;
1229 1229 uint32_t numElements, endElement;
1230 1230 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1231 1231 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1232 1232 ddi_acc_handle_t acc_handle =
1233 1233 instance->mpi2_frame_pool_dma_obj.acc_handle;
1234 1234
1235 1235 con_log(CL_ANN1, (CE_NOTE,
1236 1236 "chkpnt: Building Chained SGL :%d", __LINE__));
1237 1237
1238 1238 /* Calculate SGE size in number of Words (32-bit) */
1239 1239 /* Clear the datalen before updating it. */
1240 1240 *datalen = 0;
1241 1241
1242 1242 MaxSGEs = instance->max_sge_in_main_msg;
1243 1243
1244 1244 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1245 1245 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1246 1246
1247 1247 /* set data transfer flag. */
1248 1248 if (acmd->cmd_flags & CFLAG_DMASEND) {
1249 1249 ddi_put32(acc_handle, &scsi_raid_io->Control,
1250 1250 MPI2_SCSIIO_CONTROL_WRITE);
1251 1251 } else {
1252 1252 ddi_put32(acc_handle, &scsi_raid_io->Control,
1253 1253 MPI2_SCSIIO_CONTROL_READ);
1254 1254 }
1255 1255
1256 1256
1257 1257 numElements = acmd->cmd_cookiecnt;
1258 1258
1259 1259 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1260 1260
1261 1261 if (numElements > instance->max_num_sge) {
1262 1262 con_log(CL_ANN, (CE_NOTE,
1263 1263 "[Max SGE Count Exceeded]:%x", numElements));
1264 1264 return (numElements);
1265 1265 }
1266 1266
1267 1267 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1268 1268 (uint8_t)numElements);
1269 1269
1270 1270 /* set end element in main message frame */
1271 1271 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1272 1272
1273 1273 /* prepare the scatter-gather list for the firmware */
1274 1274 scsi_raid_io_sgl_ieee =
1275 1275 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1276 1276
1277 1277 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1278 1278 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1279 1279 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1280 1280
1281 1281 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1282 1282 }
1283 1283
1284 1284 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1285 1285 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1286 1286 acmd->cmd_dmacookies[i].dmac_laddress);
1287 1287
1288 1288 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1289 1289 acmd->cmd_dmacookies[i].dmac_size);
1290 1290
1291 1291 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1292 1292
1293 1293 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1294 1294 if (i == (numElements - 1)) {
1295 1295 ddi_put8(acc_handle,
1296 1296 &scsi_raid_io_sgl_ieee->Flags,
1297 1297 IEEE_SGE_FLAGS_END_OF_LIST);
1298 1298 }
1299 1299 }
1300 1300
1301 1301 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1302 1302
1303 1303 #ifdef DEBUG
1304 1304 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1305 1305 scsi_raid_io_sgl_ieee->Address));
1306 1306 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1307 1307 scsi_raid_io_sgl_ieee->Length));
1308 1308 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1309 1309 scsi_raid_io_sgl_ieee->Flags));
1310 1310 #endif
1311 1311
1312 1312 }
1313 1313
1314 1314 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1315 1315
1316 1316 /* check if chained SGL required */
1317 1317 if (i < numElements) {
1318 1318
1319 1319 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1320 1320
1321 1321 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1322 1322 uint16_t ioFlags =
1323 1323 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1324 1324
1325 1325 if ((ioFlags &
1326 1326 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1327 1327 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1328 1328 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1329 1329 (U8)instance->chain_offset_io_req);
1330 1330 } else {
1331 1331 ddi_put8(acc_handle,
1332 1332 &scsi_raid_io->ChainOffset, 0);
1333 1333 }
1334 1334 } else {
1335 1335 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1336 1336 (U8)instance->chain_offset_io_req);
1337 1337 }
1338 1338
1339 1339 /* prepare physical chain element */
1340 1340 ieeeChainElement = scsi_raid_io_sgl_ieee;
1341 1341
1342 1342 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1343 1343
1344 1344 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1345 1345 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1346 1346 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1347 1347 } else {
1348 1348 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1349 1349 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1350 1350 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1351 1351 }
1352 1352
1353 1353 ddi_put32(acc_handle, &ieeeChainElement->Length,
1354 1354 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1355 1355
1356 1356 ddi_put64(acc_handle, &ieeeChainElement->Address,
1357 1357 (U64)cmd->sgl_phys_addr);
1358 1358
1359 1359 sg_to_process = numElements - i;
1360 1360
1361 1361 con_log(CL_ANN1, (CE_NOTE,
1362 1362 "[Additional SGE Count]:%x", endElement));
1363 1363
1364 1364 /* point to the chained SGL buffer */
1365 1365 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1366 1366
1367 1367 /* build rest of the SGL in chained buffer */
1368 1368 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1369 1369 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1370 1370
1371 1371 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1372 1372 acmd->cmd_dmacookies[i].dmac_laddress);
1373 1373
1374 1374 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1375 1375 acmd->cmd_dmacookies[i].dmac_size);
1376 1376
1377 1377 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1378 1378
1379 1379 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1380 1380 if (i == (numElements - 1)) {
1381 1381 ddi_put8(acc_handle,
1382 1382 &scsi_raid_io_sgl_ieee->Flags,
1383 1383 IEEE_SGE_FLAGS_END_OF_LIST);
1384 1384 }
1385 1385 }
1386 1386
1387 1387 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1388 1388
1389 1389 #if DEBUG
1390 1390 con_log(CL_DLEVEL1, (CE_NOTE,
1391 1391 "[SGL Address]: %" PRIx64,
1392 1392 scsi_raid_io_sgl_ieee->Address));
1393 1393 con_log(CL_DLEVEL1, (CE_NOTE,
1394 1394 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1395 1395 con_log(CL_DLEVEL1, (CE_NOTE,
1396 1396 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1397 1397 #endif
1398 1398
1399 1399 i++;
1400 1400 }
1401 1401 }
1402 1402
1403 1403 return (0);
1404 1404 } /* end of BuildScatterGather */
1405 1405
1406 1406
1407 1407 /*
1408 1408 * build_cmd
1409 1409 */
1410 1410 static struct mrsas_cmd *
1411 1411 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1412 1412 struct scsi_pkt *pkt, uchar_t *cmd_done)
1413 1413 {
1414 1414 uint8_t fp_possible = 0;
1415 1415 uint32_t index;
1416 1416 uint32_t lba_count = 0;
1417 1417 uint32_t start_lba_hi = 0;
1418 1418 uint32_t start_lba_lo = 0;
1419 1419 ddi_acc_handle_t acc_handle =
1420 1420 instance->mpi2_frame_pool_dma_obj.acc_handle;
1421 1421 struct mrsas_cmd *cmd = NULL;
1422 1422 struct scsa_cmd *acmd = PKT2CMD(pkt);
1423 1423 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1424 1424 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1425 1425 uint32_t datalen;
1426 1426 struct IO_REQUEST_INFO io_info;
1427 1427 MR_FW_RAID_MAP_ALL *local_map_ptr;
1428 1428 uint16_t pd_cmd_cdblen;
1429 1429
1430 1430 con_log(CL_DLEVEL1, (CE_NOTE,
1431 1431 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1432 1432
1433 1433 /* find out if this is logical or physical drive command. */
1434 1434 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1435 1435 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1436 1436
1437 1437 *cmd_done = 0;
1438 1438
1439 1439 /* get the command packet */
1440 1440 if (!(cmd = get_raid_msg_pkt(instance))) {
1441 1441 return (NULL);
1442 1442 }
1443 1443
1444 1444 index = cmd->index;
1445 1445 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1446 1446 ReqDescUnion->Words = 0;
1447 1447 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1448 1448 ReqDescUnion->SCSIIO.RequestFlags =
1449 1449 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1450 1450 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1451 1451
1452 1452
1453 1453 cmd->request_desc = ReqDescUnion;
1454 1454 cmd->pkt = pkt;
1455 1455 cmd->cmd = acmd;
1456 1456
1457 1457 /* lets get the command directions */
1458 1458 if (acmd->cmd_flags & CFLAG_DMASEND) {
1459 1459 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1460 1460 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1461 1461 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1462 1462 DDI_DMA_SYNC_FORDEV);
1463 1463 }
1464 1464 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1465 1465 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1466 1466 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1467 1467 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1468 1468 DDI_DMA_SYNC_FORCPU);
1469 1469 }
1470 1470 } else {
1471 1471 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1472 1472 }
1473 1473
1474 1474
1475 1475 /* get SCSI_IO raid message frame pointer */
1476 1476 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1477 1477
1478 1478 /* zero out SCSI_IO raid message frame */
1479 1479 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1480 1480
1481 1481 /* Set the ldTargetId set by BuildRaidContext() */
1482 1482 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1483 1483 acmd->device_id);
1484 1484
1485 1485 /* Copy CDB to scsi_io_request message frame */
1486 1486 ddi_rep_put8(acc_handle,
1487 1487 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1488 1488 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1489 1489
1490 1490 /*
1491 1491 * Just the CDB length, rest of the Flags are zero
1492 1492 * This will be modified later.
1493 1493 */
1494 1494 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1495 1495
1496 1496 pd_cmd_cdblen = acmd->cmd_cdblen;
1497 1497
1498 1498 switch (pkt->pkt_cdbp[0]) {
1499 1499 case SCMD_READ:
1500 1500 case SCMD_WRITE:
1501 1501 case SCMD_READ_G1:
1502 1502 case SCMD_WRITE_G1:
1503 1503 case SCMD_READ_G4:
1504 1504 case SCMD_WRITE_G4:
1505 1505 case SCMD_READ_G5:
1506 1506 case SCMD_WRITE_G5:
1507 1507
1508 1508 if (acmd->islogical) {
1509 1509 /* Initialize sense Information */
1510 1510 if (cmd->sense1 == NULL) {
1511 1511 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1512 1512 "Sense buffer ptr NULL "));
1513 1513 }
1514 1514 bzero(cmd->sense1, SENSE_LENGTH);
1515 1515 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1516 1516 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1517 1517
1518 1518 if (acmd->cmd_cdblen == CDB_GROUP0) {
1519 1519 /* 6-byte cdb */
1520 1520 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1521 1521 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1522 1522 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1523 1523 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1524 1524 << 16));
1525 1525 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1526 1526 /* 10-byte cdb */
1527 1527 lba_count =
1528 1528 (((uint16_t)(pkt->pkt_cdbp[8])) |
1529 1529 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1530 1530
1531 1531 start_lba_lo =
1532 1532 (((uint32_t)(pkt->pkt_cdbp[5])) |
1533 1533 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1534 1534 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1535 1535 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1536 1536
1537 1537 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1538 1538 /* 12-byte cdb */
1539 1539 lba_count = (
1540 1540 ((uint32_t)(pkt->pkt_cdbp[9])) |
1541 1541 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1542 1542 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1543 1543 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1544 1544
1545 1545 start_lba_lo =
1546 1546 (((uint32_t)(pkt->pkt_cdbp[5])) |
1547 1547 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1548 1548 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1549 1549 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1550 1550
1551 1551 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1552 1552 /* 16-byte cdb */
1553 1553 lba_count = (
1554 1554 ((uint32_t)(pkt->pkt_cdbp[13])) |
1555 1555 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1556 1556 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1557 1557 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1558 1558
1559 1559 start_lba_lo = (
1560 1560 ((uint32_t)(pkt->pkt_cdbp[9])) |
1561 1561 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1562 1562 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1563 1563 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1564 1564
1565 1565 start_lba_hi = (
1566 1566 ((uint32_t)(pkt->pkt_cdbp[5])) |
1567 1567 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1568 1568 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1569 1569 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1570 1570 }
1571 1571
1572 1572 if (instance->tbolt &&
1573 1573 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1574 1574 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1575 1575 "controller limit 0x%x sectors",
1576 1576 lba_count);
1577 1577 }
1578 1578
1579 1579 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1580 1580 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1581 1581 start_lba_lo;
1582 1582 io_info.numBlocks = lba_count;
1583 1583 io_info.ldTgtId = acmd->device_id;
1584 1584
1585 1585 if (acmd->cmd_flags & CFLAG_DMASEND)
1586 1586 io_info.isRead = 0;
1587 1587 else
1588 1588 io_info.isRead = 1;
1589 1589
1590 1590
1591 1591 /* Acquire SYNC MAP UPDATE lock */
1592 1592 mutex_enter(&instance->sync_map_mtx);
1593 1593
1594 1594 local_map_ptr =
1595 1595 instance->ld_map[(instance->map_id & 1)];
1596 1596
1597 1597 if ((MR_TargetIdToLdGet(
1598 1598 acmd->device_id, local_map_ptr) >=
1599 1599 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1600 1600 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1601 1601 "targetId >= MAX_LOGICAL_DRIVES || "
1602 1602 "!instance->fast_path_io");
1603 1603 fp_possible = 0;
1604 1604 /* Set Regionlock flags to BYPASS */
1605 1605 /* io_request->RaidContext.regLockFlags = 0; */
1606 1606 ddi_put8(acc_handle,
1607 1607 &scsi_raid_io->RaidContext.regLockFlags, 0);
1608 1608 } else {
1609 1609 if (MR_BuildRaidContext(instance, &io_info,
1610 1610 &scsi_raid_io->RaidContext, local_map_ptr))
1611 1611 fp_possible = io_info.fpOkForIo;
1612 1612 }
1613 1613
1614 1614 if (!enable_fp)
1615 1615 fp_possible = 0;
1616 1616
1617 1617 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1618 1618 "instance->fast_path_io %d fp_possible %d",
1619 1619 enable_fp, instance->fast_path_io, fp_possible));
1620 1620
1621 1621 if (fp_possible) {
1622 1622
1623 1623 /* Check for DIF enabled LD */
1624 1624 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1625 1625 /* Prepare 32 Byte CDB for DIF capable Disk */
1626 1626 mrsas_tbolt_prepare_cdb(instance,
1627 1627 scsi_raid_io->CDB.CDB32,
1628 1628 &io_info, scsi_raid_io, start_lba_lo);
1629 1629 } else {
1630 1630 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1631 1631 (uint8_t *)&pd_cmd_cdblen,
1632 1632 io_info.pdBlock, io_info.numBlocks);
1633 1633 ddi_put16(acc_handle,
1634 1634 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1635 1635 }
1636 1636
1637 1637 ddi_put8(acc_handle, &scsi_raid_io->Function,
1638 1638 MPI2_FUNCTION_SCSI_IO_REQUEST);
1639 1639
1640 1640 ReqDescUnion->SCSIIO.RequestFlags =
1641 1641 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1642 1642 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1643 1643
1644 1644 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1645 1645 uint8_t regLockFlags = ddi_get8(acc_handle,
1646 1646 &scsi_raid_io->RaidContext.regLockFlags);
1647 1647 uint16_t IoFlags = ddi_get16(acc_handle,
1648 1648 &scsi_raid_io->IoFlags);
1649 1649
1650 1650 if (regLockFlags == REGION_TYPE_UNUSED)
1651 1651 ReqDescUnion->SCSIIO.RequestFlags =
1652 1652 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1653 1653 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1654 1654
1655 1655 IoFlags |=
1656 1656 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1657 1657 regLockFlags |=
1658 1658 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1659 1659 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1660 1660
1661 1661 ddi_put8(acc_handle,
1662 1662 &scsi_raid_io->ChainOffset, 0);
1663 1663 ddi_put8(acc_handle,
1664 1664 &scsi_raid_io->RaidContext.nsegType,
1665 1665 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1666 1666 MPI2_TYPE_CUDA));
1667 1667 ddi_put8(acc_handle,
1668 1668 &scsi_raid_io->RaidContext.regLockFlags,
1669 1669 regLockFlags);
1670 1670 ddi_put16(acc_handle,
1671 1671 &scsi_raid_io->IoFlags, IoFlags);
1672 1672 }
1673 1673
1674 1674 if ((instance->load_balance_info[
1675 1675 acmd->device_id].loadBalanceFlag) &&
1676 1676 (io_info.isRead)) {
1677 1677 io_info.devHandle =
1678 1678 get_updated_dev_handle(&instance->
1679 1679 load_balance_info[acmd->device_id],
1680 1680 &io_info);
1681 1681 cmd->load_balance_flag |=
1682 1682 MEGASAS_LOAD_BALANCE_FLAG;
1683 1683 } else {
1684 1684 cmd->load_balance_flag &=
1685 1685 ~MEGASAS_LOAD_BALANCE_FLAG;
1686 1686 }
1687 1687
1688 1688 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1689 1689 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1690 1690 io_info.devHandle);
1691 1691
1692 1692 } else {
1693 1693 ddi_put8(acc_handle, &scsi_raid_io->Function,
1694 1694 MPI2_FUNCTION_LD_IO_REQUEST);
1695 1695
1696 1696 ddi_put16(acc_handle,
1697 1697 &scsi_raid_io->DevHandle, acmd->device_id);
1698 1698
1699 1699 ReqDescUnion->SCSIIO.RequestFlags =
1700 1700 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1701 1701 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1702 1702
1703 1703 ddi_put16(acc_handle,
1704 1704 &scsi_raid_io->RaidContext.timeoutValue,
1705 1705 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1706 1706
1707 1707 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1708 1708 uint8_t regLockFlags = ddi_get8(acc_handle,
1709 1709 &scsi_raid_io->RaidContext.regLockFlags);
1710 1710
1711 1711 if (regLockFlags == REGION_TYPE_UNUSED) {
1712 1712 ReqDescUnion->SCSIIO.RequestFlags =
1713 1713 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1714 1714 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1715 1715 }
1716 1716
1717 1717 regLockFlags |=
1718 1718 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1719 1719 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1720 1720
1721 1721 ddi_put8(acc_handle,
1722 1722 &scsi_raid_io->RaidContext.nsegType,
1723 1723 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1724 1724 MPI2_TYPE_CUDA));
1725 1725 ddi_put8(acc_handle,
1726 1726 &scsi_raid_io->RaidContext.regLockFlags,
1727 1727 regLockFlags);
1728 1728 }
1729 1729 } /* Not FP */
1730 1730
1731 1731 /* Release SYNC MAP UPDATE lock */
1732 1732 mutex_exit(&instance->sync_map_mtx);
1733 1733
1734 1734
1735 1735 /*
1736 1736 * Set sense buffer physical address/length in scsi_io_request.
1737 1737 */
1738 1738 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1739 1739 cmd->sense_phys_addr1);
1740 1740 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1741 1741 SENSE_LENGTH);
1742 1742
1743 1743 /* Construct SGL */
1744 1744 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1745 1745 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1746 1746
1747 1747 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1748 1748 scsi_raid_io, &datalen);
1749 1749
1750 1750 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1751 1751
1752 1752 break;
1753 1753 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1754 1754 } else {
1755 1755 break;
1756 1756 #endif
1757 1757 }
1758 1758 	/* Fall through for all non-read/write cmds. */
1759 1759 default:
1760 1760 switch (pkt->pkt_cdbp[0]) {
1761 1761 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1762 1762 return_raid_msg_pkt(instance, cmd);
1763 1763 *cmd_done = 1;
1764 1764 return (NULL);
1765 1765 }
1766 1766
1767 1767 case SCMD_MODE_SENSE:
1768 1768 case SCMD_MODE_SENSE_G1: {
1769 1769 union scsi_cdb *cdbp;
1770 1770 uint16_t page_code;
1771 1771
1772 1772 cdbp = (void *)pkt->pkt_cdbp;
1773 1773 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1774 1774 switch (page_code) {
1775 1775 case 0x3:
1776 1776 case 0x4:
1777 1777 (void) mrsas_mode_sense_build(pkt);
1778 1778 return_raid_msg_pkt(instance, cmd);
1779 1779 *cmd_done = 1;
1780 1780 return (NULL);
1781 1781 }
1782 1782 break;
1783 1783 }
1784 1784
1785 1785 default: {
1786 1786 /*
1787 1787 * Here we need to handle PASSTHRU for
1788 1788 * Logical Devices. Like Inquiry etc.
1789 1789 */
1790 1790
1791 1791 if (!(acmd->islogical)) {
1792 1792
1793 1793 /* Acquire SYNC MAP UPDATE lock */
1794 1794 mutex_enter(&instance->sync_map_mtx);
1795 1795
1796 1796 local_map_ptr =
1797 1797 instance->ld_map[(instance->map_id & 1)];
1798 1798
1799 1799 ddi_put8(acc_handle, &scsi_raid_io->Function,
1800 1800 MPI2_FUNCTION_SCSI_IO_REQUEST);
1801 1801
1802 1802 ReqDescUnion->SCSIIO.RequestFlags =
1803 1803 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1804 1804 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1805 1805
1806 1806 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1807 1807 local_map_ptr->raidMap.
1808 1808 devHndlInfo[acmd->device_id].curDevHdl);
1809 1809
1810 1810
1811 1811 			/* Set regLockFlags to REGION_TYPE_BYPASS */
1812 1812 ddi_put8(acc_handle,
1813 1813 &scsi_raid_io->RaidContext.regLockFlags, 0);
1814 1814 ddi_put64(acc_handle,
1815 1815 &scsi_raid_io->RaidContext.regLockRowLBA,
1816 1816 0);
1817 1817 ddi_put32(acc_handle,
1818 1818 &scsi_raid_io->RaidContext.regLockLength,
1819 1819 0);
1820 1820 ddi_put8(acc_handle,
1821 1821 &scsi_raid_io->RaidContext.RAIDFlags,
1822 1822 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1823 1823 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1824 1824 ddi_put16(acc_handle,
1825 1825 &scsi_raid_io->RaidContext.timeoutValue,
1826 1826 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1827 1827 ddi_put16(acc_handle,
1828 1828 &scsi_raid_io->RaidContext.ldTargetId,
1829 1829 acmd->device_id);
1830 1830 ddi_put8(acc_handle,
1831 1831 &scsi_raid_io->LUN[1], acmd->lun);
1832 1832
1833 1833 /* Release SYNC MAP UPDATE lock */
1834 1834 mutex_exit(&instance->sync_map_mtx);
1835 1835
1836 1836 } else {
1837 1837 ddi_put8(acc_handle, &scsi_raid_io->Function,
1838 1838 MPI2_FUNCTION_LD_IO_REQUEST);
1839 1839 ddi_put8(acc_handle,
1840 1840 &scsi_raid_io->LUN[1], acmd->lun);
1841 1841 ddi_put16(acc_handle,
1842 1842 &scsi_raid_io->DevHandle, acmd->device_id);
1843 1843 ReqDescUnion->SCSIIO.RequestFlags =
1844 1844 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1845 1845 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1846 1846 }
1847 1847
1848 1848 /*
1849 1849 * Set sense buffer physical address/length in
1850 1850 * scsi_io_request.
1851 1851 */
1852 1852 ddi_put32(acc_handle,
1853 1853 &scsi_raid_io->SenseBufferLowAddress,
1854 1854 cmd->sense_phys_addr1);
1855 1855 ddi_put8(acc_handle,
1856 1856 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1857 1857
1858 1858 /* Construct SGL */
1859 1859 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1860 1860 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1861 1861
1862 1862 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1863 1863 scsi_raid_io, &datalen);
1864 1864
1865 1865 ddi_put32(acc_handle,
1866 1866 &scsi_raid_io->DataLength, datalen);
1867 1867
1868 1868
1869 1869 con_log(CL_ANN, (CE_CONT,
1870 1870 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1871 1871 pkt->pkt_cdbp[0], acmd->device_id));
1872 1872 con_log(CL_DLEVEL1, (CE_CONT,
1873 1873 "data length = %x\n",
1874 1874 scsi_raid_io->DataLength));
1875 1875 con_log(CL_DLEVEL1, (CE_CONT,
1876 1876 "cdb length = %x\n",
1877 1877 acmd->cmd_cdblen));
1878 1878 }
1879 1879 break;
1880 1880 }
1881 1881
1882 1882 }
1883 1883
1884 1884 return (cmd);
1885 1885 }
1886 1886
1887 1887 uint32_t
1888 1888 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1889 1889 {
1890 1890 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1891 1891 }
1892 1892
1893 1893 void
1894 1894 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1895 1895 {
1896 1896 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1897 - atomic_add_16(&instance->fw_outstanding, 1);
1897 + atomic_inc_16(&instance->fw_outstanding);
1898 1898
1899 1899 struct scsi_pkt *pkt;
1900 1900
1901 1901 con_log(CL_ANN1,
1902 1902 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1903 1903
1904 1904 con_log(CL_DLEVEL1, (CE_CONT,
1905 1905 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1906 1906 con_log(CL_DLEVEL1, (CE_CONT,
1907 1907 " [req desc low part] %x \n",
1908 1908 	    (uint_t)(req_desc->Words & 0xffffffff)));
1909 1909 con_log(CL_DLEVEL1, (CE_CONT,
1910 1910 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1911 1911 pkt = cmd->pkt;
1912 1912
1913 1913 if (pkt) {
1914 1914 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1915 1915 "ISSUED CMD TO FW : called : cmd:"
1916 1916 ": %p instance : %p pkt : %p pkt_time : %x\n",
1917 1917 gethrtime(), (void *)cmd, (void *)instance,
1918 1918 (void *)pkt, cmd->drv_pkt_time));
1919 1919 if (instance->adapterresetinprogress) {
1920 1920 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1921 1921 con_log(CL_ANN, (CE_NOTE,
1922 1922 "TBOLT Reset the scsi_pkt timer"));
1923 1923 } else {
1924 1924 push_pending_mfi_pkt(instance, cmd);
1925 1925 }
1926 1926
1927 1927 } else {
1928 1928 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1929 1929 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1930 1930 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1931 1931 }
1932 1932
1933 1933 /* Issue the command to the FW */
1934 1934 mutex_enter(&instance->reg_write_mtx);
1935 1935 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1936 1936 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1937 1937 mutex_exit(&instance->reg_write_mtx);
1938 1938 }
1939 1939
1940 1940 /*
1941 1941 * issue_cmd_in_sync_mode
1942 1942 */
1943 1943 int
1944 1944 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1945 1945 struct mrsas_cmd *cmd)
1946 1946 {
1947 1947 int i;
1948 1948 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1949 1949 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1950 1950
1951 1951 struct mrsas_header *hdr;
1952 1952 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1953 1953
1954 1954 con_log(CL_ANN,
1955 1955 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1956 1956 cmd->SMID));
1957 1957
1958 1958
1959 1959 if (instance->adapterresetinprogress) {
1960 1960 cmd->drv_pkt_time = ddi_get16
1961 1961 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1962 1962 if (cmd->drv_pkt_time < debug_timeout_g)
1963 1963 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1964 1964 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1965 1965 "RESET-IN-PROGRESS, issue cmd & return."));
1966 1966
1967 1967 mutex_enter(&instance->reg_write_mtx);
1968 1968 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1969 1969 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1970 1970 mutex_exit(&instance->reg_write_mtx);
1971 1971
1972 1972 return (DDI_SUCCESS);
1973 1973 } else {
1974 1974 con_log(CL_ANN1, (CE_NOTE,
1975 1975 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1976 1976 push_pending_mfi_pkt(instance, cmd);
1977 1977 }
1978 1978
1979 1979 con_log(CL_DLEVEL2, (CE_NOTE,
1980 1980 "HighQport offset :%p",
1981 1981 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1982 1982 con_log(CL_DLEVEL2, (CE_NOTE,
1983 1983 "LowQport offset :%p",
1984 1984 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1985 1985
1986 1986 cmd->sync_cmd = MRSAS_TRUE;
1987 1987 cmd->cmd_status = ENODATA;
1988 1988
1989 1989
1990 1990 mutex_enter(&instance->reg_write_mtx);
1991 1991 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1992 1992 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1993 1993 mutex_exit(&instance->reg_write_mtx);
1994 1994
1995 1995 con_log(CL_ANN1, (CE_NOTE,
1996 1996 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1997 1997 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1998 1998 (uint_t)(req_desc->Words & 0xffffffff)));
1999 1999
2000 2000 mutex_enter(&instance->int_cmd_mtx);
2001 2001 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2002 2002 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2003 2003 }
2004 2004 mutex_exit(&instance->int_cmd_mtx);
2005 2005
2006 2006
2007 2007 	if (i < (msecs - 1)) {
2008 2008 return (DDI_SUCCESS);
2009 2009 } else {
2010 2010 return (DDI_FAILURE);
2011 2011 }
2012 2012 }
2013 2013
2014 2014 /*
2015 2015 * issue_cmd_in_poll_mode
2016 2016 */
2017 2017 int
2018 2018 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2019 2019 struct mrsas_cmd *cmd)
2020 2020 {
2021 2021 int i;
2022 2022 uint16_t flags;
2023 2023 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2024 2024 struct mrsas_header *frame_hdr;
2025 2025
2026 2026 con_log(CL_ANN,
2027 2027 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2028 2028 cmd->SMID));
2029 2029
2030 2030 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2031 2031
2032 2032 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2033 2033 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2034 2034 MFI_CMD_STATUS_POLL_MODE);
2035 2035 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2036 2036 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2037 2037 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2038 2038
2039 2039 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2040 2040 (uint_t)(req_desc->Words & 0xffffffff)));
2041 2041 con_log(CL_ANN1, (CE_NOTE,
2042 2042 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2043 2043
2044 2044 /* issue the frame using inbound queue port */
2045 2045 mutex_enter(&instance->reg_write_mtx);
2046 2046 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2047 2047 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2048 2048 mutex_exit(&instance->reg_write_mtx);
2049 2049
2050 2050 for (i = 0; i < msecs && (
2051 2051 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2052 2052 == MFI_CMD_STATUS_POLL_MODE); i++) {
2053 2053 /* wait for cmd_status to change from 0xFF */
2054 2054 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2055 2055 }
2056 2056
2057 2057 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2058 2058 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2059 2059 con_log(CL_ANN1, (CE_NOTE,
2060 2060 " cmd failed %" PRIx64, (req_desc->Words)));
2061 2061 return (DDI_FAILURE);
2062 2062 }
2063 2063
2064 2064 return (DDI_SUCCESS);
2065 2065 }
2066 2066
2067 2067 void
2068 2068 tbolt_enable_intr(struct mrsas_instance *instance)
2069 2069 {
2070 2070 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2071 2071 /* writel(~0, ®s->outbound_intr_status); */
2072 2072 /* readl(®s->outbound_intr_status); */
2073 2073
2074 2074 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2075 2075
2076 2076 /* dummy read to force PCI flush */
2077 2077 (void) RD_OB_INTR_MASK(instance);
2078 2078
2079 2079 }
2080 2080
2081 2081 void
2082 2082 tbolt_disable_intr(struct mrsas_instance *instance)
2083 2083 {
2084 2084 uint32_t mask = 0xFFFFFFFF;
2085 2085
2086 2086 WR_OB_INTR_MASK(mask, instance);
2087 2087
2088 2088 /* Dummy readl to force pci flush */
2089 2089
2090 2090 (void) RD_OB_INTR_MASK(instance);
2091 2091 }
2092 2092
2093 2093
2094 2094 int
2095 2095 tbolt_intr_ack(struct mrsas_instance *instance)
2096 2096 {
2097 2097 uint32_t status;
2098 2098
2099 2099 /* check if it is our interrupt */
2100 2100 status = RD_OB_INTR_STATUS(instance);
2101 2101 con_log(CL_ANN1, (CE_NOTE,
2102 2102 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2103 2103
2104 2104 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2105 2105 return (DDI_INTR_UNCLAIMED);
2106 2106 }
2107 2107
2108 2108 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2109 2109 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2110 2110 return (DDI_INTR_UNCLAIMED);
2111 2111 }
2112 2112
2113 2113 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2114 2114 /* clear the interrupt by writing back the same value */
2115 2115 WR_OB_INTR_STATUS(status, instance);
2116 2116 /* dummy READ */
2117 2117 (void) RD_OB_INTR_STATUS(instance);
2118 2118 }
2119 2119 return (DDI_INTR_CLAIMED);
2120 2120 }
2121 2121
2122 2122 /*
2123 2123 * get_raid_msg_pkt : Get a command from the free pool
2124 2124 * After successful allocation, the caller of this routine
2125 2125 * must clear the frame buffer (memset to zero) before
2126 2126 * using the packet further.
2127 2127 *
2128 2128 * ***** Note *****
2129 2129  * After clearing the frame buffer, the context id of the
2130 2130  * frame buffer SHOULD be restored.
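 *
 * A minimal caller sketch (hedged, illustrative only; the context field
 * name and its initial value, the command index, are assumptions here):
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	if (cmd != NULL) {
 *		bzero(cmd->frame, sizeof (*cmd->frame));
 *		ddi_put32(cmd->frame_dma_obj.acc_handle,
 *		    &cmd->frame->hdr.context, cmd->index);
 *	}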
2131 2131 */
2132 2132
2133 2133 struct mrsas_cmd *
2134 2134 get_raid_msg_pkt(struct mrsas_instance *instance)
2135 2135 {
2136 2136 mlist_t *head = &instance->cmd_pool_list;
2137 2137 struct mrsas_cmd *cmd = NULL;
2138 2138
2139 2139 mutex_enter(&instance->cmd_pool_mtx);
2140 2140 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2141 2141
2142 2142
2143 2143 if (!mlist_empty(head)) {
2144 2144 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2145 2145 mlist_del_init(head->next);
2146 2146 }
2147 2147 if (cmd != NULL) {
2148 2148 cmd->pkt = NULL;
2149 2149 cmd->retry_count_for_ocr = 0;
2150 2150 cmd->drv_pkt_time = 0;
2151 2151 }
2152 2152 mutex_exit(&instance->cmd_pool_mtx);
2153 2153
2154 2154 if (cmd != NULL)
2155 2155 bzero(cmd->scsi_io_request,
2156 2156 sizeof (Mpi2RaidSCSIIORequest_t));
2157 2157 return (cmd);
2158 2158 }
2159 2159
2160 2160 struct mrsas_cmd *
2161 2161 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2162 2162 {
2163 2163 mlist_t *head = &instance->cmd_app_pool_list;
2164 2164 struct mrsas_cmd *cmd = NULL;
2165 2165
2166 2166 mutex_enter(&instance->cmd_app_pool_mtx);
2167 2167 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2168 2168
2169 2169 if (!mlist_empty(head)) {
2170 2170 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2171 2171 mlist_del_init(head->next);
2172 2172 }
2173 2173 if (cmd != NULL) {
2174 2174 cmd->retry_count_for_ocr = 0;
2175 2175 cmd->drv_pkt_time = 0;
2176 2176 cmd->pkt = NULL;
2177 2177 cmd->request_desc = NULL;
2178 2178
2179 2179 }
2180 2180
2181 2181 mutex_exit(&instance->cmd_app_pool_mtx);
2182 2182
2183 2183 if (cmd != NULL) {
2184 2184 bzero(cmd->scsi_io_request,
2185 2185 sizeof (Mpi2RaidSCSIIORequest_t));
2186 2186 }
2187 2187
2188 2188 return (cmd);
2189 2189 }
2190 2190
2191 2191 /*
2192 2192 * return_raid_msg_pkt : Return a cmd to free command pool
2193 2193 */
2194 2194 void
2195 2195 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2196 2196 {
2197 2197 mutex_enter(&instance->cmd_pool_mtx);
2198 2198 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2199 2199
2200 2200
2201 2201 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2202 2202
2203 2203 mutex_exit(&instance->cmd_pool_mtx);
2204 2204 }
2205 2205
2206 2206 void
2207 2207 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2208 2208 {
2209 2209 mutex_enter(&instance->cmd_app_pool_mtx);
2210 2210 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2211 2211
2212 2212 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2213 2213
2214 2214 mutex_exit(&instance->cmd_app_pool_mtx);
2215 2215 }
2216 2216
2217 2217
2218 2218 void
2219 2219 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2220 2220 struct mrsas_cmd *cmd)
2221 2221 {
2222 2222 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2223 2223 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2224 2224 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2225 2225 uint32_t index;
2226 2226 ddi_acc_handle_t acc_handle =
2227 2227 instance->mpi2_frame_pool_dma_obj.acc_handle;
2228 2228
2229 2229 if (!instance->tbolt) {
2230 2230 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2231 2231 return;
2232 2232 }
2233 2233
2234 2234 index = cmd->index;
2235 2235
2236 2236 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2237 2237
2238 2238 if (!ReqDescUnion) {
2239 2239 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2240 2240 return;
2241 2241 }
2242 2242
2243 2243 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2244 2244
2245 2245 ReqDescUnion->Words = 0;
2246 2246
2247 2247 ReqDescUnion->SCSIIO.RequestFlags =
2248 2248 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2249 2249 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2250 2250
2251 2251 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2252 2252
2253 2253 cmd->request_desc = ReqDescUnion;
2254 2254
2255 2255 /* get raid message frame pointer */
2256 2256 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2257 2257
2258 2258 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2259 2259 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2260 2260 &scsi_raid_io->SGL.IeeeChain;
2261 2261 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2262 2262 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2263 2263 }
2264 2264
2265 2265 ddi_put8(acc_handle, &scsi_raid_io->Function,
2266 2266 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2267 2267
2268 2268 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2269 2269 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2270 2270
2271 2271 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2272 2272 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2273 2273
2274 2274 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2275 2275 cmd->sense_phys_addr1);
2276 2276
2277 2277
2278 2278 scsi_raid_io_sgl_ieee =
2279 2279 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2280 2280
2281 2281 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2282 2282 (U64)cmd->frame_phys_addr);
2283 2283
2284 2284 ddi_put8(acc_handle,
2285 2285 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2286 2286 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2287 2287 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2288 2288 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2289 2289
2290 2290 con_log(CL_ANN1, (CE_NOTE,
2291 2291 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2292 2292 scsi_raid_io_sgl_ieee->Address));
2293 2293 con_log(CL_ANN1, (CE_NOTE,
2294 2294 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2295 2295 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2296 2296 scsi_raid_io_sgl_ieee->Flags));
2297 2297 }
2298 2298
2299 2299
2300 2300 void
2301 2301 tbolt_complete_cmd(struct mrsas_instance *instance,
2302 2302 struct mrsas_cmd *cmd)
2303 2303 {
2304 2304 uint8_t status;
2305 2305 uint8_t extStatus;
2306 2306 uint8_t arm;
2307 2307 struct scsa_cmd *acmd;
2308 2308 struct scsi_pkt *pkt;
2309 2309 struct scsi_arq_status *arqstat;
2310 2310 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2311 2311 LD_LOAD_BALANCE_INFO *lbinfo;
2312 2312 ddi_acc_handle_t acc_handle =
2313 2313 instance->mpi2_frame_pool_dma_obj.acc_handle;
2314 2314
2315 2315 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2316 2316
2317 2317 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2318 2318 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2319 2319
2320 2320 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2321 2321 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2322 2322
2323 2323 if (status != MFI_STAT_OK) {
2324 2324 con_log(CL_ANN, (CE_WARN,
2325 2325 "IO Cmd Failed SMID %x", cmd->SMID));
2326 2326 } else {
2327 2327 con_log(CL_ANN, (CE_NOTE,
2328 2328 "IO Cmd Success SMID %x", cmd->SMID));
2329 2329 }
2330 2330
2331 2331 /* regular commands */
2332 2332
2333 2333 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2334 2334
2335 2335 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2336 2336 acmd = (struct scsa_cmd *)cmd->cmd;
2337 2337 lbinfo = &instance->load_balance_info[acmd->device_id];
2338 2338
2339 2339 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2340 2340 arm = lbinfo->raid1DevHandle[0] ==
2341 2341 scsi_raid_io->DevHandle ? 0 : 1;
2342 2342
2343 2343 lbinfo->scsi_pending_cmds[arm]--;
2344 2344 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2345 2345 }
2346 2346 con_log(CL_DLEVEL3, (CE_NOTE,
2347 2347 "FastPath IO Completion Success "));
2348 2348 /* FALLTHRU */
2349 2349
2350 2350 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2351 2351 acmd = (struct scsa_cmd *)cmd->cmd;
2352 2352 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2353 2353
2354 2354 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2355 2355 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2356 2356 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2357 2357 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2358 2358 DDI_DMA_SYNC_FORCPU);
2359 2359 }
2360 2360 }
2361 2361
2362 2362 pkt->pkt_reason = CMD_CMPLT;
2363 2363 pkt->pkt_statistics = 0;
2364 2364 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2365 2365 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2366 2366
2367 2367 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2368 2368 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2369 2369 ((acmd->islogical) ? "LD" : "PD"),
2370 2370 acmd->cmd_dmacount, cmd->SMID, status));
2371 2371
2372 2372 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2373 2373 struct scsi_inquiry *inq;
2374 2374
2375 2375 if (acmd->cmd_dmacount != 0) {
2376 2376 bp_mapin(acmd->cmd_buf);
2377 2377 inq = (struct scsi_inquiry *)
2378 2378 acmd->cmd_buf->b_un.b_addr;
2379 2379
2380 2380 /* don't expose physical drives to OS */
2381 2381 if (acmd->islogical &&
2382 2382 (status == MFI_STAT_OK)) {
2383 2383 display_scsi_inquiry((caddr_t)inq);
2384 2384 #ifdef PDSUPPORT
2385 2385 } else if ((status == MFI_STAT_OK) &&
2386 2386 inq->inq_dtype == DTYPE_DIRECT) {
2387 2387 display_scsi_inquiry((caddr_t)inq);
2388 2388 #endif
2389 2389 } else {
2390 2390 /* for physical disk */
2391 2391 status = MFI_STAT_DEVICE_NOT_FOUND;
2392 2392 }
2393 2393 }
2394 2394 }
2395 2395
2396 2396 switch (status) {
2397 2397 case MFI_STAT_OK:
2398 2398 pkt->pkt_scbp[0] = STATUS_GOOD;
2399 2399 break;
2400 2400 case MFI_STAT_LD_CC_IN_PROGRESS:
2401 2401 case MFI_STAT_LD_RECON_IN_PROGRESS:
2402 2402 pkt->pkt_scbp[0] = STATUS_GOOD;
2403 2403 break;
2404 2404 case MFI_STAT_LD_INIT_IN_PROGRESS:
2405 2405 pkt->pkt_reason = CMD_TRAN_ERR;
2406 2406 break;
2407 2407 case MFI_STAT_SCSI_IO_FAILED:
2408 2408 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2409 2409 pkt->pkt_reason = CMD_TRAN_ERR;
2410 2410 break;
2411 2411 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 2412 con_log(CL_ANN, (CE_WARN,
2413 2413 "tbolt_complete_cmd: scsi_done with error"));
2414 2414
2415 2415 pkt->pkt_reason = CMD_CMPLT;
2416 2416 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2417 2417
2418 2418 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 2419 con_log(CL_ANN,
2420 2420 (CE_WARN, "TEST_UNIT_READY fail"));
2421 2421 } else {
2422 2422 pkt->pkt_state |= STATE_ARQ_DONE;
2423 2423 arqstat = (void *)(pkt->pkt_scbp);
2424 2424 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 2425 arqstat->sts_rqpkt_resid = 0;
2426 2426 arqstat->sts_rqpkt_state |=
2427 2427 STATE_GOT_BUS | STATE_GOT_TARGET
2428 2428 | STATE_SENT_CMD
2429 2429 | STATE_XFERRED_DATA;
2430 2430 *(uint8_t *)&arqstat->sts_rqpkt_status =
2431 2431 STATUS_GOOD;
2432 2432 con_log(CL_ANN1,
2433 2433 (CE_NOTE, "Copying Sense data %x",
2434 2434 cmd->SMID));
2435 2435
2436 2436 ddi_rep_get8(acc_handle,
2437 2437 (uint8_t *)&(arqstat->sts_sensedata),
2438 2438 cmd->sense1,
2439 2439 sizeof (struct scsi_extended_sense),
2440 2440 DDI_DEV_AUTOINCR);
2441 2441
2442 2442 }
2443 2443 break;
2444 2444 case MFI_STAT_LD_OFFLINE:
2445 2445 cmn_err(CE_WARN,
2446 2446 "tbolt_complete_cmd: ld offline "
2447 2447 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2448 2448 /* UNDO: */
2449 2449 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2450 2450
2451 2451 ddi_get16(acc_handle,
2452 2452 &scsi_raid_io->RaidContext.ldTargetId),
2453 2453
2454 2454 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2455 2455
2456 2456 pkt->pkt_reason = CMD_DEV_GONE;
2457 2457 pkt->pkt_statistics = STAT_DISCON;
2458 2458 break;
2459 2459 case MFI_STAT_DEVICE_NOT_FOUND:
2460 2460 con_log(CL_ANN, (CE_CONT,
2461 2461 "tbolt_complete_cmd: device not found error"));
2462 2462 pkt->pkt_reason = CMD_DEV_GONE;
2463 2463 pkt->pkt_statistics = STAT_DISCON;
2464 2464 break;
2465 2465
2466 2466 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 2467 pkt->pkt_state |= STATE_ARQ_DONE;
2468 2468 pkt->pkt_reason = CMD_CMPLT;
2469 2469 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2470 2470
2471 2471 arqstat = (void *)(pkt->pkt_scbp);
2472 2472 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 2473 arqstat->sts_rqpkt_resid = 0;
2474 2474 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 2475 | STATE_GOT_TARGET | STATE_SENT_CMD
2476 2476 | STATE_XFERRED_DATA;
2477 2477 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2478 2478
2479 2479 arqstat->sts_sensedata.es_valid = 1;
2480 2480 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 2481 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2482 2482
2483 2483 /*
2484 2484 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 2485 * ASC: 0x21h; ASCQ: 0x00h;
2486 2486 */
2487 2487 arqstat->sts_sensedata.es_add_code = 0x21;
2488 2488 arqstat->sts_sensedata.es_qual_code = 0x00;
2489 2489 break;
2490 2490 case MFI_STAT_INVALID_CMD:
2491 2491 case MFI_STAT_INVALID_DCMD:
2492 2492 case MFI_STAT_INVALID_PARAMETER:
2493 2493 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 2494 default:
2495 2495 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2496 2496 pkt->pkt_reason = CMD_TRAN_ERR;
2497 2497
2498 2498 break;
2499 2499 }
2500 2500
2501  -		atomic_add_16(&instance->fw_outstanding, (-1));
2501 +		atomic_dec_16(&instance->fw_outstanding);
2502 2502
2503 2503 (void) mrsas_common_check(instance, cmd);
2504 2504 if (acmd->cmd_dmahandle) {
2505 2505 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2506 2506 DDI_SUCCESS) {
2507 2507 ddi_fm_service_impact(instance->dip,
2508 2508 DDI_SERVICE_UNAFFECTED);
2509 2509 pkt->pkt_reason = CMD_TRAN_ERR;
2510 2510 pkt->pkt_statistics = 0;
2511 2511 }
2512 2512 }
2513 2513
2514 2514 /* Call the callback routine */
2515 2515 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2516 2516 (*pkt->pkt_comp)(pkt);
2517 2517
2518 2518 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2519 2519
2520 2520 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2521 2521
2522 2522 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2523 2523
2524 2524 return_raid_msg_pkt(instance, cmd);
2525 2525 break;
2526 2526 }
2527 2527 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2528 2528
2529 2529 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2530 2530 cmd->frame->dcmd.mbox.b[1] == 1) {
2531 2531
2532 2532 mutex_enter(&instance->sync_map_mtx);
2533 2533
2534 2534 con_log(CL_ANN, (CE_NOTE,
2535 2535 "LDMAP sync command SMID RECEIVED 0x%X",
2536 2536 cmd->SMID));
2537 2537 if (cmd->frame->hdr.cmd_status != 0) {
2538 2538 cmn_err(CE_WARN,
2539 2539 "map sync failed, status = 0x%x.",
2540 2540 cmd->frame->hdr.cmd_status);
2541 2541 } else {
2542 2542 instance->map_id++;
2543 2543 cmn_err(CE_NOTE,
2544 2544 "map sync received, switched map_id to %"
2545 2545 PRIu64 " \n", instance->map_id);
2546 2546 }
2547 2547
2548 2548 if (MR_ValidateMapInfo(instance->ld_map[
2549 2549 (instance->map_id & 1)],
2550 2550 instance->load_balance_info)) {
2551 2551 instance->fast_path_io = 1;
2552 2552 } else {
2553 2553 instance->fast_path_io = 0;
2554 2554 }
2555 2555
2556 2556 con_log(CL_ANN, (CE_NOTE,
2557 2557 "instance->fast_path_io %d",
2558 2558 instance->fast_path_io));
2559 2559
2560 2560 instance->unroll.syncCmd = 0;
2561 2561
2562 2562 if (instance->map_update_cmd == cmd) {
2563 2563 return_raid_msg_pkt(instance, cmd);
2564  -				atomic_add_16(&instance->fw_outstanding, (-1));
2564 +				atomic_dec_16(&instance->fw_outstanding);
2565 2565 (void) mrsas_tbolt_sync_map_info(instance);
2566 2566 }
2567 2567
2568 2568 cmn_err(CE_NOTE, "LDMAP sync completed.");
2569 2569 mutex_exit(&instance->sync_map_mtx);
2570 2570 break;
2571 2571 }
2572 2572
2573 2573 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2574 2574 con_log(CL_ANN1, (CE_CONT,
2575 2575 "AEN command SMID RECEIVED 0x%X",
2576 2576 cmd->SMID));
2577 2577 if ((instance->aen_cmd == cmd) &&
2578 2578 (instance->aen_cmd->abort_aen)) {
2579 2579 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2580 2580 "aborted_aen returned"));
2581 2581 } else {
2582  -				atomic_add_16(&instance->fw_outstanding, (-1));
2582 +				atomic_dec_16(&instance->fw_outstanding);
2583 2583 service_mfi_aen(instance, cmd);
2584 2584 }
2585 2585 }
2586 2586
2587 2587 if (cmd->sync_cmd == MRSAS_TRUE) {
2588 2588 con_log(CL_ANN1, (CE_CONT,
2589 2589 "Sync-mode Command Response SMID RECEIVED 0x%X",
2590 2590 cmd->SMID));
2591 2591
2592 2592 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2593 2593 } else {
2594 2594 con_log(CL_ANN, (CE_CONT,
2595 2595 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2596 2596 cmd->SMID));
2597 2597 }
2598 2598 break;
2599 2599 default:
2600 2600 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2601 2601 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2602 2602
2603 2603 /* free message */
2604 2604 con_log(CL_ANN,
2605 2605 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2606 2606 break;
2607 2607 }
2608 2608 }
2609 2609
2610 2610 uint_t
2611 2611 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2612 2612 {
2613 2613 uint8_t replyType;
2614 2614 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2615 2615 Mpi2ReplyDescriptorsUnion_t *desc;
2616 2616 uint16_t smid;
2617 2617 union desc_value d_val;
2618 2618 struct mrsas_cmd *cmd;
2619 2619
2620 2620 struct mrsas_header *hdr;
2621 2621 struct scsi_pkt *pkt;
2622 2622
2623 2623 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2624 2624 0, 0, DDI_DMA_SYNC_FORDEV);
2625 2625
2626 2626 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2627 2627 0, 0, DDI_DMA_SYNC_FORCPU);
2628 2628
2629 2629 desc = instance->reply_frame_pool;
2630 2630 desc += instance->reply_read_index;
2631 2631
2632 2632 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2633 2633 replyType = replyDesc->ReplyFlags &
2634 2634 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2635 2635
2636 2636 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2637 2637 return (DDI_INTR_UNCLAIMED);
2638 2638
2639 2639 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2640 2640 != DDI_SUCCESS) {
2641 2641 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2642 2642 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2643 2643 con_log(CL_ANN1,
2644 2644 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2645 2645 "FMA check, returning DDI_INTR_UNCLAIMED"));
2646 2646 return (DDI_INTR_CLAIMED);
2647 2647 }
2648 2648
2649 2649 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2650 2650 (void *)desc, desc->Words));
2651 2651
2652 2652 d_val.word = desc->Words;
2653 2653
2654 2654
2655 2655 /* Read Reply descriptor */
2656 2656 while ((d_val.u1.low != 0xffffffff) &&
2657 2657 (d_val.u1.high != 0xffffffff)) {
2658 2658
2659 2659 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2660 2660 0, 0, DDI_DMA_SYNC_FORCPU);
2661 2661
2662 2662 smid = replyDesc->SMID;
2663 2663
2664 2664 if (!smid || smid > instance->max_fw_cmds + 1) {
2665 2665 con_log(CL_ANN1, (CE_NOTE,
2666 2666 "Reply Desc at Break = %p Words = %" PRIx64,
2667 2667 (void *)desc, desc->Words));
2668 2668 break;
2669 2669 }
2670 2670
2671 2671 cmd = instance->cmd_list[smid - 1];
2672 2672 if (!cmd) {
2673 2673 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2674 2674 "outstanding_cmd: Invalid command "
2675 2675 			    "or poll command received in completion path"));
2676 2676 } else {
2677 2677 mutex_enter(&instance->cmd_pend_mtx);
2678 2678 if (cmd->sync_cmd == MRSAS_TRUE) {
2679 2679 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2680 2680 if (hdr) {
2681 2681 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2682 2682 "tbolt_process_outstanding_cmd:"
2683 2683 " mlist_del_init(&cmd->list)."));
2684 2684 mlist_del_init(&cmd->list);
2685 2685 }
2686 2686 } else {
2687 2687 pkt = cmd->pkt;
2688 2688 if (pkt) {
2689 2689 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2690 2690 "tbolt_process_outstanding_cmd:"
2691 2691 "mlist_del_init(&cmd->list)."));
2692 2692 mlist_del_init(&cmd->list);
2693 2693 }
2694 2694 }
2695 2695
2696 2696 mutex_exit(&instance->cmd_pend_mtx);
2697 2697
2698 2698 tbolt_complete_cmd(instance, cmd);
2699 2699 }
2700 2700 /* set it back to all 1s. */
2701 2701 desc->Words = -1LL;
2702 2702
2703 2703 instance->reply_read_index++;
2704 2704
2705 2705 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2706 2706 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2707 2707 instance->reply_read_index = 0;
2708 2708 }
2709 2709
2710 2710 /* Get the next reply descriptor */
2711 2711 if (!instance->reply_read_index)
2712 2712 desc = instance->reply_frame_pool;
2713 2713 else
2714 2714 desc++;
2715 2715
2716 2716 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2717 2717
2718 2718 d_val.word = desc->Words;
2719 2719
2720 2720 con_log(CL_ANN1, (CE_NOTE,
2721 2721 "Next Reply Desc = %p Words = %" PRIx64,
2722 2722 (void *)desc, desc->Words));
2723 2723
2724 2724 replyType = replyDesc->ReplyFlags &
2725 2725 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2726 2726
2727 2727 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2728 2728 break;
2729 2729
2730 2730 } /* End of while loop. */
2731 2731
2732 2732 /* update replyIndex to FW */
2733 2733 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2734 2734
2735 2735
2736 2736 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2737 2737 0, 0, DDI_DMA_SYNC_FORDEV);
2738 2738
2739 2739 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2740 2740 0, 0, DDI_DMA_SYNC_FORCPU);
2741 2741 return (DDI_INTR_CLAIMED);
2742 2742 }
2743 2743
2744 2744
2745 2745
2746 2746
2747 2747 /*
2748 2748  * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2749 2749 * @instance: Adapter soft state
2750 2750 * @cmd: Command to be completed
2751 2751 *
2752 2752  * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2753 2753  * complete after issuing it. This function wakes up that waiting routine
2754 2754  * by calling cv_broadcast() on the int_cmd_cv condition variable.
2755 2755 */
2756 2756 void
2757 2757 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2758 2758 struct mrsas_cmd *cmd)
2759 2759 {
2760 2760
2761 2761 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2762 2762 &cmd->frame->io.cmd_status);
2763 2763
2764 2764 cmd->sync_cmd = MRSAS_FALSE;
2765 2765
2766 2766 mutex_enter(&instance->int_cmd_mtx);
2767 2767 if (cmd->cmd_status == ENODATA) {
2768 2768 cmd->cmd_status = 0;
2769 2769 }
2770 2770 cv_broadcast(&instance->int_cmd_cv);
2771 2771 mutex_exit(&instance->int_cmd_mtx);
2772 2772
2773 2773 }
2774 2774
2775 2775 /*
2776 2776 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2777 2777 * instance: Adapter soft state
2778 2778 *
2779 2779  * Issues an internal command (DCMD) to fetch the FW's RAID (LD) map
2780 2780  * structure. This information is mainly used to validate the map and
2781 2781  * decide whether fast-path IO can be enabled.
2782 2782 */
2783 2783 int
2784 2784 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2785 2785 {
2786 2786 int ret = 0;
2787 2787 struct mrsas_cmd *cmd = NULL;
2788 2788 struct mrsas_dcmd_frame *dcmd;
2789 2789 MR_FW_RAID_MAP_ALL *ci;
2790 2790 uint32_t ci_h = 0;
2791 2791 U32 size_map_info;
2792 2792
2793 2793 cmd = get_raid_msg_pkt(instance);
2794 2794
2795 2795 if (cmd == NULL) {
2796 2796 cmn_err(CE_WARN,
2797 2797 "Failed to get a cmd from free-pool in get_ld_map_info()");
2798 2798 return (DDI_FAILURE);
2799 2799 }
2800 2800
2801 2801 dcmd = &cmd->frame->dcmd;
2802 2802
2803 2803 size_map_info = sizeof (MR_FW_RAID_MAP) +
2804 2804 (sizeof (MR_LD_SPAN_MAP) *
2805 2805 (MAX_LOGICAL_DRIVES - 1));
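	/*
	 * MR_FW_RAID_MAP already embeds one MR_LD_SPAN_MAP at its tail,
	 * so only (MAX_LOGICAL_DRIVES - 1) additional span maps need to
	 * be accounted for in the transfer size.
	 */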
2806 2806
2807 2807 con_log(CL_ANN, (CE_NOTE,
2808 2808 "size_map_info : 0x%x", size_map_info));
2809 2809
2810 2810 ci = instance->ld_map[(instance->map_id & 1)];
2811 2811 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2812 2812
2813 2813 if (!ci) {
2814 2814 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2815 2815 return_raid_msg_pkt(instance, cmd);
2816 2816 return (-1);
2817 2817 }
2818 2818
2819 2819 bzero(ci, sizeof (*ci));
2820 2820 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2821 2821
2822 2822 dcmd->cmd = MFI_CMD_OP_DCMD;
2823 2823 dcmd->cmd_status = 0xFF;
2824 2824 dcmd->sge_count = 1;
2825 2825 dcmd->flags = MFI_FRAME_DIR_READ;
2826 2826 dcmd->timeout = 0;
2827 2827 dcmd->pad_0 = 0;
2828 2828 dcmd->data_xfer_len = size_map_info;
2829 2829 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2830 2830 dcmd->sgl.sge32[0].phys_addr = ci_h;
2831 2831 dcmd->sgl.sge32[0].length = size_map_info;
2832 2832
2833 2833
2834 2834 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2835 2835
2836 2836 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2837 2837 ret = 0;
2838 2838 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2839 2839 } else {
2840 2840 cmn_err(CE_WARN, "Get LD Map Info failed");
2841 2841 ret = -1;
2842 2842 }
2843 2843
2844 2844 return_raid_msg_pkt(instance, cmd);
2845 2845
2846 2846 return (ret);
2847 2847 }
2848 2848
2849 2849 void
2850 2850 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2851 2851 {
2852 2852 uint32_t i;
2853 2853 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2854 2854 union desc_value d_val;
2855 2855
2856 2856 reply_desc = instance->reply_frame_pool;
2857 2857
2858 2858 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2859 2859 d_val.word = reply_desc->Words;
2860 2860 con_log(CL_DLEVEL3, (CE_NOTE,
2861 2861 "i=%d, %x:%x",
2862 2862 i, d_val.u1.high, d_val.u1.low));
2863 2863 }
2864 2864 }
2865 2865
2866 2866 /*
2867 2867  * mrsas_tbolt_prepare_cdb - Prepare a 32-byte CDB for fast path DIF IO.
2868 2868 * @io_info: MegaRAID IO request packet pointer.
2869 2869 * @ref_tag: Reference tag for RD/WRPROTECT
2870 2870 *
2871 2871  * Builds the READ(32)/WRITE(32) CDB and the EEDP fields for fast path IO.
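 *
 * Layout of the 32-byte variable-length CDB built below (byte offsets,
 * following the SBC READ(32)/WRITE(32) format):
 *	[0]	0x7F	variable-length CDB opcode
 *	[7]	additional CDB length
 *	[8-9]	service action (READ32 or WRITE32)
 *	[10]	RD/WRPROTECT and related flags
 *	[12-19]	64-bit LBA, big endian
 *	[20-27]	expected reference tag / application tag mask, written
 *		through the request's CDB.EEDP32 union fields below
 *	[28-31]	transfer length in blocks, big endian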
2872 2872 */
2873 2873 void
2874 2874 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2875 2875 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2876 2876 U32 ref_tag)
2877 2877 {
2878 2878 uint16_t EEDPFlags;
2879 2879 uint32_t Control;
2880 2880 ddi_acc_handle_t acc_handle =
2881 2881 instance->mpi2_frame_pool_dma_obj.acc_handle;
2882 2882
2883 2883 /* Prepare 32-byte CDB if DIF is supported on this device */
2884 2884 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2885 2885
2886 2886 bzero(cdb, 32);
2887 2887
2888 2888 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2889 2889
2890 2890
2891 2891 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2892 2892
2893 2893 if (io_info->isRead)
2894 2894 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2895 2895 else
2896 2896 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2897 2897
2898 2898 	/* The Linux driver sets this to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2899 2899 cdb[10] = MRSAS_RD_WR_PROTECT;
2900 2900
2901 2901 /* LOGICAL BLOCK ADDRESS */
2902 2902 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2903 2903 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2904 2904 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2905 2905 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2906 2906 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2907 2907 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2908 2908 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2909 2909 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2910 2910
2911 2911 /* Logical block reference tag */
2912 2912 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2913 2913 BE_32(ref_tag));
2914 2914
2915 2915 ddi_put16(acc_handle,
2916 2916 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2917 2917
2918 2918 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2919 2919 ((io_info->numBlocks)*512));
2920 2920 /* Specify 32-byte cdb */
2921 2921 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2922 2922
2923 2923 /* Transfer length */
2924 2924 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2925 2925 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2926 2926 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2927 2927 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2928 2928
2929 2929 /* set SCSI IO EEDPFlags */
2930 2930 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2931 2931 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2932 2932
2933 2933 /* set SCSI IO EEDPFlags bits */
2934 2934 if (io_info->isRead) {
2935 2935 /*
2936 2936 * For READ commands, the EEDPFlags shall be set to specify to
2937 2937 * Increment the Primary Reference Tag, to Check the Reference
2938 2938 * Tag, and to Check and Remove the Protection Information
2939 2939 * fields.
2940 2940 */
2941 2941 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2942 2942 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2943 2943 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2944 2944 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2945 2945 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2946 2946 } else {
2947 2947 /*
2948 2948 * For WRITE commands, the EEDPFlags shall be set to specify to
2949 2949 * Increment the Primary Reference Tag, and to Insert
2950 2950 * Protection Information fields.
2951 2951 */
2952 2952 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2953 2953 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2954 2954 }
2955 2955 Control |= (0x4 << 26);
2956 2956
2957 2957 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2958 2958 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2959 2959 ddi_put32(acc_handle,
2960 2960 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2961 2961 }
2962 2962
2963 2963
2964 2964 /*
2965 2965 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2966 2966 * @cdb: CDB
2967 2967 * @cdb_len: cdb length
2968 2968 * @start_blk: Start block of IO
2969 2969 *
2970 2970 * Used to set the PD LBA in CDB for FP IOs
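 *
 * Example (illustrative values): a READ(16) for LBA 0x1234 and 8 blocks
 * is rewritten as a READ(10) with cdb[0] = 0x28, cdb[2..5] = 00 00 12 34
 * and cdb[7..8] = 00 08, since both the LBA and the transfer length fit
 * the 10-byte format.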
2971 2971 */
2972 2972 static void
2973 2973 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2974 2974 U32 num_blocks)
2975 2975 {
2976 2976 U8 cdb_len = *cdb_len_ptr;
2977 2977 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2978 2978
2979 2979 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
2980 2980 if (((cdb_len == 12) || (cdb_len == 16)) &&
2981 2981 (start_blk <= 0xffffffff)) {
2982 2982 if (cdb_len == 16) {
2983 2983 con_log(CL_ANN,
2984 2984 			    (CE_NOTE, "Converting READ/WRITE(16) to READ/WRITE(10)"));
2985 2985 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2986 2986 flagvals = cdb[1];
2987 2987 groupnum = cdb[14];
2988 2988 control = cdb[15];
2989 2989 } else {
2990 2990 con_log(CL_ANN,
2991 2991 			    (CE_NOTE, "Converting READ/WRITE(12) to READ/WRITE(10)"));
2992 2992 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2993 2993 flagvals = cdb[1];
2994 2994 groupnum = cdb[10];
2995 2995 control = cdb[11];
2996 2996 }
2997 2997
2998 2998 bzero(cdb, sizeof (cdb));
2999 2999
3000 3000 cdb[0] = opcode;
3001 3001 cdb[1] = flagvals;
3002 3002 cdb[6] = groupnum;
3003 3003 cdb[9] = control;
3004 3004 /* Set transfer length */
3005 3005 cdb[8] = (U8)(num_blocks & 0xff);
3006 3006 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3007 3007 cdb_len = 10;
3008 3008 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3009 3009 /* Convert to 16 byte CDB for large LBA's */
3010 3010 con_log(CL_ANN,
3011 3011 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3012 3012 switch (cdb_len) {
3013 3013 case 6:
3014 3014 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3015 3015 control = cdb[5];
3016 3016 break;
3017 3017 case 10:
3018 3018 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3019 3019 flagvals = cdb[1];
3020 3020 groupnum = cdb[6];
3021 3021 control = cdb[9];
3022 3022 break;
3023 3023 case 12:
3024 3024 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3025 3025 flagvals = cdb[1];
3026 3026 groupnum = cdb[10];
3027 3027 control = cdb[11];
3028 3028 break;
3029 3029 }
3030 3030
3031 3031 bzero(cdb, sizeof (cdb));
3032 3032
3033 3033 cdb[0] = opcode;
3034 3034 cdb[1] = flagvals;
3035 3035 cdb[14] = groupnum;
3036 3036 cdb[15] = control;
3037 3037
3038 3038 /* Transfer length */
3039 3039 cdb[13] = (U8)(num_blocks & 0xff);
3040 3040 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3041 3041 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3042 3042 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3043 3043
3044 3044 /* Specify 16-byte cdb */
3045 3045 cdb_len = 16;
3046 3046 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3047 3047 /* convert to 10 byte CDB */
3048 3048 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3049 3049 control = cdb[5];
3050 3050
3051 3051 bzero(cdb, sizeof (cdb));
3052 3052 cdb[0] = opcode;
3053 3053 cdb[9] = control;
3054 3054
3055 3055 /* Set transfer length */
3056 3056 cdb[8] = (U8)(num_blocks & 0xff);
3057 3057 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3058 3058
3059 3059 /* Specify 10-byte cdb */
3060 3060 cdb_len = 10;
3061 3061 }
3062 3062
3063 3063
3064 3064 /* Fall through Normal case, just load LBA here */
3065 3065 switch (cdb_len) {
3066 3066 case 6:
3067 3067 {
3068 3068 U8 val = cdb[1] & 0xE0;
3069 3069 cdb[3] = (U8)(start_blk & 0xff);
3070 3070 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3071 3071 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3072 3072 break;
3073 3073 }
3074 3074 case 10:
3075 3075 cdb[5] = (U8)(start_blk & 0xff);
3076 3076 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3077 3077 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3078 3078 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3079 3079 break;
3080 3080 case 12:
3081 3081 cdb[5] = (U8)(start_blk & 0xff);
3082 3082 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3083 3083 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3084 3084 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3085 3085 break;
3086 3086
3087 3087 case 16:
3088 3088 cdb[9] = (U8)(start_blk & 0xff);
3089 3089 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3090 3090 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3091 3091 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3092 3092 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3093 3093 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3094 3094 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3095 3095 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3096 3096 break;
3097 3097 }
3098 3098
3099 3099 *cdb_len_ptr = cdb_len;
3100 3100 }
3101 3101
3102 3102
3103 3103 static int
3104 3104 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3105 3105 {
3106 3106 MR_FW_RAID_MAP_ALL *ld_map;
3107 3107
3108 3108 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3109 3109
3110 3110 ld_map = instance->ld_map[(instance->map_id & 1)];
3111 3111
3112 3112 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3113 3113 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3114 3114
3115 3115 if (MR_ValidateMapInfo(instance->ld_map[
3116 3116 (instance->map_id & 1)], instance->load_balance_info)) {
3117 3117 con_log(CL_ANN,
3118 3118 (CE_CONT, "MR_ValidateMapInfo success"));
3119 3119
3120 3120 instance->fast_path_io = 1;
3121 3121 con_log(CL_ANN,
3122 3122 (CE_NOTE, "instance->fast_path_io %d",
3123 3123 instance->fast_path_io));
3124 3124
3125 3125 return (DDI_SUCCESS);
3126 3126 }
3127 3127
3128 3128 }
3129 3129
3130 3130 instance->fast_path_io = 0;
3131 3131 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3132 3132 con_log(CL_ANN, (CE_NOTE,
3133 3133 "instance->fast_path_io %d", instance->fast_path_io));
3134 3134
3135 3135 return (DDI_FAILURE);
3136 3136 }
3137 3137
3138 3138 /*
3139 3139 * Marks HBA as bad. This will be called either when an
3140 3140 * IO packet times out even after 3 FW resets
3141 3141 * or FW is found to be fault even after 3 continuous resets.
3142 3142 */
3143 3143
3144 3144 void
3145 3145 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3146 3146 {
3147 3147 cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3148 3148
3149 3149 if (instance->deadadapter == 1)
3150 3150 return;
3151 3151
3152 3152 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3153 3153 "Writing to doorbell with MFI_STOP_ADP "));
3154 3154 mutex_enter(&instance->ocr_flags_mtx);
3155 3155 instance->deadadapter = 1;
3156 3156 mutex_exit(&instance->ocr_flags_mtx);
3157 3157 instance->func_ptr->disable_intr(instance);
3158 3158 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3159 3159 /* Flush */
3160 3160 (void) RD_RESERVED0_REGISTER(instance);
3161 3161
3162 3162 (void) mrsas_print_pending_cmds(instance);
3163 3163 (void) mrsas_complete_pending_cmds(instance);
3164 3164 }
3165 3165
3166 3166 void
3167 3167 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3168 3168 {
3169 3169 int i;
3170 3170 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3171 3171 instance->reply_read_index = 0;
3172 3172
3173 3173 /* initializing reply address to 0xFFFFFFFF */
3174 3174 reply_desc = instance->reply_frame_pool;
3175 3175
3176 3176 for (i = 0; i < instance->reply_q_depth; i++) {
3177 3177 reply_desc->Words = (uint64_t)~0;
3178 3178 reply_desc++;
3179 3179 }
3180 3180 }
3181 3181
3182 3182 int
3183 3183 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3184 3184 {
3185 3185 uint32_t status = 0x00;
3186 3186 uint32_t retry = 0;
3187 3187 uint32_t cur_abs_reg_val;
3188 3188 uint32_t fw_state;
3189 3189 uint32_t abs_state;
3190 3190 uint32_t i;
3191 3191
3192 3192 con_log(CL_ANN, (CE_NOTE,
3193 3193 "mrsas_tbolt_reset_ppc entered"));
3194 3194
3195 3195 if (instance->deadadapter == 1) {
3196 3196 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3197 3197 "no more resets as HBA has been marked dead ");
3198 3198 return (DDI_FAILURE);
3199 3199 }
3200 3200
3201 3201 mutex_enter(&instance->ocr_flags_mtx);
3202 3202 instance->adapterresetinprogress = 1;
3203 3203 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3204 3204 	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3205 3205 mutex_exit(&instance->ocr_flags_mtx);
3206 3206
3207 3207 instance->func_ptr->disable_intr(instance);
3208 3208
3209 3209 	/* Add delay in order to complete the in-flight ioctl and IO cmds. */
3210 3210 for (i = 0; i < 3000; i++) {
3211 3211 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3212 3212 }
3213 3213
3214 3214 instance->reply_read_index = 0;
3215 3215
3216 3216 retry_reset:
3217 3217 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3218 3218 	    "Resetting TBOLT"));
3219 3219
3220 3220 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3221 3221 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3222 3222 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3223 3223 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3224 3224 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3225 3225 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3226 3226 con_log(CL_ANN1, (CE_NOTE,
3227 3227 "mrsas_tbolt_reset_ppc: magic number written "
3228 3228 "to write sequence register"));
3229 3229 delay(100 * drv_usectohz(MILLISEC));
3230 3230 status = RD_TBOLT_HOST_DIAG(instance);
3231 3231 con_log(CL_ANN1, (CE_NOTE,
3232 3232 	    "mrsas_tbolt_reset_ppc: HOST DIAG register read "
3233 3233 	    "successfully"));
3234 3234
3235 3235 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3236 3236 delay(100 * drv_usectohz(MILLISEC));
3237 3237 status = RD_TBOLT_HOST_DIAG(instance);
3238 3238 if (retry++ == 100) {
3239 3239 cmn_err(CE_WARN,
3240 3240 			    "mrsas_tbolt_reset_ppc: "
3241 3241 			    "RESET_ADAPTER bit still set after "
3242 3242 			    "%d retries", retry);
3243 3243 return (DDI_FAILURE);
3244 3244 }
3245 3245 }
3246 3246
3247 3247 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3248 3248 delay(100 * drv_usectohz(MILLISEC));
3249 3249
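	/*
	 * Poll the reset status register until the firmware clears the
	 * RESET_ADAPTER bit, i.e. until the chip reset has completed.
	 */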
3250 3250 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3251 3251 (uint8_t *)((uintptr_t)(instance)->regmap +
3252 3252 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3253 3253
3254 3254 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3255 3255 delay(100 * drv_usectohz(MILLISEC));
3256 3256 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3257 3257 (uint8_t *)((uintptr_t)(instance)->regmap +
3258 3258 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3259 3259 if (retry++ == 100) {
3260 3260 			/* Don't call kill adapter here */
3261 3261 			/* RESET_ADAPTER bit is cleared by firmware */
3262 3262 /* mrsas_tbolt_kill_adapter(instance); */
3263 3263 cmn_err(CE_WARN,
3264 3264 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3265 3265 instance->instance, __func__);
3266 3266 return (DDI_FAILURE);
3267 3267 }
3268 3268 }
3269 3269
3270 3270 con_log(CL_ANN,
3271 3271 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3272 3272 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3273 3273 "Calling mfi_state_transition_to_ready"));
3274 3274
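	/*
	 * Wait (up to 1000 x 100ms, roughly 100 seconds) for the firmware to
	 * move past the FW_INIT state before attempting the transition to
	 * READY.
	 */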
3275 3275 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3276 3276 retry = 0;
3277 3277 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3278 3278 delay(100 * drv_usectohz(MILLISEC));
3279 3279 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3280 3280 }
3281 3281 if (abs_state <= MFI_STATE_FW_INIT) {
3282 3282 cmn_err(CE_WARN,
3283 3283 		    "mrsas_tbolt_reset_ppc: firmware state <= MFI_STATE_FW_INIT, "
3284 3284 		    "state = 0x%x, RETRY RESET.", abs_state);
3285 3285 goto retry_reset;
3286 3286 }
3287 3287
3288 3288 	/* Mark HBA as bad if FW is in fault after 3 consecutive resets */
3289 3289 if (mfi_state_transition_to_ready(instance) ||
3290 3290 debug_tbolt_fw_faults_after_ocr_g == 1) {
3291 3291 cur_abs_reg_val =
3292 3292 instance->func_ptr->read_fw_status_reg(instance);
3293 3293 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3294 3294
3295 3295 con_log(CL_ANN1, (CE_NOTE,
3296 3296 		    "mrsas_tbolt_reset_ppc: (before fault injection) FW is not ready "
3297 3297 "FW state = 0x%x", fw_state));
3298 3298 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3299 3299 fw_state = MFI_STATE_FAULT;
3300 3300
3301 3301 con_log(CL_ANN,
3302 3302 		    (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
3303 3303 "FW state = 0x%x", fw_state));
3304 3304
3305 3305 if (fw_state == MFI_STATE_FAULT) {
3306 3306 /* increment the count */
3307 3307 instance->fw_fault_count_after_ocr++;
3308 3308 if (instance->fw_fault_count_after_ocr
3309 3309 < MAX_FW_RESET_COUNT) {
3310 3310 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3311 3311 				    "FW is in fault after OCR, count %d; "
3312 3312 				    "retrying reset",
3313 3313 instance->fw_fault_count_after_ocr);
3314 3314 goto retry_reset;
3315 3315
3316 3316 } else {
3317 3317 				cmn_err(CE_WARN, "mrsas %d: %s: "
3318 3318 				    "max reset count exceeded (>%d); "
3319 3319 				    "marking HBA as bad, killing adapter",
3320 3320 instance->instance, __func__,
3321 3321 MAX_FW_RESET_COUNT);
3322 3322
3323 3323 mrsas_tbolt_kill_adapter(instance);
3324 3324 return (DDI_FAILURE);
3325 3325 }
3326 3326 }
3327 3327 }
3328 3328
3329 3329 /* reset the counter as FW is up after OCR */
3330 3330 instance->fw_fault_count_after_ocr = 0;
3331 3331
3332 3332 mrsas_reset_reply_desc(instance);
3333 3333
3334 3334
3335 3335 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3336 3336 "Calling mrsas_issue_init_mpi2"));
3337 3337 abs_state = mrsas_issue_init_mpi2(instance);
3338 3338 if (abs_state == (uint32_t)DDI_FAILURE) {
3339 3339 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3340 3340 "INIT failed Retrying Reset");
3341 3341 goto retry_reset;
3342 3342 }
3343 3343 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3344 3344 "mrsas_issue_init_mpi2 Done"));
3345 3345
3346 3346 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3347 3347 	    "Calling mrsas_print_pending_cmds"));
3348 3348 (void) mrsas_print_pending_cmds(instance);
3349 3349 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 3350 	    "mrsas_print_pending_cmds done"));
3351 3351
3352 3352 instance->func_ptr->enable_intr(instance);
3353 3353 instance->fw_outstanding = 0;
3354 3354
3355 3355 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3356 3356 "Calling mrsas_issue_pending_cmds"));
3357 3357 (void) mrsas_issue_pending_cmds(instance);
3358 3358 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3359 3359 "issue_pending_cmds done."));
3360 3360
3361 3361 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3362 3362 "Calling aen registration"));
3363 3363
3364 3364 instance->aen_cmd->retry_count_for_ocr = 0;
3365 3365 instance->aen_cmd->drv_pkt_time = 0;
3366 3366
3367 3367 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3368 3368
3369 3369 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag."));
3370 3370 mutex_enter(&instance->ocr_flags_mtx);
3371 3371 instance->adapterresetinprogress = 0;
3372 3372 mutex_exit(&instance->ocr_flags_mtx);
3373 3373 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3374 3374 	    "adapterresetinprogress flag unset"));
3375 3375
3376 3376 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3377 3377 return (DDI_SUCCESS);
3378 3378
3379 3379 }
3380 3380
3381 3381
3382 3382 /*
3383 3383  * mrsas_tbolt_sync_map_info - Returns FW's ld_map structure
3384 3384 * @instance: Adapter soft state
3385 3385 *
3386 3386 * Issues an internal command (DCMD) to get the FW's controller PD
3387 3387  * list structure. This information is mainly used to find out the
3388 3388  * SYSTEM PDs supported by the FW.
3389 3389 */
3390 3390
3391 3391 static int
3392 3392 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3393 3393 {
3394 3394 int ret = 0, i;
3395 3395 struct mrsas_cmd *cmd = NULL;
3396 3396 struct mrsas_dcmd_frame *dcmd;
3397 3397 uint32_t size_sync_info, num_lds;
3398 3398 LD_TARGET_SYNC *ci = NULL;
3399 3399 MR_FW_RAID_MAP_ALL *map;
3400 3400 MR_LD_RAID *raid;
3401 3401 LD_TARGET_SYNC *ld_sync;
3402 3402 uint32_t ci_h = 0;
3403 3403 uint32_t size_map_info;
3404 3404
3405 3405 cmd = get_raid_msg_pkt(instance);
3406 3406
3407 3407 if (cmd == NULL) {
3408 3408 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3409 3409 "mrsas_tbolt_sync_map_info(). ");
3410 3410 return (DDI_FAILURE);
3411 3411 }
3412 3412
3413 3413 /* Clear the frame buffer and assign back the context id */
3414 3414 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3415 3415 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3416 3416 cmd->index);
3417 3417 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3418 3418
3419 3419
3420 3420 map = instance->ld_map[instance->map_id & 1];
3421 3421
3422 3422 num_lds = map->raidMap.ldCount;
3423 3423
3424 3424 dcmd = &cmd->frame->dcmd;
3425 3425
3426 3426 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3427 3427
3428 3428 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3429 3429 size_sync_info, num_lds));
3430 3430
3431 3431 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3432 3432
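	/*
	 * Reuse the inactive (previous) RAID map buffer as the DMA buffer
	 * that carries the LD_TARGET_SYNC array down to the firmware.
	 */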
3433 3433 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3434 3434 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3435 3435
3436 3436 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3437 3437
3438 3438 ld_sync = (LD_TARGET_SYNC *)ci;
3439 3439
3440 3440 for (i = 0; i < num_lds; i++, ld_sync++) {
3441 3441 raid = MR_LdRaidGet(i, map);
3442 3442
3443 3443 con_log(CL_ANN1,
3444 3444 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3445 3445 i, raid->seqNum, raid->flags.ldSyncRequired));
3446 3446
3447 3447 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3448 3448
3449 3449 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3450 3450 i, ld_sync->ldTargetId));
3451 3451
3452 3452 ld_sync->seqNum = raid->seqNum;
3453 3453 }
3454 3454
3455 3455
3456 3456 size_map_info = sizeof (MR_FW_RAID_MAP) +
3457 3457 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3458 3458
3459 3459 dcmd->cmd = MFI_CMD_OP_DCMD;
3460 3460 dcmd->cmd_status = 0xFF;
3461 3461 dcmd->sge_count = 1;
3462 3462 dcmd->flags = MFI_FRAME_DIR_WRITE;
3463 3463 dcmd->timeout = 0;
3464 3464 dcmd->pad_0 = 0;
3465 3465 dcmd->data_xfer_len = size_map_info;
3466 3466 ASSERT(num_lds <= 255);
3467 3467 dcmd->mbox.b[0] = (U8)num_lds;
3468 3468 dcmd->mbox.b[1] = 1; /* Pend */
3469 3469 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3470 3470 dcmd->sgl.sge32[0].phys_addr = ci_h;
3471 3471 dcmd->sgl.sge32[0].length = size_map_info;
3472 3472
3473 3473
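	/*
	 * The sync-map DCMD is not waited on here: the firmware is expected
	 * to hold ("pend") it and complete it only when the RAID map changes,
	 * so the command is remembered in map_update_cmd and the completion
	 * path picks up the new map later.
	 */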
3474 3474 instance->map_update_cmd = cmd;
3475 3475 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3476 3476
3477 3477 instance->func_ptr->issue_cmd(cmd, instance);
3478 3478
3479 3479 instance->unroll.syncCmd = 1;
3480 3480 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3481 3481
3482 3482 return (ret);
3483 3483 }
3484 3484
3485 3485 /*
3486 3486 * abort_syncmap_cmd
3487 3487 */
3488 3488 int
3489 3489 abort_syncmap_cmd(struct mrsas_instance *instance,
3490 3490 struct mrsas_cmd *cmd_to_abort)
3491 3491 {
3492 3492 int ret = 0;
3493 3493
3494 3494 struct mrsas_cmd *cmd;
3495 3495 struct mrsas_abort_frame *abort_fr;
3496 3496
3497 3497 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3498 3498
3499 3499 cmd = get_raid_msg_mfi_pkt(instance);
3500 3500
3501 3501 if (!cmd) {
3502 3502 cmn_err(CE_WARN,
3503 3503 		    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3504 3504 return (DDI_FAILURE);
3505 3505 }
3506 3506 /* Clear the frame buffer and assign back the context id */
3507 3507 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3508 3508 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3509 3509 cmd->index);
3510 3510
3511 3511 abort_fr = &cmd->frame->abort;
3512 3512
3513 3513 /* prepare and issue the abort frame */
3514 3514 ddi_put8(cmd->frame_dma_obj.acc_handle,
3515 3515 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3516 3516 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3517 3517 MFI_CMD_STATUS_SYNC_MODE);
3518 3518 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3519 3519 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3520 3520 cmd_to_abort->index);
3521 3521 ddi_put32(cmd->frame_dma_obj.acc_handle,
3522 3522 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3523 3523 ddi_put32(cmd->frame_dma_obj.acc_handle,
3524 3524 &abort_fr->abort_mfi_phys_addr_hi, 0);
3525 3525
3526 3526 cmd->frame_count = 1;
3527 3527
3528 3528 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3529 3529
3530 3530 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3531 3531 con_log(CL_ANN1, (CE_WARN,
3532 3532 		    "abort_syncmap_cmd: issue_cmd_in_poll_mode failed"));
3533 3533 ret = -1;
3534 3534 } else {
3535 3535 ret = 0;
3536 3536 }
3537 3537
3538 3538 return_raid_msg_mfi_pkt(instance, cmd);
3539 3539
3540 3540 atomic_add_16(&instance->fw_outstanding, (-1));
3541 3541
3542 3542 return (ret);
3543 3543 }
3544 3544
3545 3545
3546 3546 #ifdef PDSUPPORT
3547 3547 /*
3548 3548 * Even though these functions were originally intended for 2208 only, it
3549 3549 * turns out they're useful for "Skinny" support as well. In a perfect world,
3550 3550 * these two functions would be either in mr_sas.c, or in their own new source
3551 3551 * file. Since this driver needs some cleanup anyway, keep this portion in
3552 3552 * mind as well.
3553 3553 */
3554 3554
3555 3555 int
3556 3556 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3557 3557 uint8_t lun, dev_info_t **ldip)
3558 3558 {
3559 3559 struct scsi_device *sd;
3560 3560 dev_info_t *child;
3561 3561 int rval, dtype;
3562 3562 struct mrsas_tbolt_pd_info *pds = NULL;
3563 3563
3564 3564 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3565 3565 tgt, lun));
3566 3566
3567 3567 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3568 3568 if (ldip) {
3569 3569 *ldip = child;
3570 3570 }
3571 3571 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3572 3572 rval = mrsas_service_evt(instance, tgt, 1,
3573 3573 MRSAS_EVT_UNCONFIG_TGT, NULL);
3574 3574 con_log(CL_ANN1, (CE_WARN,
3575 3575 			    "mr_sas: DELETING STALE ENTRY rval = %d "
3576 3576 "tgt id = %d", rval, tgt));
3577 3577 return (NDI_FAILURE);
3578 3578 }
3579 3579 return (NDI_SUCCESS);
3580 3580 }
3581 3581
3582 3582 pds = (struct mrsas_tbolt_pd_info *)
3583 3583 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3584 3584 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3585 3585 dtype = pds->scsiDevType;
3586 3586
3587 3587 /* Check for Disk */
3588 3588 	if (dtype == DTYPE_DIRECT) {
3589 3589 		/* Only PDs in SYSTEM state are exposed to the OS. */
3590 3590 		if (LE_16(pds->fwState) != PD_SYSTEM) {
3591 3591 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3592 3592 return (NDI_FAILURE);
3593 3593 }
3594 3594 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3595 3595 sd->sd_address.a_hba_tran = instance->tran;
3596 3596 sd->sd_address.a_target = (uint16_t)tgt;
3597 3597 sd->sd_address.a_lun = (uint8_t)lun;
3598 3598
3599 3599 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3600 3600 rval = mrsas_config_scsi_device(instance, sd, ldip);
3601 3601 con_log(CL_DLEVEL1, (CE_NOTE,
3602 3602 "Phys. device found: tgt %d dtype %d: %s",
3603 3603 tgt, dtype, sd->sd_inq->inq_vid));
3604 3604 } else {
3605 3605 rval = NDI_FAILURE;
3606 3606 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3607 3607 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3608 3608 tgt, dtype, sd->sd_inq->inq_vid));
3609 3609 }
3610 3610
3611 3611 /* sd_unprobe is blank now. Free buffer manually */
3612 3612 if (sd->sd_inq) {
3613 3613 kmem_free(sd->sd_inq, SUN_INQSIZE);
3614 3614 sd->sd_inq = (struct scsi_inquiry *)NULL;
3615 3615 }
3616 3616 kmem_free(sd, sizeof (struct scsi_device));
3617 3617 } else {
3618 3618 con_log(CL_ANN1, (CE_NOTE,
3619 3619 "Device not supported: tgt %d lun %d dtype %d",
3620 3620 tgt, lun, dtype));
3621 3621 rval = NDI_FAILURE;
3622 3622 }
3623 3623
3624 3624 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3625 3625 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3626 3626 rval));
3627 3627 return (rval);
3628 3628 }
3629 3629
3630 3630 static void
3631 3631 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3632 3632 struct mrsas_tbolt_pd_info *pds, int tgt)
3633 3633 {
3634 3634 struct mrsas_cmd *cmd;
3635 3635 struct mrsas_dcmd_frame *dcmd;
3636 3636 dma_obj_t dcmd_dma_obj;
3637 3637
3638 3638 ASSERT(instance->tbolt || instance->skinny);
3639 3639
3640 3640 if (instance->tbolt)
3641 3641 cmd = get_raid_msg_pkt(instance);
3642 3642 else
3643 3643 cmd = mrsas_get_mfi_pkt(instance);
3644 3644
3645 3645 if (!cmd) {
3646 3646 con_log(CL_ANN1,
3647 3647 (CE_WARN, "Failed to get a cmd for get pd info"));
3648 3648 return;
3649 3649 }
3650 3650
3651 3651 /* Clear the frame buffer and assign back the context id */
3652 3652 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3653 3653 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3654 3654 cmd->index);
3655 3655
3656 3656
3657 3657 dcmd = &cmd->frame->dcmd;
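	/*
	 * Set up a one-segment DMA buffer large enough to receive the
	 * mrsas_tbolt_pd_info structure returned by MR_DCMD_PD_GET_INFO.
	 */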
3658 3658 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3659 3659 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3660 3660 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3661 3661 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3662 3662 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3663 3663 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3664 3664
3665 3665 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3666 3666 DDI_STRUCTURE_LE_ACC);
3667 3667 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3668 3668 bzero(dcmd->mbox.b, 12);
3669 3669 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3670 3670 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3671 3671 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3672 3672 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3673 3673 MFI_FRAME_DIR_READ);
3674 3674 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3675 3675 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3676 3676 sizeof (struct mrsas_tbolt_pd_info));
3677 3677 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3678 3678 MR_DCMD_PD_GET_INFO);
3679 3679 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3680 3680 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3681 3681 sizeof (struct mrsas_tbolt_pd_info));
3682 3682 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3683 3683 dcmd_dma_obj.dma_cookie[0].dmac_address);
3684 3684
3685 3685 cmd->sync_cmd = MRSAS_TRUE;
3686 3686 cmd->frame_count = 1;
3687 3687
3688 3688 if (instance->tbolt)
3689 3689 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3690 3690
3691 3691 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3692 3692
3693 3693 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3694 3694 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3695 3695 DDI_DEV_AUTOINCR);
3696 3696 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3697 3697
3698 3698 if (instance->tbolt)
3699 3699 return_raid_msg_pkt(instance, cmd);
3700 3700 else
3701 3701 mrsas_return_mfi_pkt(instance, cmd);
3702 3702 }
3703 3703 #endif	/* PDSUPPORT */
1796 lines elided