Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/impl/scsi_watch.c
+++ new/usr/src/uts/common/io/scsi/impl/scsi_watch.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * generic scsi device watch
28 28 */
29 29
30 30 #if DEBUG || lint
31 31 #define SWDEBUG
32 32 #endif
33 33
34 34 /*
35 35 * debug goodies
36 36 */
37 37 #ifdef SWDEBUG
38 38 static int swdebug = 0;
39 39 #define DEBUGGING ((scsi_options & SCSI_DEBUG_TGT) && sddebug > 1)
40 40 #define SW_DEBUG if (swdebug == 1) scsi_log
41 41 #define SW_DEBUG2 if (swdebug > 1) scsi_log
42 42 #else /* SWDEBUG */
43 43 #define swdebug (0)
44 44 #define DEBUGGING (0)
45 45 #define SW_DEBUG if (0) scsi_log
46 46 #define SW_DEBUG2 if (0) scsi_log
47 47 #endif
48 48
49 49
50 50
51 51 /*
52 52 * Includes, Declarations and Local Data
53 53 */
54 54
55 55 #include <sys/note.h>
56 56 #include <sys/scsi/scsi.h>
57 57 #include <sys/var.h>
58 58 #include <sys/proc.h>
59 59 #include <sys/thread.h>
60 60 #include <sys/callb.h>
61 61
62 62 /*
63 63 * macro for filling in lun value for scsi-1 support
64 64 */
65 65 #define FILL_SCSI1_LUN(devp, pkt) \
66 66 if ((devp->sd_address.a_lun > 0) && \
67 67 (devp->sd_inq->inq_ansi == 0x1)) { \
68 68 ((union scsi_cdb *)(pkt)->pkt_cdbp)->scc_lun = \
69 69 devp->sd_address.a_lun; \
70 70 }
71 71
72 72 char *sw_label = "scsi-watch";
73 73
74 74 static int scsi_watch_io_time = SCSI_WATCH_IO_TIME;
75 75
76 76 /*
77 77 * all info resides in the scsi watch structure
78 78 *
79 79 * the monitoring is performed by one separate thread which works
80 80 * from a linked list of scsi_watch_request packets
81 81 */
82 82 static struct scsi_watch {
83 83 kthread_t *sw_thread; /* the watch thread */
84 84 kmutex_t sw_mutex; /* mutex protecting list */
85 85 /* and this structure */
86 86 kcondvar_t sw_cv; /* cv for waking up thread */
87 87 struct scsi_watch_request *sw_head; /* head of linked list */
88 88 /* of request structures */
89 89 uchar_t sw_state; /* for suspend-resume */
90 90 uchar_t sw_flags; /* to start at head of list */
91 91 /* for watch thread */
92 92 struct scsi_watch_request *swr_current; /* the command waiting to be */
93 93 /* processed by the watch */
94 94 /* thread which is being */
95 95 /* blocked */
96 96 } sw;
97 97
98 98 #if !defined(lint)
99 99 _NOTE(MUTEX_PROTECTS_DATA(scsi_watch::sw_mutex, scsi_watch))
100 100 #endif
101 101
102 102 /*
103 103 * Values for sw_state
104 104 */
105 105 #define SW_RUNNING 0
106 106 #define SW_SUSPEND_REQUESTED 1
107 107 #define SW_SUSPENDED 2
108 108
109 109 /*
110 110 * values for sw_flags
111 111 */
112 112 #define SW_START_HEAD 0x1
113 113
114 114 struct scsi_watch_request {
115 115 struct scsi_watch_request *swr_next; /* linked request list */
116 116 struct scsi_watch_request *swr_prev;
117 117 clock_t swr_interval; /* interval between TURs */
118 118 clock_t swr_timeout; /* count down */
119 119 uchar_t swr_busy; /* TUR in progress */
120 120 uchar_t swr_what; /* watch or stop */
121 121 uchar_t swr_sense_length; /* required sense length */
122 122 struct scsi_pkt *swr_pkt; /* TUR pkt itself */
123 123 struct scsi_pkt *swr_rqpkt; /* request sense pkt */
124 124 struct buf *swr_rqbp; /* bp for request sense data */
125 125 struct buf *swr_mmcbp; /* bp for MMC command data */
126 126 int (*swr_callback)(); /* callback to driver */
127 127 caddr_t swr_callback_arg;
128 128 kcondvar_t swr_terminate_cv; /* cv to wait on to cleanup */
129 129 /* request synchronously */
130 130 int swr_ref; /* refer count to the swr */
131 131 uchar_t suspend_destroy; /* flag for free later */
132 132 };
133 133
134 134 /*
135 135 * values for swr flags
136 136 */
137 137 #define SUSPEND_DESTROY 1
138 138
139 139 #if !defined(lint)
140 140 _NOTE(SCHEME_PROTECTS_DATA("unshared data", scsi_watch_request))
141 141 #endif
142 142
143 143 /*
144 144 * values for sw_what
145 145 */
146 146 #define SWR_WATCH 0 /* device watch */
147 147 #define SWR_STOP 1 /* stop monitoring and destroy swr */
148 148 #define SWR_SUSPEND_REQUESTED 2 /* req. pending suspend */
149 149 #define SWR_SUSPENDED 3 /* req. is suspended */
150 150
151 151 static opaque_t scsi_watch_request_submit_impl(struct scsi_device *devp,
152 152 int interval, int sense_length, int (*callback)(), caddr_t cb_arg,
153 153 boolean_t mmc);
154 154 static void scsi_watch_request_destroy(struct scsi_watch_request *swr);
155 155 static void scsi_watch_thread(void);
156 156 static void scsi_watch_request_intr(struct scsi_pkt *pkt);
157 157
158 158 /*
159 159 * setup, called from _init(), the thread is created when we need it
160 160 * and exits when there is nothing to do anymore and everything has been
161 161 * cleaned up (ie. resources deallocated)
162 162 */
163 163 void
164 164 scsi_watch_init()
165 165 {
166 166 /* NO OTHER THREADS ARE RUNNING */
167 167 mutex_init(&sw.sw_mutex, NULL, MUTEX_DRIVER, NULL);
168 168 cv_init(&sw.sw_cv, NULL, CV_DRIVER, NULL);
169 169 sw.sw_state = SW_RUNNING;
170 170 sw.sw_flags = 0;
171 171 sw.swr_current = NULL;
172 172 }
173 173
174 174 /*
175 175 * cleaning up, called from _fini()
176 176 */
177 177 void
178 178 scsi_watch_fini()
179 179 {
180 180 /* NO OTHER THREADS ARE RUNNING */
181 181 /*
182 182 * hope and pray that the thread has exited
183 183 */
184 184 ASSERT(sw.sw_thread == 0);
185 185 mutex_destroy(&sw.sw_mutex);
186 186 cv_destroy(&sw.sw_cv);
187 187 }
188 188
189 189 /*
190 190 * allocate an swr (scsi watch request structure) and initialize pkts
191 191 */
192 192 #define ROUTE &devp->sd_address
193 193
194 194 opaque_t
195 195 scsi_watch_request_submit(
196 196 struct scsi_device *devp,
197 197 int interval,
198 198 int sense_length,
199 199 int (*callback)(), /* callback function */
200 200 caddr_t cb_arg) /* device number */
201 201 {
202 202 return (scsi_watch_request_submit_impl(devp, interval, sense_length,
203 203 callback, cb_arg, B_FALSE));
204 204 }
205 205
206 206 opaque_t
207 207 scsi_mmc_watch_request_submit(
208 208 struct scsi_device *devp,
209 209 int interval,
210 210 int sense_length,
211 211 int (*callback)(), /* callback function */
212 212 caddr_t cb_arg) /* device number */
213 213 {
214 214 return (scsi_watch_request_submit_impl(devp, interval, sense_length,
215 215 callback, cb_arg, B_TRUE));
216 216 }
217 217
218 218 static opaque_t
219 219 scsi_watch_request_submit_impl(
220 220 struct scsi_device *devp,
221 221 int interval,
222 222 int sense_length,
223 223 int (*callback)(), /* callback function */
224 224 caddr_t cb_arg, /* device number */
225 225 boolean_t mmc)
226 226 {
227 227 register struct scsi_watch_request *swr = NULL;
228 228 register struct scsi_watch_request *sswr, *p;
229 229 struct buf *bp = NULL;
230 230 struct buf *mmcbp = NULL;
231 231 struct scsi_pkt *rqpkt = NULL;
232 232 struct scsi_pkt *pkt = NULL;
233 233 uchar_t dtype;
234 234
235 235 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
236 236 "scsi_watch_request_submit: Entering ...\n");
237 237
238 238 mutex_enter(&sw.sw_mutex);
239 239 if (sw.sw_thread == 0) {
240 240 register kthread_t *t;
241 241
242 242 t = thread_create((caddr_t)NULL, 0, scsi_watch_thread,
243 243 NULL, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
244 244 sw.sw_thread = t;
245 245 }
246 246
247 247 for (p = sw.sw_head; p != NULL; p = p->swr_next) {
248 248 if ((p->swr_callback_arg == cb_arg) &&
249 249 (p->swr_callback == callback))
250 250 break;
251 251 }
252 252
253 253 /* update time interval for an existing request */
254 254 if (p) {
255 255 if (p->swr_what != SWR_STOP) {
256 256 p->swr_timeout = p->swr_interval
257 257 = drv_usectohz(interval);
258 258 p->swr_what = SWR_WATCH;
259 259 p->swr_ref++;
260 260 cv_signal(&sw.sw_cv);
261 261 mutex_exit(&sw.sw_mutex);
262 262 return ((opaque_t)p);
263 263 }
264 264 }
265 265 mutex_exit(&sw.sw_mutex);
266 266
267 267 /*
268 268 * allocate space for scsi_watch_request
269 269 */
270 270 swr = kmem_zalloc(sizeof (struct scsi_watch_request), KM_SLEEP);
271 271
272 272 /*
273 273 * allocate request sense bp and pkt and make cmd
274 274 * we shouldn't really need it if ARQ is enabled but it is useful
275 275 * if the ARQ failed.
276 276 */
277 277 bp = scsi_alloc_consistent_buf(ROUTE, NULL,
278 278 sense_length, B_READ, SLEEP_FUNC, NULL);
279 279
280 280 rqpkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL,
281 281 bp, CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
282 282
283 283 (void) scsi_setup_cdb((union scsi_cdb *)rqpkt->pkt_cdbp,
284 284 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
285 285 FILL_SCSI1_LUN(devp, rqpkt);
286 286 rqpkt->pkt_private = (opaque_t)swr;
287 287 rqpkt->pkt_time = scsi_watch_io_time;
288 288 rqpkt->pkt_comp = scsi_watch_request_intr;
289 289 rqpkt->pkt_flags |= FLAG_HEAD;
290 290
291 291 /*
292 292 * Create TUR pkt or GET STATUS EVENT NOTIFICATION for MMC requests or
293 293 * a zero byte WRITE(10) based on the disk-type for reservation state.
294 294 * For inq_dtype of SBC (DIRECT, dtype == 0)
295 295 * OR for RBC devices (dtype is 0xE) AND for
296 296 * ANSI version of SPC/SPC-2/SPC-3 (inq_ansi == 3-5).
297 297 */
298 298
299 299 dtype = devp->sd_inq->inq_dtype & DTYPE_MASK;
300 300 if (mmc) {
301 301 mmcbp = scsi_alloc_consistent_buf(ROUTE, NULL,
302 302 8, B_READ, SLEEP_FUNC, NULL);
303 303
304 304 pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, mmcbp,
305 305 CDB_GROUP1, sizeof (struct scsi_arq_status),
306 306 0, 0, SLEEP_FUNC, NULL);
307 307
308 308 (void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
309 309 SCMD_GET_EVENT_STATUS_NOTIFICATION, 0, 8, 0);
310 310 pkt->pkt_cdbp[1] = 1; /* polled */
311 311 pkt->pkt_cdbp[4] = 1 << SD_GESN_MEDIA_CLASS;
312 312 } else if (((dtype == 0) || (dtype == 0xE)) &&
313 313 (devp->sd_inq->inq_ansi > 2)) {
314 314 pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, NULL,
315 315 CDB_GROUP1, sizeof (struct scsi_arq_status),
316 316 0, 0, SLEEP_FUNC, NULL);
317 317
318 318 (void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
319 319 SCMD_WRITE_G1, 0, 0, 0);
320 320 } else {
321 321 pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, NULL,
322 322 CDB_GROUP0, sizeof (struct scsi_arq_status),
323 323 0, 0, SLEEP_FUNC, NULL);
324 324
325 325 (void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
326 326 SCMD_TEST_UNIT_READY, 0, 0, 0);
327 327 FILL_SCSI1_LUN(devp, pkt);
328 328 }
329 329
330 330 pkt->pkt_private = (opaque_t)swr;
331 331 pkt->pkt_time = scsi_watch_io_time;
332 332 pkt->pkt_comp = scsi_watch_request_intr;
333 333 if (scsi_ifgetcap(&pkt->pkt_address, "tagged-qing", 1) == 1) {
334 334 pkt->pkt_flags |= FLAG_STAG;
335 335 }
336 336
337 337 /*
338 338 * set the allocated resources in swr
339 339 */
340 340 swr->swr_rqbp = bp;
341 341 swr->swr_rqpkt = rqpkt;
342 342 swr->swr_mmcbp = mmcbp;
343 343 swr->swr_pkt = pkt;
344 344 swr->swr_timeout = swr->swr_interval = drv_usectohz(interval);
345 345 swr->swr_callback = callback;
346 346 swr->swr_callback_arg = cb_arg;
347 347 swr->swr_what = SWR_WATCH;
348 348 swr->swr_sense_length = (uchar_t)sense_length;
349 349 swr->swr_ref = 1;
350 350 cv_init(&swr->swr_terminate_cv, NULL, CV_DRIVER, NULL);
351 351
352 352 /*
353 353 * add to the list and wake up the thread
354 354 */
355 355 mutex_enter(&sw.sw_mutex);
356 356 swr->swr_next = sw.sw_head;
357 357 swr->swr_prev = NULL;
358 358 if (sw.sw_head) {
359 359 sw.sw_head->swr_prev = swr;
360 360 }
361 361 sw.sw_head = swr;
362 362
363 363 /*
364 364 * reset all timeouts, so all requests are in sync again
365 365 * XXX there is a small window where the watch thread releases
366 366 * the mutex so that could upset the resyncing
367 367 */
368 368 sswr = swr;
369 369 while (sswr) {
370 370 sswr->swr_timeout = swr->swr_interval;
371 371 sswr = sswr->swr_next;
372 372 }
373 373 cv_signal(&sw.sw_cv);
374 374 mutex_exit(&sw.sw_mutex);
375 375 return ((opaque_t)swr);
376 376 }
377 377
378 378
379 379 /*
380 380 * called by (eg. pwr management) to resume the scsi_watch_thread
381 381 */
382 382 void
383 383 scsi_watch_resume(opaque_t token)
384 384 {
385 385 struct scsi_watch_request *swr = (struct scsi_watch_request *)NULL;
386 386 /*
387 387 * Change the state to SW_RUNNING and wake up the scsi_watch_thread
388 388 */
389 389 SW_DEBUG(0, sw_label, SCSI_DEBUG, "scsi_watch_resume:\n");
390 390 mutex_enter(&sw.sw_mutex);
391 391
392 392 if (!sw.sw_head)
393 393 goto exit;
394 394
395 395 /* search for token */
396 396 for (swr = sw.sw_head; swr; swr = swr->swr_next) {
397 397 if (swr == (struct scsi_watch_request *)token)
398 398 break;
399 399 }
400 400
401 401 /* if we can't find this value, then we just do nothing */
402 402 if (swr == (struct scsi_watch_request *)NULL)
403 403 goto exit;
404 404
405 405 swr->swr_what = SWR_WATCH;
406 406
407 407
408 408 /* see if all swr's are awake, then start the thread again */
409 409 for (swr = sw.sw_head; swr; swr = swr->swr_next) {
410 410 if (swr->swr_what != SWR_WATCH)
411 411 goto exit;
412 412 }
413 413
414 414 sw.sw_state = SW_RUNNING;
415 415 cv_signal(&sw.sw_cv);
416 416
417 417 exit:
418 418 mutex_exit(&sw.sw_mutex);
419 419 }
420 420
421 421
422 422 /*
423 423 * called by clients (eg. pwr management) to suspend the scsi_watch_thread
424 424 */
425 425 void
426 426 scsi_watch_suspend(opaque_t token)
427 427 {
428 428 struct scsi_watch_request *swr = (struct scsi_watch_request *)NULL;
429 429 clock_t halfsec_delay = drv_usectohz(500000);
430 430
431 431 SW_DEBUG(0, sw_label, SCSI_DEBUG, "scsi_watch_suspend:\n");
432 432
433 433 mutex_enter(&sw.sw_mutex);
434 434
435 435 if (!sw.sw_head)
436 436 goto exit;
437 437
438 438 /* search for token */
439 439 for (swr = sw.sw_head; swr; swr = swr->swr_next) {
440 440 if (swr == (struct scsi_watch_request *)token)
441 441 break;
442 442 }
443 443
444 444 /* if we can't find this value, then we just do nothing */
445 445 if (swr == (struct scsi_watch_request *)NULL)
446 446 goto exit;
447 447
448 448
449 449 for (;;) {
450 450 if (swr->swr_busy) {
451 451 /*
452 452 * XXX: Assumes that this thread can rerun
453 453 * till all outstanding cmds are complete
454 454 */
455 455 swr->swr_what = SWR_SUSPEND_REQUESTED;
456 456 (void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex,
457 457 halfsec_delay, TR_CLOCK_TICK);
458 458 } else {
459 459 swr->swr_what = SWR_SUSPENDED;
460 460 break;
461 461 }
462 462 }
463 463
464 464 /* see if all swr's are suspended, then suspend the thread */
465 465 for (swr = sw.sw_head; swr; swr = swr->swr_next) {
466 466 if (swr->swr_what != SWR_SUSPENDED)
467 467 goto exit;
468 468 }
469 469
470 470 sw.sw_state = SW_SUSPENDED;
471 471
472 472 exit:
473 473 mutex_exit(&sw.sw_mutex);
474 474 }
475 475
476 476 /*
477 477 * destroy swr, called for watch thread
478 478 */
479 479 static void
480 480 scsi_watch_request_destroy(struct scsi_watch_request *swr)
481 481 {
482 482 ASSERT(MUTEX_HELD(&sw.sw_mutex));
483 483 ASSERT(swr->swr_busy == 0);
484 484
485 485 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
486 486 "scsi_watch_request_destroy: Entering ...\n");
487 487 if (swr->swr_ref != 0)
488 488 return;
489 489
490 490 /*
491 491 * remove swr from linked list and destroy pkts
492 492 */
493 493 if (swr->swr_prev) {
494 494 swr->swr_prev->swr_next = swr->swr_next;
495 495 }
496 496 if (swr->swr_next) {
497 497 swr->swr_next->swr_prev = swr->swr_prev;
498 498 }
499 499 if (sw.sw_head == swr) {
500 500 sw.sw_head = swr->swr_next;
501 501 }
502 502 if (sw.swr_current == swr) {
503 503 swr->suspend_destroy = SUSPEND_DESTROY;
504 504 sw.swr_current = NULL;
505 505 }
506 506
507 507 scsi_destroy_pkt(swr->swr_rqpkt);
508 508 scsi_free_consistent_buf(swr->swr_rqbp);
509 509 if (swr->swr_mmcbp != NULL) {
510 510 scsi_free_consistent_buf(swr->swr_mmcbp);
511 511 }
512 512 scsi_destroy_pkt(swr->swr_pkt);
513 513 cv_signal(&swr->swr_terminate_cv);
514 514 }
515 515
516 516 /*
517 517 * scsi_watch_request_terminate()
518 518 * called by requestor to terminate any pending watch request.
519 519 * if the request is currently "busy", and the caller cannot wait, failure
520 520 * is returned. O/w the request is cleaned up immediately.
521 521 */
522 522 int
523 523 scsi_watch_request_terminate(opaque_t token, int flags)
524 524 {
525 525 struct scsi_watch_request *swr =
526 526 (struct scsi_watch_request *)token;
527 527 struct scsi_watch_request *sswr;
528 528
529 529 int count = 0;
530 530 int free_flag = 0;
531 531
532 532 /*
533 533 * We try to clean up this request if we can. We also inform
534 534 * the watch thread that we mucked around the list so it has
535 535 * to start reading from head of list again.
536 536 */
537 537 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
538 538 "scsi_watch_request_terminate: Entering(0x%p) ...\n",
539 539 (void *)swr);
540 540 mutex_enter(&sw.sw_mutex);
541 541
542 542 /*
543 543 * check if it is still in the list
544 544 */
545 545 sswr = sw.sw_head;
546 546 while (sswr) {
547 547 if (sswr == swr) {
548 548 swr->swr_ref--;
549 549 count = swr->swr_ref;
550 550
551 551 if (swr->swr_busy) {
552 552 if (flags == SCSI_WATCH_TERMINATE_NOWAIT) {
553 553 mutex_exit(&sw.sw_mutex);
554 554 return (SCSI_WATCH_TERMINATE_FAIL);
555 555 }
556 556 if (count != 0 && flags !=
557 557 SCSI_WATCH_TERMINATE_ALL_WAIT) {
558 558 mutex_exit(&sw.sw_mutex);
559 559 return (SCSI_WATCH_TERMINATE_SUCCESS);
560 560 }
561 561 if (SCSI_WATCH_TERMINATE_ALL_WAIT == flags) {
562 562 swr->swr_ref = 0;
563 563 count = 0;
564 564 }
565 565 swr->swr_what = SWR_STOP;
566 566 cv_wait(&swr->swr_terminate_cv, &sw.sw_mutex);
567 567 free_flag = 1;
568 568 goto done;
569 569 } else {
570 570 if (SCSI_WATCH_TERMINATE_NOWAIT == flags ||
571 571 SCSI_WATCH_TERMINATE_ALL_WAIT == flags) {
572 572 swr->swr_ref = 0;
573 573 count = 0;
574 574 }
575 575 scsi_watch_request_destroy(swr);
576 576 if (0 == count) {
577 577 sw.sw_flags |= SW_START_HEAD;
578 578 free_flag = 1;
579 579 }
580 580 goto done;
581 581 }
582 582 }
583 583 sswr = sswr->swr_next;
584 584 }
585 585 done:
586 586 mutex_exit(&sw.sw_mutex);
587 587 if (!sswr) {
588 588 return (SCSI_WATCH_TERMINATE_FAIL);
589 589 }
590 590 if (1 == free_flag &&
591 591 sswr->suspend_destroy != SUSPEND_DESTROY) {
592 592 cv_destroy(&swr->swr_terminate_cv);
593 593 kmem_free((caddr_t)swr, sizeof (struct scsi_watch_request));
594 594 }
595 595
596 596 return (SCSI_WATCH_TERMINATE_SUCCESS);
597 597 }
598 598
599 599
600 600 /*
601 601 * The routines scsi_watch_thread & scsi_watch_request_intr are
602 602 * on different threads.
603 603 * If there is no work to be done by the lower level driver
604 604 * then swr->swr_busy will not be set.
605 605 * In this case we will call CALLB_CPR_SAFE_BEGIN before
606 606 * calling cv_timedwait.
607 607 * In the other case where there is work to be done by
608 608 * the lower level driver then the flag swr->swr_busy will
609 609 * be set.
610 610 * We cannot call CALLB_CPR_SAFE_BEGIN at this point the reason
611 611 * is the intr thread can interfere with our operations. So
612 612 * we do a cv_timedwait here. Now at the completion of the
613 613 * lower level driver's work we will call CALLB_CPR_SAFE_BEGIN
614 614 * in scsi_watch_request_intr.
615 615 * In all the cases we will call CALLB_CPR_SAFE_END only if
616 616 * we already called a CALLB_CPR_SAFE_BEGIN and this is flagged
617 617 * by sw_cpr_flag.
618 618 * Warlock has a problem when we use different locks
619 619 * on the same type of structure in different contexts.
620 620 * We use callb_cpr_t in both scsi_watch and esp_callback threads.
621 621 	 * we use different mutexes in different threads. And
622 622 * this is not acceptable to warlock. To avoid this
623 623 * problem we use the same name for the mutex in
624 624 * both scsi_watch & esp_callback. when __lock_lint is not defined
625 625 * esp_callback uses the mutex on the stack and in scsi_watch
626 626 * a static variable. But when __lock_lint is defined
627 627 * we make a mutex which is global in esp_callback and
628 628 * a external mutex for scsi_watch.
629 629 */
630 630 static int sw_cmd_count = 0;
631 631 static int sw_cpr_flag = 0;
632 632 static callb_cpr_t cpr_info;
633 633 #ifndef __lock_lint
634 634 static kmutex_t cpr_mutex;
635 635 #else
636 636 extern kmutex_t cpr_mutex;
637 637 #endif
638 638
639 639 #if !defined(lint)
640 640 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cpr_info))
641 641 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, sw_cmd_count))
642 642 #endif
643 643 /*
644 644 * the scsi watch thread:
645 645 	 * it either wakes up if there is work to do or if the cv_timedwait
646 646 * timed out
647 647 * normally, it wakes up every <delay> seconds and checks the list.
648 648 * the interval is not very accurate if the cv was signalled but that
↓ open down ↓ |
648 lines elided |
↑ open up ↑ |
649 649 * really doesn't matter much
650 650 	 * it is more important that we fire off all TURs simultaneously so
651 651 * we don't have to wake up frequently
652 652 */
653 653 static void
654 654 scsi_watch_thread()
655 655 {
656 656 struct scsi_watch_request *swr, *next;
657 657 clock_t last_delay = 0;
658 658 clock_t next_delay = 0;
659 - clock_t onesec = drv_usectohz(1000000);
659 + clock_t onesec = drv_sectohz(1);
660 660 clock_t exit_delay = 60 * onesec;
661 661
662 662 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
663 663 "scsi_watch_thread: Entering ...\n");
664 664
665 665 #if !defined(lint)
666 666 _NOTE(NO_COMPETING_THREADS_NOW);
667 667 #endif
668 668 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
669 669 CALLB_CPR_INIT(&cpr_info,
670 670 &cpr_mutex, callb_generic_cpr, "scsi_watch");
671 671 sw_cpr_flag = 0;
672 672 #if !defined(lint)
673 673 /*LINTED*/
674 674 _NOTE(COMPETING_THREADS_NOW);
675 675 #endif
676 676 /*
677 677 * grab the mutex and wait for work
678 678 */
679 679 mutex_enter(&sw.sw_mutex);
680 680 if (sw.sw_head == NULL) {
681 681 cv_wait(&sw.sw_cv, &sw.sw_mutex);
682 682 }
683 683
684 684 /*
685 685 * now loop forever for work; if queue is empty exit
686 686 */
687 687 for (;;) {
688 688 head:
689 689 swr = sw.sw_head;
690 690 while (swr) {
691 691
692 692 /*
693 693 * If state is not running, wait for scsi_watch_resume
694 694 * to signal restart, but before going into cv_wait
695 695 * need to let the PM framework know that it is safe
696 696 * to stop this thread for CPR
697 697 */
698 698 if (sw.sw_state != SW_RUNNING) {
699 699 SW_DEBUG(0, sw_label, SCSI_DEBUG,
700 700 "scsi_watch_thread suspended\n");
701 701 mutex_enter(&cpr_mutex);
702 702 if (!sw_cmd_count) {
703 703 CALLB_CPR_SAFE_BEGIN(&cpr_info);
704 704 sw_cpr_flag = 1;
705 705 }
706 706 mutex_exit(&cpr_mutex);
707 707 sw.swr_current = swr;
708 708 cv_wait(&sw.sw_cv, &sw.sw_mutex);
709 709
710 710
711 711 /*
712 712 * Need to let the PM framework know that it
713 713 * is no longer safe to stop the thread for
714 714 * CPR.
715 715 */
716 716 mutex_exit(&sw.sw_mutex);
717 717 mutex_enter(&cpr_mutex);
718 718 if (sw_cpr_flag == 1) {
719 719 CALLB_CPR_SAFE_END(
720 720 &cpr_info, &cpr_mutex);
721 721 sw_cpr_flag = 0;
722 722 }
723 723 mutex_exit(&cpr_mutex);
724 724 mutex_enter(&sw.sw_mutex);
725 725 if (SUSPEND_DESTROY == swr->suspend_destroy) {
726 726 cv_destroy(&swr->swr_terminate_cv);
727 727 kmem_free((caddr_t)swr,
728 728 sizeof (struct scsi_watch_request));
729 729 goto head;
730 730 } else {
731 731 sw.swr_current = NULL;
732 732 }
733 733 }
734 734 if (next_delay == 0) {
735 735 next_delay = swr->swr_timeout;
736 736 } else {
737 737 next_delay = min(swr->swr_timeout, next_delay);
738 738 }
739 739
740 740 swr->swr_timeout -= last_delay;
741 741 next = swr->swr_next;
742 742
743 743 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
744 744 "scsi_watch_thread: "
745 745 "swr(0x%p),what=%x,timeout=%lx,"
746 746 "interval=%lx,delay=%lx\n",
747 747 (void *)swr, swr->swr_what, swr->swr_timeout,
748 748 swr->swr_interval, last_delay);
749 749
750 750 switch (swr->swr_what) {
751 751 case SWR_SUSPENDED:
752 752 case SWR_SUSPEND_REQUESTED:
753 753 /* if we are suspended, don't do anything */
754 754 break;
755 755
756 756 case SWR_STOP:
757 757 if (swr->swr_busy == 0) {
758 758 scsi_watch_request_destroy(swr);
759 759 }
760 760 break;
761 761
762 762 default:
763 763 if (swr->swr_timeout <= 0 && !swr->swr_busy) {
764 764 swr->swr_busy = 1;
765 765 swr->swr_timeout = swr->swr_interval;
766 766
767 767 /*
768 768 * submit the cmd and let the completion
769 769 * function handle the result
770 770 * release the mutex (good practice)
771 771 * this should be safe even if the list
772 772 * is changing
773 773 */
774 774 mutex_exit(&sw.sw_mutex);
775 775 mutex_enter(&cpr_mutex);
776 776 sw_cmd_count++;
777 777 mutex_exit(&cpr_mutex);
778 778 SW_DEBUG((dev_info_t *)NULL,
779 779 sw_label, SCSI_DEBUG,
780 780 "scsi_watch_thread: "
781 781 "Starting TUR\n");
782 782 if (scsi_transport(swr->swr_pkt) !=
783 783 TRAN_ACCEPT) {
784 784
785 785 /*
786 786 * try again later
787 787 */
788 788 swr->swr_busy = 0;
789 789 SW_DEBUG((dev_info_t *)NULL,
790 790 sw_label, SCSI_DEBUG,
791 791 "scsi_watch_thread: "
792 792 "Transport Failed\n");
793 793 mutex_enter(&cpr_mutex);
794 794 sw_cmd_count--;
795 795 mutex_exit(&cpr_mutex);
796 796 }
797 797 mutex_enter(&sw.sw_mutex);
798 798 }
799 799 break;
800 800 }
801 801 swr = next;
802 802 if (sw.sw_flags & SW_START_HEAD) {
803 803 sw.sw_flags &= ~SW_START_HEAD;
804 804 goto head;
805 805 }
806 806 }
807 807
808 808 /*
809 809 * delay using cv_timedwait; we return when
810 810 * signalled or timed out
811 811 */
812 812 if (sw.sw_head != NULL) {
813 813 if (next_delay <= 0) {
814 814 next_delay = onesec;
815 815 }
816 816 } else {
817 817 next_delay = exit_delay;
818 818 }
819 819
820 820 mutex_enter(&cpr_mutex);
821 821 if (!sw_cmd_count) {
822 822 CALLB_CPR_SAFE_BEGIN(&cpr_info);
823 823 sw_cpr_flag = 1;
824 824 }
825 825 mutex_exit(&cpr_mutex);
826 826 /*
827 827 * if we return from cv_timedwait because we were
828 828 * signalled, the delay is not accurate but that doesn't
829 829 * really matter
830 830 */
831 831 (void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex, next_delay,
832 832 TR_CLOCK_TICK);
833 833 mutex_exit(&sw.sw_mutex);
834 834 mutex_enter(&cpr_mutex);
835 835 if (sw_cpr_flag == 1) {
836 836 CALLB_CPR_SAFE_END(&cpr_info, &cpr_mutex);
837 837 sw_cpr_flag = 0;
838 838 }
839 839 mutex_exit(&cpr_mutex);
840 840 mutex_enter(&sw.sw_mutex);
841 841 last_delay = next_delay;
842 842 next_delay = 0;
843 843
844 844 /*
845 845 * is there still work to do?
846 846 */
847 847 if (sw.sw_head == NULL) {
848 848 break;
849 849 }
850 850 }
851 851
852 852 /*
853 853 * no more work to do, reset sw_thread and exit
854 854 */
855 855 sw.sw_thread = 0;
856 856 mutex_exit(&sw.sw_mutex);
857 857 #ifndef __lock_lint
858 858 mutex_enter(&cpr_mutex);
859 859 CALLB_CPR_EXIT(&cpr_info);
860 860 #endif
861 861 mutex_destroy(&cpr_mutex);
862 862 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
863 863 "scsi_watch_thread: Exiting ...\n");
864 864 }
865 865
866 866 /*
867 867 * callback completion function for scsi watch pkt
868 868 */
869 869 #define SCBP(pkt) ((struct scsi_status *)(pkt)->pkt_scbp)
870 870 #define SCBP_C(pkt) ((*(pkt)->pkt_scbp) & STATUS_MASK)
871 871
872 872 static void
873 873 scsi_watch_request_intr(struct scsi_pkt *pkt)
874 874 {
875 875 struct scsi_watch_result result;
876 876 struct scsi_watch_request *swr =
877 877 (struct scsi_watch_request *)pkt->pkt_private;
878 878 struct scsi_status *rqstatusp;
879 879 struct scsi_extended_sense *rqsensep = NULL;
880 880 int amt = 0;
881 881
882 882 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
883 883 "scsi_watch_intr: Entering ...\n");
884 884
885 885 /*
886 886 * first check if it is the TUR or RQS pkt
887 887 */
888 888 if (pkt == swr->swr_pkt) {
889 889 if (SCBP_C(pkt) != STATUS_GOOD &&
890 890 SCBP_C(pkt) != STATUS_RESERVATION_CONFLICT) {
891 891 if (SCBP(pkt)->sts_chk &&
892 892 ((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
893 893
894 894 /*
895 895 * submit the request sense pkt
896 896 */
897 897 SW_DEBUG((dev_info_t *)NULL,
898 898 sw_label, SCSI_DEBUG,
899 899 "scsi_watch_intr: "
900 900 "Submitting a Request Sense "
901 901 "Packet\n");
902 902 if (scsi_transport(swr->swr_rqpkt) !=
903 903 TRAN_ACCEPT) {
904 904
905 905 /*
906 906 * just give up and try again later
907 907 */
908 908 SW_DEBUG((dev_info_t *)NULL,
909 909 sw_label, SCSI_DEBUG,
910 910 "scsi_watch_intr: "
911 911 "Request Sense "
912 912 "Transport Failed\n");
913 913 goto done;
914 914 }
915 915
916 916 /*
917 917 * wait for rqsense to complete
918 918 */
919 919 return;
920 920
921 921 } else if (SCBP(pkt)->sts_chk) {
922 922
923 923 /*
924 924 * check the autorequest sense data
925 925 */
926 926 struct scsi_arq_status *arqstat =
927 927 (struct scsi_arq_status *)pkt->pkt_scbp;
928 928
929 929 rqstatusp = &arqstat->sts_rqpkt_status;
930 930 rqsensep = &arqstat->sts_sensedata;
931 931 amt = swr->swr_sense_length -
932 932 arqstat->sts_rqpkt_resid;
933 933 SW_DEBUG((dev_info_t *)NULL,
934 934 sw_label, SCSI_DEBUG,
935 935 "scsi_watch_intr: "
936 936 "Auto Request Sense, amt=%x\n", amt);
937 937 }
938 938 }
939 939
940 940 } else if (pkt == swr->swr_rqpkt) {
941 941
942 942 /*
943 943 * check the request sense data
944 944 */
945 945 rqstatusp = (struct scsi_status *)pkt->pkt_scbp;
946 946 rqsensep = (struct scsi_extended_sense *)
947 947 swr->swr_rqbp->b_un.b_addr;
948 948 amt = swr->swr_sense_length - pkt->pkt_resid;
949 949 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
950 950 "scsi_watch_intr: "
951 951 "Request Sense Completed, amt=%x\n", amt);
952 952 } else {
953 953
954 954 /*
955 955 * should not reach here!!!
956 956 */
957 957 scsi_log((dev_info_t *)NULL, sw_label, CE_PANIC,
958 958 "scsi_watch_intr: Bad Packet(0x%p)", (void *)pkt);
959 959 }
960 960
961 961 if (rqsensep) {
962 962
963 963 /*
964 964 * check rqsense status and data
965 965 */
966 966 if (rqstatusp->sts_busy || rqstatusp->sts_chk) {
967 967
968 968 /*
969 969 * try again later
970 970 */
971 971 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
972 972 "scsi_watch_intr: "
973 973 "Auto Request Sense Failed - "
974 974 "Busy or Check Condition\n");
975 975 goto done;
976 976 }
977 977
978 978 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
979 979 "scsi_watch_intr: "
980 980 "es_key=%x, adq=%x, amt=%x\n",
981 981 rqsensep->es_key, rqsensep->es_add_code, amt);
982 982 }
983 983
984 984 /*
985 985 * callback to target driver to do the real work
986 986 */
987 987 result.statusp = SCBP(swr->swr_pkt);
988 988 result.sensep = rqsensep;
989 989 result.actual_sense_length = (uchar_t)amt;
990 990 result.pkt = swr->swr_pkt;
991 991 if (swr->swr_mmcbp != NULL) {
992 992 bcopy(swr->swr_mmcbp->b_un.b_addr, result.mmc_data, 8);
993 993 }
994 994
995 995 if ((*swr->swr_callback)(swr->swr_callback_arg, &result)) {
996 996 swr->swr_what = SWR_STOP;
997 997 }
998 998
999 999 done:
1000 1000 swr->swr_busy = 0;
1001 1001 mutex_enter(&cpr_mutex);
1002 1002 sw_cmd_count --;
1003 1003 if (!sw_cmd_count) {
1004 1004 CALLB_CPR_SAFE_BEGIN(&cpr_info);
1005 1005 sw_cpr_flag = 1;
1006 1006 }
1007 1007 mutex_exit(&cpr_mutex);
1008 1008 }
1009 1009
1010 1010 /*
1011 1011 * scsi_watch_get_ref_count
1012 1012 * called by clients to query the reference count for a given token.
1013 1013 * return the number of reference count or 0 if the given token is
1014 1014 * not found.
1015 1015 */
1016 1016 int
1017 1017 scsi_watch_get_ref_count(opaque_t token)
1018 1018 {
1019 1019 struct scsi_watch_request *swr =
1020 1020 (struct scsi_watch_request *)token;
1021 1021 struct scsi_watch_request *sswr;
1022 1022 int rval = 0;
1023 1023
1024 1024 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
1025 1025 "scsi_watch_get_ref_count: Entering(0x%p) ...\n",
1026 1026 (void *)swr);
1027 1027 mutex_enter(&sw.sw_mutex);
1028 1028
1029 1029 sswr = sw.sw_head;
1030 1030 while (sswr) {
1031 1031 if (sswr == swr) {
1032 1032 rval = swr->swr_ref;
1033 1033 mutex_exit(&sw.sw_mutex);
1034 1034 return (rval);
1035 1035 }
1036 1036 sswr = sswr->swr_next;
1037 1037 }
1038 1038
1039 1039 mutex_exit(&sw.sw_mutex);
1040 1040 return (rval);
1041 1041 }
↓ open down ↓ |
372 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX