Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/crypto/spi/kcf_spi.c
+++ new/usr/src/uts/common/crypto/spi/kcf_spi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * This file is part of the core Kernel Cryptographic Framework.
31 31 * It implements the SPI functions exported to cryptographic
32 32 * providers.
33 33 */
34 34
35 35 #include <sys/ksynch.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/ddi.h>
38 38 #include <sys/sunddi.h>
39 39 #include <sys/modctl.h>
40 40 #include <sys/crypto/common.h>
41 41 #include <sys/crypto/impl.h>
42 42 #include <sys/crypto/sched_impl.h>
43 43 #include <sys/crypto/spi.h>
44 44 #include <sys/crypto/ioctladmin.h>
45 45 #include <sys/taskq.h>
46 46 #include <sys/disp.h>
47 47 #include <sys/kstat.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/cpuvar.h>
50 50
51 51 /*
52 52 * minalloc and maxalloc values to be used for taskq_create().
53 53 */
54 54 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
55 55 int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
56 56 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
57 57
58 58 static void remove_provider(kcf_provider_desc_t *);
59 59 static void process_logical_providers(crypto_provider_info_t *,
60 60 kcf_provider_desc_t *);
61 61 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
62 62 static int kcf_prov_kstat_update(kstat_t *, int);
63 63 static void delete_kstat(kcf_provider_desc_t *);
64 64
65 65 static kcf_prov_stats_t kcf_stats_ks_data_template = {
66 66 { "kcf_ops_total", KSTAT_DATA_UINT64 },
67 67 { "kcf_ops_passed", KSTAT_DATA_UINT64 },
68 68 { "kcf_ops_failed", KSTAT_DATA_UINT64 },
69 69 { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
70 70 };
71 71
72 72 #define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
73 73 *((dst)->ops) = *((src)->ops);
74 74
75 75 extern int sys_shutdown;
76 76
77 77 /*
78 78 * Copy an ops vector from src to dst. Used during provider registration
79 79 * to copy the ops vector from the provider info structure to the
80 80 * provider descriptor maintained by KCF.
81 81 * Copying the ops vector specified by the provider is needed since the
82 82 * framework does not require the provider info structure to be
83 83 * persistent.
84 84 */
85 85 static void
86 86 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
87 87 {
88 88 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
89 89 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
90 90 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
91 91 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
92 92 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
93 93 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
94 94 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
95 95 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
96 96 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
97 97 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
98 98 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
99 99 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
100 100 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
101 101 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
102 102 }
103 103
104 104 static void
105 105 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
106 106 {
107 107 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
108 108 }
109 109
110 110 static void
111 111 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
112 112 {
113 113 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
114 114 }
115 115
116 116 static void
117 117 copy_ops_vector_v4(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
118 118 {
119 119 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_fips140_ops);
120 120 }
121 121
122 122 /*
123 123 * This routine is used to add cryptographic providers to the KEF framework.
124 124 * Providers pass a crypto_provider_info structure to crypto_register_provider()
125 125 * and get back a handle. The crypto_provider_info structure contains a
126 126 * list of mechanisms supported by the provider and an ops vector containing
127 127 * provider entry points. Hardware providers call this routine in their attach
128 128 * routines. Software providers call this routine in their _init() routine.
129 129 */
130 130 int
131 131 crypto_register_provider(crypto_provider_info_t *info,
132 132 crypto_kcf_provider_handle_t *handle)
133 133 {
134 134 struct modctl *mcp;
135 135 char *name;
136 136 char ks_name[KSTAT_STRLEN];
137 137 kcf_provider_desc_t *prov_desc = NULL;
138 138 int ret = CRYPTO_ARGUMENTS_BAD;
139 139
140 140 if (info->pi_interface_version > CRYPTO_SPI_VERSION_4) {
141 141 ret = CRYPTO_VERSION_MISMATCH;
142 142 goto errormsg;
143 143 }
144 144
145 145 /*
146 146 * Check provider type, must be software, hardware, or logical.
147 147 */
148 148 if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
149 149 info->pi_provider_type != CRYPTO_SW_PROVIDER &&
150 150 info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
151 151 goto errormsg;
152 152
153 153 /*
154 154 * Allocate and initialize a new provider descriptor. We also
155 155 * hold it and release it when done.
156 156 */
157 157 prov_desc = kcf_alloc_provider_desc(info);
158 158 KCF_PROV_REFHOLD(prov_desc);
159 159
160 160 prov_desc->pd_prov_type = info->pi_provider_type;
161 161
162 162 /* provider-private handle, opaque to KCF */
163 163 prov_desc->pd_prov_handle = info->pi_provider_handle;
164 164
165 165 /* copy provider description string */
166 166 if (info->pi_provider_description != NULL) {
167 167 /*
168 168 * pi_provider_description is a string that can contain
169 169 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
170 170 * INCLUDING the terminating null character. A bcopy()
171 171 * is necessary here as pd_description should not have
172 172 * a null character. See comments in kcf_alloc_provider_desc()
173 173 * for details on pd_description field.
174 174 */
175 175 bcopy(info->pi_provider_description, prov_desc->pd_description,
176 176 min(strlen(info->pi_provider_description),
177 177 CRYPTO_PROVIDER_DESCR_MAX_LEN));
178 178 }
179 179
180 180 if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
181 181 if (info->pi_ops_vector == NULL) {
182 182 goto bail;
183 183 }
184 184 copy_ops_vector_v1(info->pi_ops_vector,
185 185 prov_desc->pd_ops_vector);
186 186 if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
187 187 copy_ops_vector_v2(info->pi_ops_vector,
188 188 prov_desc->pd_ops_vector);
189 189 prov_desc->pd_flags = info->pi_flags;
190 190 }
191 191 if (info->pi_interface_version >= CRYPTO_SPI_VERSION_3) {
192 192 copy_ops_vector_v3(info->pi_ops_vector,
193 193 prov_desc->pd_ops_vector);
194 194 }
195 195 if (info->pi_interface_version == CRYPTO_SPI_VERSION_4) {
196 196 copy_ops_vector_v4(info->pi_ops_vector,
197 197 prov_desc->pd_ops_vector);
198 198 }
199 199 }
200 200
201 201 /* object_ops and nostore_key_ops are mutually exclusive */
202 202 if (prov_desc->pd_ops_vector->co_object_ops &&
203 203 prov_desc->pd_ops_vector->co_nostore_key_ops) {
204 204 goto bail;
205 205 }
206 206 /*
207 207 * For software providers, copy the module name and module ID.
208 208 * For hardware providers, copy the driver name and instance.
209 209 */
210 210 switch (info->pi_provider_type) {
211 211 case CRYPTO_SW_PROVIDER:
212 212 if (info->pi_provider_dev.pd_sw == NULL)
213 213 goto bail;
214 214
215 215 if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
216 216 goto bail;
217 217
218 218 prov_desc->pd_module_id = mcp->mod_id;
219 219 name = mcp->mod_modname;
220 220 break;
221 221
222 222 case CRYPTO_HW_PROVIDER:
223 223 case CRYPTO_LOGICAL_PROVIDER:
224 224 if (info->pi_provider_dev.pd_hw == NULL)
225 225 goto bail;
226 226
227 227 prov_desc->pd_instance =
228 228 ddi_get_instance(info->pi_provider_dev.pd_hw);
229 229 name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
230 230 break;
231 231 }
232 232 if (name == NULL)
233 233 goto bail;
234 234
235 235 prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
236 236 (void) strcpy(prov_desc->pd_name, name);
237 237
238 238 if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
239 239 goto bail;
240 240
241 241 /* process the mechanisms supported by the provider */
242 242 if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
243 243 goto bail;
244 244
245 245 /*
246 246 * Add provider to providers tables, also sets the descriptor
247 247 * pd_prov_id field.
248 248 */
249 249 if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
250 250 undo_register_provider(prov_desc, B_FALSE);
251 251 goto bail;
252 252 }
253 253
254 254 /*
255 255 * We create a taskq only for a hardware provider. The global
256 256 * software queue is used for software providers. We handle ordering
257 257 * of multi-part requests in the taskq routine. So, it is safe to
258 258 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
259 259 * to keep some entries cached to improve performance.
260 260 */
261 261 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
262 262 prov_desc->pd_taskq = taskq_create("kcf_taskq",
263 263 crypto_taskq_threads, minclsyspri,
264 264 crypto_taskq_minalloc, crypto_taskq_maxalloc,
265 265 TASKQ_PREPOPULATE);
266 266 else
267 267 prov_desc->pd_taskq = NULL;
268 268
269 269 /* no kernel session to logical providers and no pd_flags */
270 270 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
271 271 /*
272 272 * Open a session for session-oriented providers. This session
273 273 * is used for all kernel consumers. This is fine as a provider
274 274 * is required to support multiple thread access to a session.
275 275 * We can do this only after the taskq has been created as we
276 276 * do a kcf_submit_request() to open the session.
277 277 */
278 278 if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
279 279 kcf_req_params_t params;
280 280
281 281 KCF_WRAP_SESSION_OPS_PARAMS(¶ms,
282 282 KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
283 283 CRYPTO_USER, NULL, 0, prov_desc);
284 284 ret = kcf_submit_request(prov_desc, NULL, NULL, ¶ms,
285 285 B_FALSE);
286 286 if (ret != CRYPTO_SUCCESS)
287 287 goto undo_then_bail;
288 288 }
289 289
290 290 /*
291 291 * Get the value for the maximum input length allowed if
292 292 * CRYPTO_HASH_NO_UPDATE or CRYPTO_HMAC_NO_UPDATE is specified.
293 293 */
294 294 if (prov_desc->pd_flags &
295 295 (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE)) {
296 296 kcf_req_params_t params;
297 297 crypto_provider_ext_info_t ext_info;
298 298
299 299 if (KCF_PROV_PROVMGMT_OPS(prov_desc) == NULL)
300 300 goto undo_then_bail;
301 301
302 302 bzero(&ext_info, sizeof (ext_info));
303 303 KCF_WRAP_PROVMGMT_OPS_PARAMS(¶ms,
304 304 KCF_OP_MGMT_EXTINFO,
305 305 0, NULL, 0, NULL, 0, NULL, &ext_info, prov_desc);
306 306 ret = kcf_submit_request(prov_desc, NULL, NULL,
307 307 ¶ms, B_FALSE);
308 308 if (ret != CRYPTO_SUCCESS)
309 309 goto undo_then_bail;
310 310
311 311 if (prov_desc->pd_flags & CRYPTO_HASH_NO_UPDATE) {
312 312 prov_desc->pd_hash_limit =
313 313 ext_info.ei_hash_max_input_len;
314 314 }
315 315 if (prov_desc->pd_flags & CRYPTO_HMAC_NO_UPDATE) {
316 316 prov_desc->pd_hmac_limit =
317 317 ext_info.ei_hmac_max_input_len;
318 318 }
319 319 }
320 320 }
321 321
322 322 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
323 323 /*
324 324 * Create the kstat for this provider. There is a kstat
325 325 * installed for each successfully registered provider.
326 326 * This kstat is deleted, when the provider unregisters.
327 327 */
328 328 if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
329 329 (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
330 330 prov_desc->pd_name, "provider_stats");
331 331 } else {
332 332 (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
333 333 prov_desc->pd_name, prov_desc->pd_instance,
334 334 prov_desc->pd_prov_id, "provider_stats");
335 335 }
336 336
337 337 prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
338 338 KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
339 339 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
340 340
341 341 if (prov_desc->pd_kstat != NULL) {
342 342 bcopy(&kcf_stats_ks_data_template,
343 343 &prov_desc->pd_ks_data,
344 344 sizeof (kcf_stats_ks_data_template));
345 345 prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
346 346 KCF_PROV_REFHOLD(prov_desc);
347 347 prov_desc->pd_kstat->ks_private = prov_desc;
348 348 prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
349 349 kstat_install(prov_desc->pd_kstat);
350 350 }
351 351 }
352 352
353 353 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
354 354 process_logical_providers(info, prov_desc);
355 355
356 356 mutex_enter(&prov_desc->pd_lock);
357 357 prov_desc->pd_state = KCF_PROV_READY;
358 358 mutex_exit(&prov_desc->pd_lock);
359 359 kcf_do_notify(prov_desc, B_TRUE);
360 360
361 361 exit:
362 362 *handle = prov_desc->pd_kcf_prov_handle;
363 363 KCF_PROV_REFRELE(prov_desc);
364 364 return (CRYPTO_SUCCESS);
365 365
366 366 undo_then_bail:
367 367 undo_register_provider(prov_desc, B_TRUE);
368 368 ret = CRYPTO_FAILED;
369 369 bail:
370 370 KCF_PROV_REFRELE(prov_desc);
371 371
372 372 errormsg:
373 373 if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
374 374 switch (ret) {
375 375 case CRYPTO_FAILED:
376 376 cmn_err(CE_WARN, "%s failed when registering with the "
377 377 "Cryptographic Framework.",
378 378 info->pi_provider_description);
379 379 break;
380 380
381 381 case CRYPTO_MODVERIFICATION_FAILED:
382 382 cmn_err(CE_WARN, "%s failed module verification when "
383 383 "registering with the Cryptographic Framework.",
384 384 info->pi_provider_description);
385 385 break;
386 386
387 387 case CRYPTO_ARGUMENTS_BAD:
388 388 cmn_err(CE_WARN, "%s provided bad arguments and was "
389 389 "not registered with the Cryptographic Framework.",
390 390 info->pi_provider_description);
391 391 break;
392 392
393 393 case CRYPTO_VERSION_MISMATCH:
394 394 cmn_err(CE_WARN, "%s was not registered with the "
395 395 "Cryptographic Framework as there is a SPI version "
396 396 "mismatch (%d) error.",
397 397 info->pi_provider_description,
398 398 info->pi_interface_version);
399 399 break;
400 400
401 401 case CRYPTO_FIPS140_ERROR:
402 402 cmn_err(CE_WARN, "%s was not registered with the "
403 403 "Cryptographic Framework as there was a FIPS 140 "
404 404 "validation error.", info->pi_provider_description);
405 405 break;
406 406
407 407 default:
408 408 cmn_err(CE_WARN, "%s did not register with the "
409 409 "Cryptographic Framework. (0x%x)",
410 410 info->pi_provider_description, ret);
411 411 };
412 412 }
413 413
414 414 return (ret);
415 415 }
416 416
417 417 /* Return the number of holds on a provider. */
418 418 int
419 419 kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
420 420 {
421 421 int i;
422 422 int refcnt = 0;
423 423
424 424 if (do_lock)
425 425 for (i = 0; i < pd->pd_nbins; i++)
426 426 mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));
427 427
428 428 for (i = 0; i < pd->pd_nbins; i++)
429 429 refcnt += pd->pd_percpu_bins[i].kp_holdcnt;
430 430
431 431 if (do_lock)
432 432 for (i = 0; i < pd->pd_nbins; i++)
433 433 mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));
434 434
435 435 return (refcnt);
436 436 }
437 437
438 438 /*
439 439 * This routine is used to notify the framework when a provider is being
440 440 * removed. Hardware providers call this routine in their detach routines.
441 441 * Software providers call this routine in their _fini() routine.
442 442 */
443 443 int
444 444 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
445 445 {
446 446 uint_t mech_idx;
447 447 kcf_provider_desc_t *desc;
448 448 kcf_prov_state_t saved_state;
449 449 int ret = CRYPTO_SUCCESS;
450 450
451 451 /* lookup provider descriptor */
452 452 if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) ==
453 453 NULL) {
454 454 ret = CRYPTO_UNKNOWN_PROVIDER;
455 455 goto errormsg;
456 456 }
457 457
458 458 mutex_enter(&desc->pd_lock);
459 459 /*
460 460 * Check if any other thread is disabling or removing
461 461 * this provider. We return if this is the case.
462 462 */
463 463 if (desc->pd_state >= KCF_PROV_DISABLED) {
464 464 mutex_exit(&desc->pd_lock);
465 465 /* Release reference held by kcf_prov_tab_lookup(). */
466 466 KCF_PROV_REFRELE(desc);
467 467 ret = CRYPTO_BUSY;
468 468 goto errormsg;
469 469 }
470 470
471 471 saved_state = desc->pd_state;
472 472 desc->pd_state = KCF_PROV_UNREGISTERING;
473 473
474 474 if (saved_state == KCF_PROV_BUSY) {
475 475 /*
476 476 * The per-provider taskq threads may be waiting. We
477 477 * signal them so that they can start failing requests.
478 478 */
479 479 cv_broadcast(&desc->pd_resume_cv);
480 480 }
481 481
482 482 mutex_exit(&desc->pd_lock);
483 483
484 484 if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
485 485 remove_provider(desc);
486 486 }
487 487
488 488 if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
489 489 /* remove the provider from the mechanisms tables */
490 490 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
491 491 mech_idx++) {
492 492 kcf_remove_mech_provider(
493 493 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
494 494 }
495 495 }
496 496
497 497 /* remove provider from providers table */
498 498 if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
499 499 CRYPTO_SUCCESS) {
500 500 /* Release reference held by kcf_prov_tab_lookup(). */
501 501 KCF_PROV_REFRELE(desc);
502 502 ret = CRYPTO_UNKNOWN_PROVIDER;
503 503 goto errormsg;
504 504 }
505 505
506 506 delete_kstat(desc);
↓ open down ↓ |
506 lines elided |
↑ open up ↑ |
507 507
508 508 if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
509 509 /*
510 510 * Wait till the existing requests with the provider complete
511 511 * and all the holds are released. All the holds on a software
512 512 * provider are from kernel clients and the hold time
513 513 * is expected to be short. So, we won't be stuck here forever.
514 514 */
515 515 while (kcf_get_refcnt(desc, B_TRUE) > 1) {
516 516 /* wait 1 second and try again. */
517 - delay(1 * drv_usectohz(1000000));
517 + delay(drv_sectohz(1));
518 518 }
519 519 } else {
520 520 int i;
521 521 kcf_prov_cpu_t *mp;
522 522
523 523 /*
524 524 * Wait until requests that have been sent to the provider
525 525 * complete.
526 526 */
527 527 for (i = 0; i < desc->pd_nbins; i++) {
528 528 mp = &(desc->pd_percpu_bins[i]);
529 529
530 530 mutex_enter(&mp->kp_lock);
531 531 while (mp->kp_jobcnt > 0) {
532 532 cv_wait(&mp->kp_cv, &mp->kp_lock);
533 533 }
534 534 mutex_exit(&mp->kp_lock);
535 535 }
536 536 }
537 537
538 538 mutex_enter(&desc->pd_lock);
539 539 desc->pd_state = KCF_PROV_UNREGISTERED;
540 540 mutex_exit(&desc->pd_lock);
541 541
542 542 kcf_do_notify(desc, B_FALSE);
543 543
544 544 mutex_enter(&prov_tab_mutex);
545 545 /* Release reference held by kcf_prov_tab_lookup(). */
546 546 KCF_PROV_REFRELE(desc);
547 547
548 548 if (kcf_get_refcnt(desc, B_TRUE) == 0) {
549 549 /* kcf_free_provider_desc drops prov_tab_mutex */
550 550 kcf_free_provider_desc(desc);
551 551 } else {
552 552 ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
553 553 /*
554 554 * We could avoid this if /dev/crypto can proactively
555 555 * remove any holds on us from a dormant PKCS #11 app.
556 556 * For now, we check the provider table for
557 557 * KCF_PROV_UNREGISTERED entries when a provider is
558 558 * added to the table or when a provider is removed from it
559 559 * and free them when refcnt reaches zero.
560 560 */
561 561 kcf_need_provtab_walk = B_TRUE;
562 562 mutex_exit(&prov_tab_mutex);
563 563 }
564 564
565 565 errormsg:
566 566 if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
567 567 switch (ret) {
568 568 case CRYPTO_UNKNOWN_PROVIDER:
569 569 cmn_err(CE_WARN, "Unknown provider \"%s\" was "
570 570 "requested to unregister from the cryptographic "
571 571 "framework.", desc->pd_description);
572 572 break;
573 573
574 574 case CRYPTO_BUSY:
575 575 cmn_err(CE_WARN, "%s could not be unregistered from "
576 576 "the Cryptographic Framework as it is busy.",
577 577 desc->pd_description);
578 578 break;
579 579
580 580 default:
581 581 cmn_err(CE_WARN, "%s did not unregister with the "
582 582 "Cryptographic Framework. (0x%x)",
583 583 desc->pd_description, ret);
584 584 };
585 585 }
586 586
587 587 return (ret);
588 588 }
589 589
590 590 /*
591 591 * This routine is used to notify the framework that the state of
592 592 * a cryptographic provider has changed. Valid state codes are:
593 593 *
594 594 * CRYPTO_PROVIDER_READY
595 595 * The provider indicates that it can process more requests. A provider
596 596 * will notify with this event if it previously has notified us with a
597 597 * CRYPTO_PROVIDER_BUSY.
598 598 *
599 599 * CRYPTO_PROVIDER_BUSY
600 600 * The provider can not take more requests.
601 601 *
602 602 * CRYPTO_PROVIDER_FAILED
603 603 * The provider encountered an internal error. The framework will not
604 604 * be sending any more requests to the provider. The provider may notify
605 605 * with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
606 606 *
607 607 * This routine can be called from user or interrupt context.
608 608 */
609 609 void
610 610 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
611 611 {
612 612 kcf_provider_desc_t *pd;
613 613
614 614 /* lookup the provider from the given handle */
615 615 if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
616 616 return;
617 617
618 618 mutex_enter(&pd->pd_lock);
619 619
620 620 if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
621 621 goto out;
622 622
623 623 if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
624 624 cmn_err(CE_WARN, "crypto_provider_notification: "
625 625 "logical provider (%x) ignored\n", handle);
626 626 goto out;
627 627 }
628 628 switch (state) {
629 629 case CRYPTO_PROVIDER_READY:
630 630 switch (pd->pd_state) {
631 631 case KCF_PROV_BUSY:
632 632 pd->pd_state = KCF_PROV_READY;
633 633 /*
634 634 * Signal the per-provider taskq threads that they
635 635 * can start submitting requests.
636 636 */
637 637 cv_broadcast(&pd->pd_resume_cv);
638 638 break;
639 639
640 640 case KCF_PROV_FAILED:
641 641 /*
642 642 * The provider recovered from the error. Let us
643 643 * use it now.
644 644 */
645 645 pd->pd_state = KCF_PROV_READY;
646 646 break;
647 647 }
648 648 break;
649 649
650 650 case CRYPTO_PROVIDER_BUSY:
651 651 switch (pd->pd_state) {
652 652 case KCF_PROV_READY:
653 653 pd->pd_state = KCF_PROV_BUSY;
654 654 break;
655 655 }
656 656 break;
657 657
658 658 case CRYPTO_PROVIDER_FAILED:
659 659 /*
660 660 * We note the failure and return. The per-provider taskq
661 661 * threads check this flag and start failing the
662 662 * requests, if it is set. See process_req_hwp() for details.
663 663 */
664 664 switch (pd->pd_state) {
665 665 case KCF_PROV_READY:
666 666 pd->pd_state = KCF_PROV_FAILED;
667 667 break;
668 668
669 669 case KCF_PROV_BUSY:
670 670 pd->pd_state = KCF_PROV_FAILED;
671 671 /*
672 672 * The per-provider taskq threads may be waiting. We
673 673 * signal them so that they can start failing requests.
674 674 */
675 675 cv_broadcast(&pd->pd_resume_cv);
676 676 break;
677 677 }
678 678 break;
679 679 }
680 680 out:
681 681 mutex_exit(&pd->pd_lock);
682 682 KCF_PROV_REFRELE(pd);
683 683 }
684 684
685 685 /*
686 686 * This routine is used to notify the framework the result of
687 687 * an asynchronous request handled by a provider. Valid error
688 688 * codes are the same as the CRYPTO_* errors defined in common.h.
689 689 *
690 690 * This routine can be called from user or interrupt context.
691 691 */
692 692 void
693 693 crypto_op_notification(crypto_req_handle_t handle, int error)
694 694 {
695 695 kcf_call_type_t ctype;
696 696
697 697 if (handle == NULL)
698 698 return;
699 699
700 700 if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
701 701 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
702 702
703 703 KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
704 704 kcf_sop_done(sreq, error);
705 705 } else {
706 706 kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
707 707
708 708 ASSERT(ctype == CRYPTO_ASYNCH);
709 709 KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
710 710 kcf_aop_done(areq, error);
711 711 }
712 712 }
713 713
714 714 /*
715 715 * This routine is used by software providers to determine
716 716 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
717 717 * Note that hardware providers can always use KM_SLEEP. So,
718 718 * they do not need to call this routine.
719 719 *
720 720 * This routine can be called from user or interrupt context.
721 721 */
722 722 int
723 723 crypto_kmflag(crypto_req_handle_t handle)
724 724 {
725 725 return (REQHNDL2_KMFLAG(handle));
726 726 }
727 727
728 728 /*
729 729 * Process the mechanism info structures specified by the provider
730 730 * during registration. A NULL crypto_provider_info_t indicates
731 731 * an already initialized provider descriptor.
732 732 *
733 733 * Mechanisms are not added to the kernel's mechanism table if the
734 734 * provider is a logical provider.
735 735 *
736 736 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
737 737 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
738 738 * if the table of mechanisms is full.
739 739 */
740 740 static int
741 741 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
742 742 {
743 743 uint_t mech_idx;
744 744 uint_t cleanup_idx;
745 745 int err = CRYPTO_SUCCESS;
746 746 kcf_prov_mech_desc_t *pmd;
747 747 int desc_use_count = 0;
748 748 int mcount = desc->pd_mech_list_count;
749 749
750 750 if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
751 751 if (info != NULL) {
752 752 ASSERT(info->pi_mechanisms != NULL);
753 753 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
754 754 sizeof (crypto_mech_info_t) * mcount);
755 755 }
756 756 return (CRYPTO_SUCCESS);
757 757 }
758 758
759 759 /*
760 760 * Copy the mechanism list from the provider info to the provider
761 761 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
762 762 * element if the provider has random_ops since we keep an internal
763 763 * mechanism, SUN_RANDOM, in this case.
764 764 */
765 765 if (info != NULL) {
766 766 if (info->pi_ops_vector->co_random_ops != NULL) {
767 767 crypto_mech_info_t *rand_mi;
768 768
769 769 /*
770 770 * Need the following check as it is possible to have
771 771 * a provider that implements just random_ops and has
772 772 * pi_mechanisms == NULL.
773 773 */
774 774 if (info->pi_mechanisms != NULL) {
775 775 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
776 776 sizeof (crypto_mech_info_t) * (mcount - 1));
777 777 }
778 778 rand_mi = &desc->pd_mechanisms[mcount - 1];
779 779
780 780 bzero(rand_mi, sizeof (crypto_mech_info_t));
781 781 (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
782 782 CRYPTO_MAX_MECH_NAME);
783 783 rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
784 784 } else {
785 785 ASSERT(info->pi_mechanisms != NULL);
786 786 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
787 787 sizeof (crypto_mech_info_t) * mcount);
788 788 }
789 789 }
790 790
791 791 /*
792 792 * For each mechanism supported by the provider, add the provider
793 793 * to the corresponding KCF mechanism mech_entry chain.
794 794 */
795 795 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
796 796 crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
797 797
798 798 if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
799 799 (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
800 800 err = CRYPTO_ARGUMENTS_BAD;
801 801 break;
802 802 }
803 803
804 804 if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
805 805 KCF_SUCCESS)
806 806 break;
807 807
808 808 if (pmd == NULL)
809 809 continue;
810 810
811 811 /* The provider will be used for this mechanism */
812 812 desc_use_count++;
813 813 }
814 814
815 815 /*
816 816 * Don't allow multiple software providers with disabled mechanisms
817 817 * to register. Subsequent enabling of mechanisms will result in
818 818 * an unsupported configuration, i.e. multiple software providers
819 819 * per mechanism.
820 820 */
821 821 if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
822 822 return (CRYPTO_ARGUMENTS_BAD);
823 823
824 824 if (err == KCF_SUCCESS)
825 825 return (CRYPTO_SUCCESS);
826 826
827 827 /*
828 828 * An error occurred while adding the mechanism, cleanup
829 829 * and bail.
830 830 */
831 831 for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
832 832 kcf_remove_mech_provider(
833 833 desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
834 834 }
835 835
836 836 if (err == KCF_MECH_TAB_FULL)
837 837 return (CRYPTO_HOST_MEMORY);
838 838
839 839 return (CRYPTO_ARGUMENTS_BAD);
840 840 }
841 841
842 842 /*
843 843 * Update routine for kstat. Only privileged users are allowed to
844 844 * access this information, since this information is sensitive.
845 845 * There are some cryptographic attacks (e.g. traffic analysis)
846 846 * which can use this information.
847 847 */
848 848 static int
849 849 kcf_prov_kstat_update(kstat_t *ksp, int rw)
850 850 {
851 851 kcf_prov_stats_t *ks_data;
852 852 kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
853 853 int i;
854 854
855 855 if (rw == KSTAT_WRITE)
856 856 return (EACCES);
857 857
858 858 ks_data = ksp->ks_data;
859 859
860 860 if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
861 861 ks_data->ps_ops_total.value.ui64 = 0;
862 862 ks_data->ps_ops_passed.value.ui64 = 0;
863 863 ks_data->ps_ops_failed.value.ui64 = 0;
864 864 ks_data->ps_ops_busy_rval.value.ui64 = 0;
865 865 } else {
866 866 uint64_t dtotal, ftotal, btotal;
867 867
868 868 dtotal = ftotal = btotal = 0;
869 869 /* No locking done since an exact count is not required. */
870 870 for (i = 0; i < pd->pd_nbins; i++) {
871 871 dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
872 872 ftotal += pd->pd_percpu_bins[i].kp_nfails;
873 873 btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
874 874 }
875 875
876 876 ks_data->ps_ops_total.value.ui64 = dtotal;
877 877 ks_data->ps_ops_failed.value.ui64 = ftotal;
878 878 ks_data->ps_ops_busy_rval.value.ui64 = btotal;
879 879 ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
880 880 }
881 881
882 882 return (0);
883 883 }
884 884
885 885
886 886 /*
887 887 * Utility routine called from failure paths in crypto_register_provider()
888 888 * and from crypto_load_soft_disabled().
889 889 */
890 890 void
891 891 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
892 892 {
893 893 uint_t mech_idx;
894 894
895 895 /* remove the provider from the mechanisms tables */
896 896 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
897 897 mech_idx++) {
898 898 kcf_remove_mech_provider(
899 899 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
900 900 }
901 901
902 902 /* remove provider from providers table */
903 903 if (remove_prov)
904 904 (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
905 905 }
906 906
907 907 /*
908 908 * Utility routine called from crypto_load_soft_disabled(). Callers
909 909 * should have done a prior undo_register_provider().
910 910 */
911 911 void
912 912 redo_register_provider(kcf_provider_desc_t *pd)
913 913 {
914 914 /* process the mechanisms supported by the provider */
915 915 (void) init_prov_mechs(NULL, pd);
916 916
917 917 /*
918 918 * Hold provider in providers table. We should not call
919 919 * kcf_prov_tab_add_provider() here as the provider descriptor
920 920 * is still valid which means it has an entry in the provider
921 921 * table.
922 922 */
923 923 KCF_PROV_REFHOLD(pd);
924 924 }
925 925
926 926 /*
927 927 * Add provider (p1) to another provider's array of providers (p2).
928 928 * Hardware and logical providers use this array to cross-reference
929 929 * each other.
930 930 */
931 931 static void
932 932 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
933 933 {
934 934 kcf_provider_list_t *new;
935 935
936 936 new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
937 937 mutex_enter(&p2->pd_lock);
938 938 new->pl_next = p2->pd_provider_list;
939 939 p2->pd_provider_list = new;
940 940 new->pl_provider = p1;
941 941 mutex_exit(&p2->pd_lock);
942 942 }
943 943
944 944 /*
945 945 * Remove provider (p1) from another provider's array of providers (p2).
946 946 * Hardware and logical providers use this array to cross-reference
947 947 * each other.
948 948 */
949 949 static void
950 950 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
951 951 {
952 952
953 953 kcf_provider_list_t *pl = NULL, **prev;
954 954
955 955 mutex_enter(&p2->pd_lock);
956 956 for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
957 957 pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
958 958 if (pl->pl_provider == p1) {
959 959 break;
960 960 }
961 961 }
962 962
963 963 if (p1 == NULL) {
964 964 mutex_exit(&p2->pd_lock);
965 965 return;
966 966 }
967 967
968 968 /* detach and free kcf_provider_list structure */
969 969 *prev = pl->pl_next;
970 970 kmem_free(pl, sizeof (*pl));
971 971 mutex_exit(&p2->pd_lock);
972 972 }
973 973
974 974 /*
975 975 * Convert an array of logical provider handles (crypto_provider_id)
976 976 * stored in a crypto_provider_info structure into an array of provider
977 977 * descriptors (kcf_provider_desc_t) attached to a logical provider.
978 978 */
979 979 static void
980 980 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
981 981 {
982 982 kcf_provider_desc_t *lp;
983 983 crypto_provider_id_t handle;
984 984 int count = info->pi_logical_provider_count;
985 985 int i;
986 986
987 987 /* add hardware provider to each logical provider */
988 988 for (i = 0; i < count; i++) {
989 989 handle = info->pi_logical_providers[i];
990 990 lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
991 991 if (lp == NULL) {
992 992 continue;
993 993 }
994 994 add_provider_to_array(hp, lp);
995 995 hp->pd_flags |= KCF_LPROV_MEMBER;
996 996
997 997 /*
998 998 * A hardware provider has to have the provider descriptor of
999 999 * every logical provider it belongs to, so it can be removed
1000 1000 * from the logical provider if the hardware provider
1001 1001 * unregisters from the framework.
1002 1002 */
1003 1003 add_provider_to_array(lp, hp);
1004 1004 KCF_PROV_REFRELE(lp);
1005 1005 }
1006 1006 }
1007 1007
1008 1008 /*
1009 1009 * This routine removes a provider from all of the logical or
1010 1010 * hardware providers it belongs to, and frees the provider's
1011 1011 * array of pointers to providers.
1012 1012 */
1013 1013 static void
1014 1014 remove_provider(kcf_provider_desc_t *pp)
1015 1015 {
1016 1016 kcf_provider_desc_t *p;
1017 1017 kcf_provider_list_t *e, *next;
1018 1018
1019 1019 mutex_enter(&pp->pd_lock);
1020 1020 for (e = pp->pd_provider_list; e != NULL; e = next) {
1021 1021 p = e->pl_provider;
1022 1022 remove_provider_from_array(pp, p);
1023 1023 if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
1024 1024 p->pd_provider_list == NULL)
1025 1025 p->pd_flags &= ~KCF_LPROV_MEMBER;
1026 1026 next = e->pl_next;
1027 1027 kmem_free(e, sizeof (*e));
1028 1028 }
1029 1029 pp->pd_provider_list = NULL;
1030 1030 mutex_exit(&pp->pd_lock);
1031 1031 }
1032 1032
/*
 * Dispatch events as needed for a provider. is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	/* Provider must have made it past signature verification. */
	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			/* Skip any mechanisms not allowed by the policy */
			if (is_mech_disabled(prov_desc,
			    prov_desc->pd_mechanisms[i].cm_mech_name))
				continue;

			/*
			 * One CRYPTO_EVENT_MECHS_CHANGED notification per
			 * enabled mechanism; ec is reused across iterations.
			 */
			(void) strncpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}

	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In case of a logical provider, we need to notify the event only
	 * for the logical provider and not for the underlying
	 * providers which are known by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}
1080 1080
1081 1081 static void
1082 1082 delete_kstat(kcf_provider_desc_t *desc)
1083 1083 {
1084 1084 /* destroy the kstat created for this provider */
1085 1085 if (desc->pd_kstat != NULL) {
1086 1086 kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1087 1087
1088 1088 /* release reference held by desc->pd_kstat->ks_private */
1089 1089 ASSERT(desc == kspd);
1090 1090 kstat_delete(kspd->pd_kstat);
1091 1091 desc->pd_kstat = NULL;
1092 1092 KCF_PROV_REFRELE(kspd);
1093 1093 }
1094 1094 }
↓ open down ↓ |
567 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX