1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 /*
  29  * Deimos - cryptographic acceleration based upon Broadcom 582x.
  30  */
  31 
  32 #include <sys/types.h>
  33 #include <sys/modctl.h>
  34 #include <sys/conf.h>
  35 #include <sys/devops.h>
  36 #include <sys/ddi.h>
  37 #include <sys/sunddi.h>
  38 #include <sys/cmn_err.h>
  39 #include <sys/varargs.h>
  40 #include <sys/file.h>
  41 #include <sys/stat.h>
  42 #include <sys/kmem.h>
  43 #include <sys/ioccom.h>
  44 #include <sys/open.h>
  45 #include <sys/cred.h>
  46 #include <sys/kstat.h>
  47 #include <sys/strsun.h>
  48 #include <sys/note.h>
  49 #include <sys/crypto/common.h>
  50 #include <sys/crypto/spi.h>
  51 #include <sys/ddifm.h>
  52 #include <sys/fm/protocol.h>
  53 #include <sys/fm/util.h>
  54 #include <sys/fm/io/ddi.h>
  55 #include <sys/crypto/dca.h>
  56 
  57 /*
  58  * Core Deimos driver.
  59  */
  60 
  61 static void             dca_enlist2(dca_listnode_t *, dca_listnode_t *,
  62     kmutex_t *);
  63 static void             dca_rmlist2(dca_listnode_t *node, kmutex_t *);
  64 static dca_listnode_t   *dca_delist2(dca_listnode_t *q, kmutex_t *);
  65 static void             dca_free_context_list(dca_t *dca);
  66 static int              dca_free_context_low(crypto_ctx_t *ctx);
  67 static int              dca_attach(dev_info_t *, ddi_attach_cmd_t);
  68 static int              dca_detach(dev_info_t *, ddi_detach_cmd_t);
  69 static int              dca_suspend(dca_t *);
  70 static int              dca_resume(dca_t *);
  71 static int              dca_init(dca_t *);
  72 static int              dca_reset(dca_t *, int);
  73 static int              dca_initworklist(dca_t *, dca_worklist_t *);
  74 static void             dca_uninit(dca_t *);
  75 static void             dca_initq(dca_listnode_t *);
  76 static void             dca_enqueue(dca_listnode_t *, dca_listnode_t *);
  77 static dca_listnode_t   *dca_dequeue(dca_listnode_t *);
  78 static dca_listnode_t   *dca_unqueue(dca_listnode_t *);
  79 static dca_request_t    *dca_newreq(dca_t *);
  80 static dca_work_t       *dca_getwork(dca_t *, int);
  81 static void             dca_freework(dca_work_t *);
  82 static dca_work_t       *dca_newwork(dca_t *);
  83 static void             dca_destroywork(dca_work_t *);
  84 static void             dca_schedule(dca_t *, int);
  85 static void             dca_reclaim(dca_t *, int);
  86 static uint_t           dca_intr(char *);
  87 static void             dca_failure(dca_t *, ddi_fault_location_t,
  88                             dca_fma_eclass_t index, uint64_t, int, char *, ...);
  89 static void             dca_jobtimeout(void *);
  90 static int              dca_drain(dca_t *);
  91 static void             dca_undrain(dca_t *);
  92 static void             dca_rejectjobs(dca_t *);
  93 
  94 #ifdef  SCHEDDELAY
  95 static void             dca_schedtimeout(void *);
  96 #endif
  97 
  98 /*
  99  * We want these inlined for performance.
 100  */
 101 #ifndef DEBUG
 102 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
 103 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
 104 #pragma inline(dca_reverse, dca_length)
 105 #endif
 106 
 107 /*
 108  * Device operations.
 109  */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo: not supported */
	nulldev,		/* devo_identify: obsolete, always succeeds */
	nulldev,		/* devo_probe: self-identifying device */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset: not supported */
	NULL,			/* devo_cb_ops: no char/block entry points */
	NULL,			/* devo_bus_ops: not a nexus driver */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
 124 
 125 #define IDENT           "PCI Crypto Accelerator"
 126 #define IDENT_SYM       "Crypto Accel Sym 2.0"
 127 #define IDENT_ASYM      "Crypto Accel Asym 2.0"
 128 
 129 /* Space-padded, will be filled in dynamically during registration */
 130 #define IDENT3  "PCI Crypto Accelerator Mod 2.0"
 131 
 132 #define VENDOR  "Sun Microsystems, Inc."
 133 
 134 #define STALETIME       (30 * SECOND)
 135 
 136 #define crypto_prov_notify      crypto_provider_notification
 137                 /* A 28 char function name doesn't leave much line space */
 138 
 139 /*
 140  * Module linkage.
 141  */
 142 static struct modldrv modldrv = {
 143         &mod_driverops,             /* drv_modops */
 144         IDENT,                  /* drv_linkinfo */
 145         &devops,            /* drv_dev_ops */
 146 };
 147 
 148 extern struct mod_ops mod_cryptoops;
 149 
 150 static struct modlcrypto modlcrypto = {
 151         &mod_cryptoops,
 152         IDENT3
 153 };
 154 
 155 static struct modlinkage modlinkage = {
 156         MODREV_1,               /* ml_rev */
 157         &modldrv,           /* ml_linkage */
 158         &modlcrypto,
 159         NULL
 160 };
 161 
 162 /*
 163  * CSPI information (entry points, provider info, etc.)
 164  */
 165 
 166 /* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC; min/max key lengths expressed in bytes */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC; min/max key lengths expressed in bytes */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
 179 
 180 /* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA; key lengths converted from bytes and expressed in bits */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA (raw X.509 padding); full cipher/sign/verify flag set */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* RSA (PKCS#1 padding); same flag set as raw RSA */
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
 212 
 213 static void dca_provider_status(crypto_provider_handle_t, uint_t *);
 214 
/* Control entry points: only provider-status reporting is implemented. */
static crypto_control_ops_t dca_control_ops = {
	dca_provider_status	/* provider_status */
};
 218 
 219 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
 220     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 221 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
 222     crypto_req_handle_t);
 223 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
 224     crypto_data_t *, crypto_req_handle_t);
 225 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
 226     crypto_req_handle_t);
 227 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
 228     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
 229     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 230 
 231 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
 232     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 233 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
 234     crypto_req_handle_t);
 235 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
 236     crypto_data_t *, crypto_req_handle_t);
 237 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
 238     crypto_req_handle_t);
 239 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
 240     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
 241     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 242 
/* Cipher (encrypt/decrypt) entry points, shared by both providers. */
static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};
 255 
 256 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
 257     crypto_spi_ctx_template_t, crypto_req_handle_t);
 258 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
 259     crypto_req_handle_t);
 260 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
 261     crypto_req_handle_t);
 262 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
 263     crypto_req_handle_t);
 264 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
 265     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
 266     crypto_spi_ctx_template_t, crypto_req_handle_t);
 267 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
 268     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 269 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
 270     crypto_req_handle_t);
 271 static int dca_sign_recover_atomic(crypto_provider_handle_t,
 272     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
 273     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 274 
/* Sign and sign-recover entry points (asymmetric provider only). */
static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};
 285 
 286 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
 287     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 288 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
 289     crypto_req_handle_t);
 290 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
 291     crypto_req_handle_t);
 292 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
 293     crypto_req_handle_t);
 294 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
 295     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
 296     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 297 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
 298     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 299 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
 300     crypto_data_t *, crypto_req_handle_t);
 301 static int dca_verify_recover_atomic(crypto_provider_handle_t,
 302     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
 303     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
 304 
/* Verify and verify-recover entry points (asymmetric provider only). */
static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};
 315 
 316 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
 317     uchar_t *, size_t, crypto_req_handle_t);
 318 
/* RNG entry points: generation only; seeding is not supported. */
static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,			/* seed_random: not supported */
	dca_generate_random	/* generate_random */
};
 323 
 324 static int ext_info_sym(crypto_provider_handle_t prov,
 325     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
 326 static int ext_info_asym(crypto_provider_handle_t prov,
 327     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
 328 static int ext_info_base(crypto_provider_handle_t prov,
 329     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
 330 
/*
 * Provider management for the symmetric provider: extended info only;
 * token and PIN management are not supported.
 */
static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};
 337 
/*
 * Provider management for the asymmetric provider: extended info only;
 * token and PIN management are not supported.
 */
static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};
 344 
 345 int dca_free_context(crypto_ctx_t *);
 346 
/* Context ops: no template creation; contexts freed via dca_free_context. */
static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,			/* create_ctx_template: not supported */
	dca_free_context	/* free_context */
};
 351 
 352 /* Operations for the symmetric cipher provider */
/* Operations for the symmetric cipher provider: control + cipher only. */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};
 369 
 370 /* Operations for the asymmetric cipher provider */
/*
 * Operations for the asymmetric cipher provider: adds sign, verify and
 * random-number ops on top of the control/cipher set.
 */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};
 387 
 388 /* Provider information for the symmetric cipher provider */
/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description: set in attach */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev: set in attach */
	NULL,				/* pi_provider_handle: set in attach */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
 401 
 402 /* Provider information for the asymmetric cipher provider */
/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description: set in attach */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev: set in attach */
	NULL,				/* pi_provider_handle: set in attach */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
 415 
 416 /* Convenience macros */
 417 /* Retrieve the softc and instance number from a SPI crypto context */
 418 #define DCA_SOFTC_FROM_CTX(ctx, softc, instance) {              \
 419         (softc) = (dca_t *)(ctx)->cc_provider;                       \
 420         (instance) = ddi_get_instance((softc)->dca_dip);     \
 421 }
 422 
 423 #define DCA_MECH_FROM_CTX(ctx) \
 424         (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
 425 
 426 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
 427     caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
 428     dca_chain_t *head, int *n_chain);
 429 static uint64_t dca_ena(uint64_t ena);
 430 static caddr_t dca_bufdaddr_out(crypto_data_t *data);
 431 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
 432 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
 433     dca_fma_eclass_t eclass_index);
 434 
 435 static void dca_fma_init(dca_t *dca);
 436 static void dca_fma_fini(dca_t *dca);
 437 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
 438     const void *impl_data);
 439 
 440 
/*
 * Supported devices; attach matches these against both the PCI
 * subsystem ids (preferred) and the plain vendor/device ids.
 */
static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{	0x14e4, 0x5820, "Broadcom 5820" },
	{	0x14e4, 0x5821, "Broadcom 5821" },
	{	0x14e4, 0x5822, "Broadcom 5822" },
	{	0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun specific OEMd variants */
	{	0x108e, 0x5454, "SCA" },
	{	0x108e, 0x5455, "SCA 1000" },
	{	0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
	{	0x108e, 0x1, "SCA 500" },
};
 454 
 455 /*
 456  * Device attributes.
 457  */
/* Register access: little-endian, strict ordering, FLAGERR for FMA. */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/* General device access: little-endian, strict ordering, no FMA flags. */
static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Buffer access attributes; only used on non-x86 builds. */
#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
#endif
 478 
/*
 * DMA attributes: 32-bit addressing, 64-byte alignment; scatter/gather
 * is used on x86/amd64 only (sgllen 512 vs. 1 elsewhere).
 */
static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
 497 
/* Soft-state anchor for per-instance dca_t, set up in _init(). */
static void	*dca_state = NULL;
/*
 * Tunable byte threshold; presumably the minimum transfer size for
 * which DMA binding pays off — NOTE(review): confirm against the
 * request-binding code elsewhere in this file.
 */
int	dca_mindma = 2500;
 500 
 501 /*
 502  * FMA eclass string definitions. Note that these string arrays must be
 503  * consistent with the dca_fma_eclass_t enum.
 504  */
/* Error-class strings for SCA1000; indexed by dca_fma_eclass_t. */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

/* Error-class strings for SCA500; indexed by dca_fma_eclass_t. */
static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};
 516 
 517 /*
 518  * DDI entry points.
 519  */
 520 int
 521 _init(void)
 522 {
 523         int rv;
 524 
 525         DBG(NULL, DMOD, "dca: in _init");
 526 
 527         if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
 528                 /* this should *never* happen! */
 529                 return (rv);
 530         }
 531 
 532         if ((rv = mod_install(&modlinkage)) != 0) {
 533                 /* cleanup here */
 534                 ddi_soft_state_fini(&dca_state);
 535                 return (rv);
 536         }
 537 
 538         return (0);
 539 }
 540 
 541 int
 542 _fini(void)
 543 {
 544         int rv;
 545 
 546         DBG(NULL, DMOD, "dca: in _fini");
 547 
 548         if ((rv = mod_remove(&modlinkage)) == 0) {
 549                 /* cleanup here */
 550                 ddi_soft_state_fini(&dca_state);
 551         }
 552         return (rv);
 553 }
 554 
 555 int
 556 _info(struct modinfo *modinfop)
 557 {
 558         DBG(NULL, DMOD, "dca: in _info");
 559 
 560         return (mod_info(&modlinkage, modinfop));
 561 }
 562 
 563 int
 564 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 565 {
 566         ddi_acc_handle_t        pci;
 567         int                     instance;
 568         ddi_iblock_cookie_t     ibc;
 569         int                     intr_added = 0;
 570         dca_t                   *dca;
 571         ushort_t                venid;
 572         ushort_t                devid;
 573         ushort_t                revid;
 574         ushort_t                subsysid;
 575         ushort_t                subvenid;
 576         int                     i;
 577         int                     ret;
 578         char                    ID[64];
 579         static char             *unknowndev = "Unknown device";
 580 
 581 #if DEBUG
 582         /* these are only used for debugging */
 583         ushort_t                pcicomm;
 584         ushort_t                pcistat;
 585         uchar_t                 cachelinesz;
 586         uchar_t                 mingnt;
 587         uchar_t                 maxlat;
 588         uchar_t                 lattmr;
 589 #endif
 590 
 591         instance = ddi_get_instance(dip);
 592 
 593         DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
 594 
 595         switch (cmd) {
 596         case DDI_RESUME:
 597                 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
 598                         dca_diperror(dip, "no soft state in detach");
 599                         return (DDI_FAILURE);
 600                 }
 601                 /* assumption: we won't be DDI_DETACHed until we return */
 602                 return (dca_resume(dca));
 603         case DDI_ATTACH:
 604                 break;
 605         default:
 606                 return (DDI_FAILURE);
 607         }
 608 
 609         if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 610                 dca_diperror(dip, "slot does not support PCI bus-master");
 611                 return (DDI_FAILURE);
 612         }
 613 
 614         if (ddi_intr_hilevel(dip, 0) != 0) {
 615                 dca_diperror(dip, "hilevel interrupts not supported");
 616                 return (DDI_FAILURE);
 617         }
 618 
 619         if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
 620                 dca_diperror(dip, "unable to setup PCI config handle");
 621                 return (DDI_FAILURE);
 622         }
 623 
 624         /* common PCI attributes */
 625         venid = pci_config_get16(pci, PCI_VENID);
 626         devid = pci_config_get16(pci, PCI_DEVID);
 627         revid = pci_config_get8(pci, PCI_REVID);
 628         subvenid = pci_config_get16(pci, PCI_SUBVENID);
 629         subsysid = pci_config_get16(pci, PCI_SUBSYSID);
 630 
 631         /*
 632          * Broadcom-specific timings.
 633          * We disable these timers/counters since they can cause
 634          * incorrect false failures when the bus is just a little
 635          * bit slow, or busy.
 636          */
 637         pci_config_put8(pci, PCI_TRDYTO, 0);
 638         pci_config_put8(pci, PCI_RETRIES, 0);
 639 
 640         /* initialize PCI access settings */
 641         pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
 642             PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
 643 
 644         /* set up our PCI latency timer */
 645         pci_config_put8(pci, PCI_LATTMR, 0x40);
 646 
 647 #if DEBUG
 648         /* read registers (for debugging) */
 649         pcicomm = pci_config_get16(pci, PCI_COMM);
 650         pcistat = pci_config_get16(pci, PCI_STATUS);
 651         cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
 652         mingnt = pci_config_get8(pci, PCI_MINGNT);
 653         maxlat = pci_config_get8(pci, PCI_MAXLAT);
 654         lattmr = pci_config_get8(pci, PCI_LATTMR);
 655 #endif
 656 
 657         pci_config_teardown(&pci);
 658 
 659         if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
 660                 dca_diperror(dip, "unable to get iblock cookie");
 661                 return (DDI_FAILURE);
 662         }
 663 
 664         if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
 665                 dca_diperror(dip, "unable to allocate soft state");
 666                 return (DDI_FAILURE);
 667         }
 668 
 669         dca = ddi_get_soft_state(dca_state, instance);
 670         ASSERT(dca != NULL);
 671         dca->dca_dip = dip;
 672         WORKLIST(dca, MCR1)->dwl_prov = NULL;
 673         WORKLIST(dca, MCR2)->dwl_prov = NULL;
 674         /* figure pagesize */
 675         dca->dca_pagesize = ddi_ptob(dip, 1);
 676 
 677         /*
 678          * Search for the device in our supported devices table.  This
 679          * is here for two reasons.  First, we want to ensure that
 680          * only Sun-qualified (and presumably Sun-labeled) devices can
 681          * be used with this driver.  Second, some devices have
 682          * specific differences.  E.g. the 5821 has support for a
 683          * special mode of RC4, deeper queues, power management, and
 684          * other changes.  Also, the export versions of some of these
 685          * chips don't support RC4 or 3DES, so we catch that here.
 686          *
 687          * Note that we only look at the upper nibble of the device
 688          * id, which is used to distinguish export vs. domestic
 689          * versions of the chip.  (The lower nibble is used for
 690          * stepping information.)
 691          */
 692         for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
 693                 /*
 694                  * Try to match the subsystem information first.
 695                  */
 696                 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
 697                     subsysid && (subsysid == dca_devices[i].dd_device_id)) {
 698                         dca->dca_model = dca_devices[i].dd_model;
 699                         dca->dca_devid = dca_devices[i].dd_device_id;
 700                         break;
 701                 }
 702                 /*
 703                  * Failing that, try the generic vendor and device id.
 704                  * Even if we find a match, we keep searching anyway,
 705                  * since we would prefer to find a match based on the
 706                  * subsystem ids.
 707                  */
 708                 if ((venid == dca_devices[i].dd_vendor_id) &&
 709                     (devid == dca_devices[i].dd_device_id)) {
 710                         dca->dca_model = dca_devices[i].dd_model;
 711                         dca->dca_devid = dca_devices[i].dd_device_id;
 712                 }
 713         }
 714         /* try and handle an unrecognized device */
 715         if (dca->dca_model == NULL) {
 716                 dca->dca_model = unknowndev;
 717                 dca_error(dca, "device not recognized, not supported");
 718                 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
 719                     i, venid, devid, revid);
 720         }
 721 
 722         if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
 723             dca->dca_model) != DDI_SUCCESS) {
 724                 dca_error(dca, "unable to create description property");
 725                 return (DDI_FAILURE);
 726         }
 727 
 728         DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
 729             pcicomm, pcistat, cachelinesz);
 730         DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
 731             mingnt, maxlat, lattmr);
 732 
 733         /*
 734          * initialize locks, etc.
 735          */
 736         (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
 737 
 738         /* use RNGSHA1 by default */
 739         if (ddi_getprop(DDI_DEV_T_ANY, dip,
 740             DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
 741                 dca->dca_flags |= DCA_RNGSHA1;
 742         }
 743 
 744         /* initialize FMA */
 745         dca_fma_init(dca);
 746 
 747         /* initialize some key data structures */
 748         if (dca_init(dca) != DDI_SUCCESS) {
 749                 goto failed;
 750         }
 751 
 752         /* initialize kstats */
 753         dca_ksinit(dca);
 754 
 755         /* setup access to registers */
 756         if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
 757             0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
 758                 dca_error(dca, "unable to map registers");
 759                 goto failed;
 760         }
 761 
 762         DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
 763         DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
 764         DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
 765         DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
 766         DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
 767 
 768         /* reset the chip */
 769         if (dca_reset(dca, 0) < 0) {
 770                 goto failed;
 771         }
 772 
 773         /* initialize the chip */
 774         PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
 775         if (dca_check_acc_handle(dca, dca->dca_regs_handle,
 776             DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
 777                 goto failed;
 778         }
 779 
 780         /* add the interrupt */
 781         if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
 782             (void *)dca) != DDI_SUCCESS) {
 783                 DBG(dca, DWARN, "ddi_add_intr failed");
 784                 goto failed;
 785         } else {
 786                 intr_added = 1;
 787         }
 788 
 789         /* enable interrupts on the device */
 790         /*
 791          * XXX: Note, 5820A1 errata indicates that this may clobber
 792          * bits 24 and 23, which affect the speed of the RNG.  Since
 793          * we always want to run in full-speed mode, this should be
 794          * harmless.
 795          */
 796         if (dca->dca_devid == 0x5825) {
 797                 /* for 5825 - increase the DMA read size */
 798                 SETBIT(dca, CSR_DMACTL,
 799                     DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
 800         } else {
 801                 SETBIT(dca, CSR_DMACTL,
 802                     DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
 803         }
 804         if (dca_check_acc_handle(dca, dca->dca_regs_handle,
 805             DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
 806                 goto failed;
 807         }
 808 
 809         /* register MCR1 with the crypto framework */
 810         /* Be careful not to exceed 32 chars */
 811         (void) sprintf(ID, "%s/%d %s",
 812             ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
 813         dca_prov_info1.pi_provider_description = ID;
 814         dca_prov_info1.pi_provider_dev.pd_hw = dip;
 815         dca_prov_info1.pi_provider_handle = dca;
 816         if ((ret = crypto_register_provider(&dca_prov_info1,
 817             &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
 818                 cmn_err(CE_WARN,
 819                     "crypto_register_provider() failed (%d) for MCR1", ret);
 820                 goto failed;
 821         }
 822 
 823         /* register MCR2 with the crypto framework */
 824         /* Be careful not to exceed 32 chars */
 825         (void) sprintf(ID, "%s/%d %s",
 826             ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
 827         dca_prov_info2.pi_provider_description = ID;
 828         dca_prov_info2.pi_provider_dev.pd_hw = dip;
 829         dca_prov_info2.pi_provider_handle = dca;
 830         if ((ret = crypto_register_provider(&dca_prov_info2,
 831             &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
 832                 cmn_err(CE_WARN,
 833                     "crypto_register_provider() failed (%d) for MCR2", ret);
 834                 goto failed;
 835         }
 836 
 837         crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
 838             CRYPTO_PROVIDER_READY);
 839         crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
 840             CRYPTO_PROVIDER_READY);
 841 
 842         /* Initialize the local random number pool for this instance */
 843         if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
 844                 goto failed;
 845         }
 846 
 847         mutex_enter(&dca->dca_intrlock);
 848         dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
 849             drv_usectohz(SECOND));
 850         mutex_exit(&dca->dca_intrlock);
 851 
 852         ddi_set_driver_private(dip, (caddr_t)dca);
 853 
 854         ddi_report_dev(dip);
 855 
 856         if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
 857                 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
 858         }
 859 
 860         return (DDI_SUCCESS);
 861 
 862 failed:
 863         /* unregister from the crypto framework */
 864         if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
 865                 (void) crypto_unregister_provider(
 866                     WORKLIST(dca, MCR1)->dwl_prov);
 867         }
 868         if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
 869                 (void) crypto_unregister_provider(
 870                     WORKLIST(dca, MCR2)->dwl_prov);
 871         }
 872         if (intr_added) {
 873                 CLRBIT(dca, CSR_DMACTL,
 874                     DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
 875                 /* unregister intr handler */
 876                 ddi_remove_intr(dip, 0, dca->dca_icookie);
 877         }
 878         if (dca->dca_regs_handle) {
 879                 ddi_regs_map_free(&dca->dca_regs_handle);
 880         }
 881         if (dca->dca_intrstats) {
 882                 kstat_delete(dca->dca_intrstats);
 883         }
 884         if (dca->dca_ksp) {
 885                 kstat_delete(dca->dca_ksp);
 886         }
 887         dca_uninit(dca);
 888 
 889         /* finalize FMA */
 890         dca_fma_fini(dca);
 891 
 892         mutex_destroy(&dca->dca_intrlock);
 893         ddi_soft_state_free(dca_state, instance);
 894         return (DDI_FAILURE);
 895 
 896 }
 897 
/*
 * detach(9E) entry point.  DDI_SUSPEND quiesces the instance in place;
 * DDI_DETACH unwinds attach completely.  The teardown order matters:
 * unregister from the crypto framework first (so no new jobs arrive),
 * then flush queued jobs and stop the job timeout, and only then tear
 * down interrupts, register mappings, kstats, and soft state.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	dca_t		*dca;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.  If either
	 * unregistration fails, fail the detach and leave the instance
	 * fully operational.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list. Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/*
	 * untimeout the timeouts: clear dca_jobtid under dca_intrlock so
	 * the handler won't rearm itself (presumably dca_jobtimeout checks
	 * dca_jobtid -- confirm), then untimeout outside the lock.
	 */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}
 997 
/*
 * DDI_RESUME handler: reprogram PCI configuration space and the
 * card CSRs back to the state attach established, then resume job
 * scheduling via dca_undrain().  Only device configuration is redone
 * here; worklists, DMA resources and kstats survived in soft state.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t	pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	/* full chip reset before touching the card CSRs */
	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}
1058 
1059 int
1060 dca_suspend(dca_t *dca)
1061 {
1062         if ((dca_drain(dca)) != 0) {
1063                 return (DDI_FAILURE);
1064         }
1065         if (dca_reset(dca, 0) < 0) {
1066                 dca_error(dca, "unable to reset device during suspend");
1067                 return (DDI_FAILURE);
1068         }
1069         return (DDI_SUCCESS);
1070 }
1071 
1072 /*
1073  * Hardware access stuff.
1074  */
/*
 * Soft-reset the chip: set DMACTL_RESET and poll (up to 99 iterations
 * of 100us each, i.e. ~10ms) for the bit to self-clear.
 *
 * When `failreset' is nonzero, the register access handle is not
 * FMA-checked and no failure is reported on timeout -- callers use
 * this for best-effort resets on paths where a fault has already been
 * detected.  Returns 0 on success, -1 on failure.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	/* nothing to reset if the registers were never mapped */
	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}

	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t	dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		if ((dmactl & DMACTL_RESET) == 0) {
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}
	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}
1113 
/*
 * Initialize one MCR worklist: create its locks and CV, set up its
 * queues, and preallocate MAXWORK work structures plus enough request
 * structures to cover the high-water mark with every MCR full.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if any preallocation fails.
 * Partial initialization is NOT unwound here; the attach failure path
 * ends in dca_uninit(), which is presumably expected to clean up --
 * confirm against that routine.
 */
int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
	int	i;
	int	reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

	/*
	 * Set up work queue.  The mutexes use the interrupt cookie since
	 * these locks are also taken from the ISR (see dca_intr()).
	 */
	mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
	    dca->dca_icookie);
	mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

	mutex_enter(&wlp->dwl_lock);

	dca_initq(&wlp->dwl_freereqs);
	dca_initq(&wlp->dwl_waitq);
	dca_initq(&wlp->dwl_freework);
	dca_initq(&wlp->dwl_runq);

	/* preallocate the fixed pool of work (MCR) structures */
	for (i = 0; i < MAXWORK; i++) {
		dca_work_t		*workp;

		if ((workp = dca_newwork(dca)) == NULL) {
			dca_error(dca, "unable to allocate work");
			mutex_exit(&wlp->dwl_lock);
			return (DDI_FAILURE);
		}
		workp->dw_wlp = wlp;
		dca_freework(workp);
	}
	mutex_exit(&wlp->dwl_lock);

	/* preallocate the request pool (freed onto dwl_freereqs) */
	for (i = 0; i < reqprealloc; i++) {
		dca_request_t *reqp;

		if ((reqp = dca_newreq(dca)) == NULL) {
			dca_error(dca, "unable to allocate request");
			return (DDI_FAILURE);
		}
		reqp->dr_dca = dca;
		reqp->dr_wlp = wlp;
		dca_freereq(reqp);
	}
	return (DDI_SUCCESS);
}
1162 
/*
 * One-time per-instance data structure setup: the private context list
 * and both MCR worklists.  The low/high water marks and per-MCR request
 * counts are tunable via the driver properties "mcr1_lowater",
 * "mcr1_hiwater", "mcr1_maxreqs" (and the mcr2 equivalents); the
 * requests-per-MCR value is clamped to MAXREQSPERMCR.
 * Returns DDI_SUCCESS/DDI_FAILURE; on failure, partially initialized
 * state is left for the caller to unwind (see dca_uninit()).
 */
int
dca_init(dca_t *dca)
{
	dca_worklist_t		*wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1215 
1216 /*
1217  * Uninitialize worklists.  This routine should only be called when no
1218  * active jobs (hence DMA mappings) exist.  One way to ensure this is
1219  * to unregister from kCF before calling this routine.  (This is done
1220  * e.g. in detach(9e).)
1221  */
void
dca_uninit(dca_t *dca)
{
	int	mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		dca_request_t	*reqp;

		/*
		 * Skip worklists when the registers were never mapped
		 * (early attach failure).
		 * NOTE(review): this also skips the mutex/cv destruction
		 * below; if dca_init() ran but the register mapping then
		 * failed, those primitives are leaked -- confirm intended.
		 */
		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		/* drain and destroy the preallocated work structures */
		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		/* and the preallocated request structures */
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		mutex_destroy(&wlp->dwl_freelock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = NULL;
	}
}
1254 
1255 static void
1256 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1257 {
1258         if (!q || !node)
1259                 return;
1260 
1261         mutex_enter(lock);
1262         node->dl_next2 = q;
1263         node->dl_prev2 = q->dl_prev2;
1264         node->dl_next2->dl_prev2 = node;
1265         node->dl_prev2->dl_next2 = node;
1266         mutex_exit(lock);
1267 }
1268 
1269 static void
1270 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1271 {
1272         if (!node)
1273                 return;
1274 
1275         mutex_enter(lock);
1276         node->dl_next2->dl_prev2 = node->dl_prev2;
1277         node->dl_prev2->dl_next2 = node->dl_next2;
1278         node->dl_next2 = NULL;
1279         node->dl_prev2 = NULL;
1280         mutex_exit(lock);
1281 }
1282 
1283 static dca_listnode_t *
1284 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1285 {
1286         dca_listnode_t *node;
1287 
1288         mutex_enter(lock);
1289         if ((node = q->dl_next2) == q) {
1290                 mutex_exit(lock);
1291                 return (NULL);
1292         }
1293 
1294         node->dl_next2->dl_prev2 = node->dl_prev2;
1295         node->dl_prev2->dl_next2 = node->dl_next2;
1296         node->dl_next2 = NULL;
1297         node->dl_prev2 = NULL;
1298         mutex_exit(lock);
1299 
1300         return (node);
1301 }
1302 
1303 void
1304 dca_initq(dca_listnode_t *q)
1305 {
1306         q->dl_next = q;
1307         q->dl_prev = q;
1308         q->dl_next2 = q;
1309         q->dl_prev2 = q;
1310 }
1311 
1312 void
1313 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1314 {
1315         /*
1316          * Enqueue submits at the "tail" of the list, i.e. just
1317          * behind the sentinel.
1318          */
1319         node->dl_next = q;
1320         node->dl_prev = q->dl_prev;
1321         node->dl_next->dl_prev = node;
1322         node->dl_prev->dl_next = node;
1323 }
1324 
1325 void
1326 dca_rmqueue(dca_listnode_t *node)
1327 {
1328         node->dl_next->dl_prev = node->dl_prev;
1329         node->dl_prev->dl_next = node->dl_next;
1330         node->dl_next = NULL;
1331         node->dl_prev = NULL;
1332 }
1333 
1334 dca_listnode_t *
1335 dca_dequeue(dca_listnode_t *q)
1336 {
1337         dca_listnode_t *node;
1338         /*
1339          * Dequeue takes from the "head" of the list, i.e. just after
1340          * the sentinel.
1341          */
1342         if ((node = q->dl_next) == q) {
1343                 /* queue is empty */
1344                 return (NULL);
1345         }
1346         dca_rmqueue(node);
1347         return (node);
1348 }
1349 
1350 /* this is the opposite of dequeue, it takes things off in LIFO order */
1351 dca_listnode_t *
1352 dca_unqueue(dca_listnode_t *q)
1353 {
1354         dca_listnode_t *node;
1355         /*
1356          * unqueue takes from the "tail" of the list, i.e. just before
1357          * the sentinel.
1358          */
1359         if ((node = q->dl_prev) == q) {
1360                 /* queue is empty */
1361                 return (NULL);
1362         }
1363         dca_rmqueue(node);
1364         return (node);
1365 }
1366 
1367 dca_listnode_t *
1368 dca_peekqueue(dca_listnode_t *q)
1369 {
1370         dca_listnode_t *node;
1371 
1372         if ((node = q->dl_next) == q) {
1373                 return (NULL);
1374         } else {
1375                 return (node);
1376         }
1377 }
1378 
1379 /*
1380  * Interrupt service routine.
1381  */
1382 uint_t
1383 dca_intr(char *arg)
1384 {
1385         dca_t           *dca = (dca_t *)arg;
1386         uint32_t        status;
1387 
1388         mutex_enter(&dca->dca_intrlock);
1389         status = GETCSR(dca, CSR_DMASTAT);
1390         PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
1391         if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1392             DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
1393                 mutex_exit(&dca->dca_intrlock);
1394                 return ((uint_t)DDI_FAILURE);
1395         }
1396 
1397         DBG(dca, DINTR, "interrupted, status = 0x%x!", status);
1398 
1399         if ((status & DMASTAT_INTERRUPTS) == 0) {
1400                 /* increment spurious interrupt kstat */
1401                 if (dca->dca_intrstats) {
1402                         KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
1403                 }
1404                 mutex_exit(&dca->dca_intrlock);
1405                 return (DDI_INTR_UNCLAIMED);
1406         }
1407 
1408         if (dca->dca_intrstats) {
1409                 KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
1410         }
1411         if (status & DMASTAT_MCR1INT) {
1412                 DBG(dca, DINTR, "MCR1 interrupted");
1413                 mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
1414                 dca_schedule(dca, MCR1);
1415                 dca_reclaim(dca, MCR1);
1416                 mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
1417         }
1418 
1419         if (status & DMASTAT_MCR2INT) {
1420                 DBG(dca, DINTR, "MCR2 interrupted");
1421                 mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
1422                 dca_schedule(dca, MCR2);
1423                 dca_reclaim(dca, MCR2);
1424                 mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
1425         }
1426 
1427         if (status & DMASTAT_ERRINT) {
1428                 uint32_t        erraddr;
1429                 erraddr = GETCSR(dca, CSR_DMAEA);
1430                 mutex_exit(&dca->dca_intrlock);
1431 
1432                 /*
1433                  * bit 1 of the error address indicates failure during
1434                  * read if set, during write otherwise.
1435                  */
1436                 dca_failure(dca, DDI_DEVICE_FAULT,
1437                     DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
1438                     "DMA master access error %s address 0x%x",
1439                     erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
1440                 return (DDI_INTR_CLAIMED);
1441         }
1442 
1443         mutex_exit(&dca->dca_intrlock);
1444 
1445         return (DDI_INTR_CLAIMED);
1446 }
1447 
1448 /*
1449  * Reverse a string of bytes from s1 into s2.  The reversal happens
1450  * from the tail of s1.  If len1 < len2, then null bytes will be
1451  * padded to the end of s2.  If len2 < len1, then (presumably null)
1452  * bytes will be dropped from the start of s1.
1453  *
1454  * The rationale here is that when s1 (source) is shorter, then we
1455  * are reversing from big-endian ordering, into device ordering, and
1456  * want to add some extra nulls to the tail (MSB) side of the device.
1457  *
1458  * Similarly, when s2 (dest) is shorter, then we are truncating what
1459  * are presumably null MSB bits from the device.
1460  *
1461  * There is an expectation when reversing from the device back into
1462  * big-endian, that the number of bytes to reverse and the target size
1463  * will match, and no truncation or padding occurs.
1464  */
1465 void
1466 dca_reverse(void *s1, void *s2, int len1, int len2)
1467 {
1468         caddr_t src, dst;
1469 
1470         if (len1 == 0) {
1471                 if (len2) {
1472                         bzero(s2, len2);
1473                 }
1474                 return;
1475         }
1476         src = (caddr_t)s1 + len1 - 1;
1477         dst = s2;
1478         while ((src >= (caddr_t)s1) && (len2)) {
1479                 *dst++ = *src--;
1480                 len2--;
1481         }
1482         while (len2 > 0) {
1483                 *dst++ = 0;
1484                 len2--;
1485         }
1486 }
1487 
/*
 * Map a bit length to the smallest full-length operand size the chip
 * supports, returned as BITS2BYTES() of that size; 0 if num exceeds
 * 2048 bits.
 */
uint16_t
dca_padfull(int num)
{
	static const int	fullsizes[] = { 512, 768, 1024, 1536, 2048 };
	size_t			i;

	for (i = 0; i < sizeof (fullsizes) / sizeof (fullsizes[0]); i++) {
		if (num <= fullsizes[i]) {
			return (BITS2BYTES(fullsizes[i]));
		}
	}
	return (0);
}
1508 
/*
 * Map a bit length to the smallest half-length operand size the chip
 * supports, returned as BITS2BYTES() of that size; 0 if num exceeds
 * 1024 bits.
 */
uint16_t
dca_padhalf(int num)
{
	static const int	halfsizes[] = { 256, 384, 512, 768, 1024 };
	size_t			i;

	for (i = 0; i < sizeof (halfsizes) / sizeof (halfsizes[0]); i++) {
		if (num <= halfsizes[i]) {
			return (BITS2BYTES(halfsizes[i]));
		}
	}
	return (0);
}
1529 
/*
 * Allocate one dca_work_t with a DMA-able MCR region: allocate the DMA
 * handle, allocate page-rounded consistent memory, and bind it.  On any
 * failure, dca_destroywork() unwinds whatever was set up (it tolerates
 * partially constructed work because kmem_zalloc zeroed the fields).
 * Returns the new work structure, or NULL on failure.
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t		*workp;
	size_t			size;
	ddi_dma_cookie_t	c;
	unsigned		nc;
	int			rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	/* page-rounded for driver hardening (cf. dca_newreq) */
	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	/*
	 * NOTE(review): only the first cookie is recorded; presumably
	 * dca_dmaattr constrains this binding to a single cookie --
	 * confirm against the attribute definition.
	 */
	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}
1571 
/*
 * Free a dca_work_t, unwinding dca_newwork() in reverse order: unbind,
 * free the DMA memory, free the handle, free the structure.  Each stage
 * is guarded so a partially constructed work structure (fields still
 * zero from kmem_zalloc) is handled safely; a nonzero dw_mcr_paddr is
 * used as the "handle is bound" indicator.
 */
void
dca_destroywork(dca_work_t *workp)
{
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}
1586 
1587 dca_request_t *
1588 dca_newreq(dca_t *dca)
1589 {
1590         dca_request_t           *reqp;
1591         size_t                  size;
1592         ddi_dma_cookie_t        c;
1593         unsigned                nc;
1594         int                     rv;
1595         int                     n_chain = 0;
1596 
1597         size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;
1598 
1599         reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);
1600 
1601         reqp->dr_dca = dca;
1602 
1603         /*
1604          * Setup the DMA region for the context and descriptors.
1605          */
1606         rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
1607             NULL, &reqp->dr_ctx_dmah);
1608         if (rv != DDI_SUCCESS) {
1609                 dca_error(dca, "failure allocating request DMA handle");
1610                 dca_destroyreq(reqp);
1611                 return (NULL);
1612         }
1613 
1614         /* for driver hardening, allocate in whole pages */
1615         rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
1616             ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
1617             DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
1618             &reqp->dr_ctx_acch);
1619         if (rv != DDI_SUCCESS) {
1620                 dca_error(dca, "unable to alloc request DMA memory");
1621                 dca_destroyreq(reqp);
1622                 return (NULL);
1623         }
1624 
1625         rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
1626             reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
1627             DDI_DMA_SLEEP, 0, &c, &nc);
1628         if (rv != DDI_DMA_MAPPED) {
1629                 dca_error(dca, "failed binding request DMA handle");
1630                 dca_destroyreq(reqp);
1631                 return (NULL);
1632         }
1633         reqp->dr_ctx_paddr = c.dmac_address;
1634 
1635         reqp->dr_dma_size = size;
1636 
1637         /*
1638          * Set up the dma for our scratch/shared buffers.
1639          */
1640         rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1641             DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
1642         if (rv != DDI_SUCCESS) {
1643                 dca_error(dca, "failure allocating ibuf DMA handle");
1644                 dca_destroyreq(reqp);
1645                 return (NULL);
1646         }
1647         rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1648             DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
1649         if (rv != DDI_SUCCESS) {
1650                 dca_error(dca, "failure allocating obuf DMA handle");
1651                 dca_destroyreq(reqp);
1652                 return (NULL);
1653         }
1654 
1655         rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1656             DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
1657         if (rv != DDI_SUCCESS) {
1658                 dca_error(dca, "failure allocating chain_in DMA handle");
1659                 dca_destroyreq(reqp);
1660                 return (NULL);
1661         }
1662 
1663         rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1664             DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
1665         if (rv != DDI_SUCCESS) {
1666                 dca_error(dca, "failure allocating chain_out DMA handle");
1667                 dca_destroyreq(reqp);
1668                 return (NULL);
1669         }
1670 
1671         /*
1672          * for driver hardening, allocate in whole pages.
1673          */
1674         size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
1675 #if defined(i386) || defined(__i386)
1676         /*
1677          * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
1678          * may fail on x86 platform if a physically contiguous memory chunk
1679          * cannot be found. From initial testing, we did not see performance
1680          * degradation as seen on Sparc.
1681          */
1682         reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP);
1683         reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP);
1684 #else
1685         /*
1686          * We could kmem_alloc for Sparc too. However, it gives worse
1687          * performance when transferring more than one page data. For example,
1688          * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
1689          * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
1690          * the same throughput.
1691          */
1692         rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
1693             size, &dca_bufattr,
1694             DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
1695             &size, &reqp->dr_ibuf_acch);
1696         if (rv != DDI_SUCCESS) {
1697                 dca_error(dca, "unable to alloc request DMA memory");
1698                 dca_destroyreq(reqp);
1699                 return (NULL);
1700         }
1701 
1702         rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
1703             size, &dca_bufattr,
1704             DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
1705             &size, &reqp->dr_obuf_acch);
1706         if (rv != DDI_SUCCESS) {
1707                 dca_error(dca, "unable to alloc request DMA memory");
1708                 dca_destroyreq(reqp);
1709                 return (NULL);
1710         }
1711 #endif
1712 
1713         /* Skip the used portion in the context page */
1714         reqp->dr_offset = CTX_MAXLENGTH;
1715         if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
1716             reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
1717             DDI_DMA_WRITE | DDI_DMA_STREAMING,
1718             &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
1719                 (void) dca_destroyreq(reqp);
1720                 return (NULL);
1721         }
1722         reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
1723         /* Skip the space used by the input buffer */
1724         reqp->dr_offset += DESC_SIZE * n_chain;
1725 
1726         if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
1727             reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
1728             DDI_DMA_READ | DDI_DMA_STREAMING,
1729             &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
1730                 (void) dca_destroyreq(reqp);
1731                 return (NULL);
1732         }
1733         reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
1734         /* Skip the space used by the output buffer */
1735         reqp->dr_offset += DESC_SIZE * n_chain;
1736 
1737         DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
1738             reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
1739         return (reqp);
1740 }
1741 
1742 void
1743 dca_destroyreq(dca_request_t *reqp)
1744 {
1745 #if defined(i386) || defined(__i386)
1746         dca_t           *dca = reqp->dr_dca;
1747         size_t          size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
1748 #endif
1749 
1750         /*
1751          * Clean up DMA for the context structure.
1752          */
1753         if (reqp->dr_ctx_paddr) {
1754                 (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
1755         }
1756 
1757         if (reqp->dr_ctx_acch) {
1758                 ddi_dma_mem_free(&reqp->dr_ctx_acch);
1759         }
1760 
1761         if (reqp->dr_ctx_dmah) {
1762                 ddi_dma_free_handle(&reqp->dr_ctx_dmah);
1763         }
1764 
1765         /*
1766          * Clean up DMA for the scratch buffer.
1767          */
1768 #if defined(i386) || defined(__i386)
1769         if (reqp->dr_ibuf_dmah) {
1770                 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
1771                 ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
1772         }
1773         if (reqp->dr_obuf_dmah) {
1774                 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
1775                 ddi_dma_free_handle(&reqp->dr_obuf_dmah);
1776         }
1777 
1778         kmem_free(reqp->dr_ibuf_kaddr, size);
1779         kmem_free(reqp->dr_obuf_kaddr, size);
1780 #else
1781         if (reqp->dr_ibuf_paddr) {
1782                 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
1783         }
1784         if (reqp->dr_obuf_paddr) {
1785                 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
1786         }
1787 
1788         if (reqp->dr_ibuf_acch) {
1789                 ddi_dma_mem_free(&reqp->dr_ibuf_acch);
1790         }
1791         if (reqp->dr_obuf_acch) {
1792                 ddi_dma_mem_free(&reqp->dr_obuf_acch);
1793         }
1794 
1795         if (reqp->dr_ibuf_dmah) {
1796                 ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
1797         }
1798         if (reqp->dr_obuf_dmah) {
1799                 ddi_dma_free_handle(&reqp->dr_obuf_dmah);
1800         }
1801 #endif
1802         /*
1803          * These two DMA handles should have been unbinded in
1804          * dca_unbindchains() function
1805          */
1806         if (reqp->dr_chain_in_dmah) {
1807                 ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
1808         }
1809         if (reqp->dr_chain_out_dmah) {
1810                 ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
1811         }
1812 
1813         kmem_free(reqp, sizeof (dca_request_t));
1814 }
1815 
1816 dca_work_t *
1817 dca_getwork(dca_t *dca, int mcr)
1818 {
1819         dca_worklist_t  *wlp = WORKLIST(dca, mcr);
1820         dca_work_t      *workp;
1821 
1822         mutex_enter(&wlp->dwl_freelock);
1823         workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1824         mutex_exit(&wlp->dwl_freelock);
1825         if (workp) {
1826                 int     nreqs;
1827                 bzero(workp->dw_mcr_kaddr, 8);
1828 
1829                 /* clear out old requests */
1830                 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1831                         workp->dw_reqs[nreqs] = NULL;
1832                 }
1833         }
1834         return (workp);
1835 }
1836 
1837 void
1838 dca_freework(dca_work_t *workp)
1839 {
1840         mutex_enter(&workp->dw_wlp->dwl_freelock);
1841         dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1842         mutex_exit(&workp->dw_wlp->dwl_freelock);
1843 }
1844 
1845 dca_request_t *
1846 dca_getreq(dca_t *dca, int mcr, int tryhard)
1847 {
1848         dca_worklist_t  *wlp = WORKLIST(dca, mcr);
1849         dca_request_t   *reqp;
1850 
1851         mutex_enter(&wlp->dwl_freereqslock);
1852         reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1853         mutex_exit(&wlp->dwl_freereqslock);
1854         if (reqp) {
1855                 reqp->dr_flags = 0;
1856                 reqp->dr_callback = NULL;
1857         } else if (tryhard) {
1858                 /*
1859                  * failed to get a free one, try an allocation, the hard way.
1860                  * XXX: Kstat desired here.
1861                  */
1862                 if ((reqp = dca_newreq(dca)) != NULL) {
1863                         reqp->dr_wlp = wlp;
1864                         reqp->dr_dca = dca;
1865                         reqp->dr_flags = 0;
1866                         reqp->dr_callback = NULL;
1867                 }
1868         }
1869         return (reqp);
1870 }
1871 
1872 void
1873 dca_freereq(dca_request_t *reqp)
1874 {
1875         reqp->dr_kcf_req = NULL;
1876         if (!(reqp->dr_flags & DR_NOCACHE)) {
1877                 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1878                 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1879                     (dca_listnode_t *)reqp);
1880                 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1881         }
1882 }
1883 
1884 /*
1885  * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
1886  * is mapped to a single physical address. On x86, a user buffer is mapped
1887  * to multiple physical addresses. These physical addresses are chained
1888  * using the method specified in Broadcom BCM5820 specification.
1889  */
1890 int
1891 dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
1892 {
1893         int                     rv;
1894         caddr_t                 kaddr;
1895         uint_t                  flags;
1896         int                     n_chain = 0;
1897 
1898         if (reqp->dr_flags & DR_INPLACE) {
1899                 flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
1900         } else {
1901                 flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
1902         }
1903 
1904         /* first the input */
1905         if (incnt) {
1906                 if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
1907                         DBG(NULL, DWARN, "unrecognised crypto data format");
1908                         return (DDI_FAILURE);
1909                 }
1910                 if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
1911                     kaddr, reqp->dr_chain_in_dmah, flags,
1912                     &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
1913                         (void) dca_unbindchains(reqp);
1914                         return (rv);
1915                 }
1916 
1917                 /*
1918                  * The offset and length are altered by the calling routine
1919                  * reqp->dr_in->cd_offset += incnt;
1920                  * reqp->dr_in->cd_length -= incnt;
1921                  */
1922                 /* Save the first one in the chain for MCR */
1923                 reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
1924                 reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
1925                 reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
1926         } else {
1927                 reqp->dr_in_paddr = NULL;
1928                 reqp->dr_in_next = 0;
1929                 reqp->dr_in_len = 0;
1930         }
1931 
1932         if (reqp->dr_flags & DR_INPLACE) {
1933                 reqp->dr_out_paddr = reqp->dr_in_paddr;
1934                 reqp->dr_out_len = reqp->dr_in_len;
1935                 reqp->dr_out_next = reqp->dr_in_next;
1936                 return (DDI_SUCCESS);
1937         }
1938 
1939         /* then the output */
1940         if (outcnt) {
1941                 flags = DDI_DMA_READ | DDI_DMA_STREAMING;
1942                 if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
1943                         DBG(NULL, DWARN, "unrecognised crypto data format");
1944                         (void) dca_unbindchains(reqp);
1945                         return (DDI_FAILURE);
1946                 }
1947                 rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
1948                     n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
1949                     flags, &reqp->dr_chain_out_head, &n_chain);
1950                 if (rv != DDI_SUCCESS) {
1951                         (void) dca_unbindchains(reqp);
1952                         return (DDI_FAILURE);
1953                 }
1954 
1955                 /* Save the first one in the chain for MCR */
1956                 reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
1957                 reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
1958                 reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
1959         } else {
1960                 reqp->dr_out_paddr = NULL;
1961                 reqp->dr_out_next = 0;
1962                 reqp->dr_out_len = 0;
1963         }
1964 
1965         return (DDI_SUCCESS);
1966 }
1967 
1968 /*
1969  * Unbind the user buffers from the DMA handles.
1970  */
1971 int
1972 dca_unbindchains(dca_request_t *reqp)
1973 {
1974         int rv = DDI_SUCCESS;
1975         int rv1 = DDI_SUCCESS;
1976 
1977         /* Clear the input chain */
1978         if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
1979                 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1980                 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1981         }
1982 
1983         if (reqp->dr_flags & DR_INPLACE) {
1984                 return (rv);
1985         }
1986 
1987         /* Clear the output chain */
1988         if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
1989                 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1990                 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1991         }
1992 
1993         return ((rv != DDI_SUCCESS)? rv : rv1);
1994 }
1995 
1996 /*
1997  * Build either input chain or output chain. It is single-item chain for Sparc,
1998  * and possible mutiple-item chain for x86.
1999  */
2000 static int
2001 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
2002     caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
2003     dca_chain_t *head, int *n_chain)
2004 {
2005         ddi_dma_cookie_t        c;
2006         uint_t                  nc;
2007         int                     rv;
2008         caddr_t                 chain_kaddr_pre;
2009         caddr_t                 chain_kaddr;
2010         uint32_t                chain_paddr;
2011         int                     i;
2012 
2013         /* Advance past the context structure to the starting address */
2014         chain_paddr = reqp->dr_ctx_paddr + dr_offset;
2015         chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
2016 
2017         /*
2018          * Bind the kernel address to the DMA handle. On x86, the actual
2019          * buffer is mapped into multiple physical addresses. On Sparc,
2020          * the actual buffer is mapped into a single address.
2021          */
2022         rv = ddi_dma_addr_bind_handle(handle,
2023             NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
2024         if (rv != DDI_DMA_MAPPED) {
2025                 return (DDI_FAILURE);
2026         }
2027 
2028         (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
2029         if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
2030             DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
2031                 reqp->destroy = TRUE;
2032                 return (rv);
2033         }
2034 
2035         *n_chain = nc;
2036 
2037         /* Setup the data buffer chain for DMA transfer */
2038         chain_kaddr_pre = NULL;
2039         head->dc_buffer_paddr = 0;
2040         head->dc_next_paddr = 0;
2041         head->dc_buffer_length = 0;
2042         for (i = 0; i < nc; i++) {
2043                 /* PIO */
2044                 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
2045                 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
2046                 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
2047 
2048                 /* Remember the head of the chain */
2049                 if (head->dc_buffer_paddr == 0) {
2050                         head->dc_buffer_paddr = c.dmac_address;
2051                         head->dc_buffer_length = c.dmac_size;
2052                 }
2053 
2054                 /* Link to the previous one if one exists */
2055                 if (chain_kaddr_pre) {
2056                         PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
2057                             chain_paddr);
2058                         if (head->dc_next_paddr == 0)
2059                                 head->dc_next_paddr = chain_paddr;
2060                 }
2061                 chain_kaddr_pre = chain_kaddr;
2062 
2063                 /* Maintain pointers */
2064                 chain_paddr += DESC_SIZE;
2065                 chain_kaddr += DESC_SIZE;
2066 
2067                 /* Retrieve the next cookie if there is one */
2068                 if (i < nc-1)
2069                         ddi_dma_nextcookie(handle, &c);
2070         }
2071 
2072         /* Set the next pointer in the last entry to NULL */
2073         PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
2074 
2075         return (DDI_SUCCESS);
2076 }
2077 
2078 /*
2079  * Schedule some work.
2080  */
2081 int
2082 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
2083 {
2084         dca_worklist_t  *wlp = WORKLIST(dca, mcr);
2085 
2086         mutex_enter(&wlp->dwl_lock);
2087 
2088         DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
2089             reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
2090             reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
2091         DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
2092             reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
2093         /* sync out the entire context and descriptor chains */
2094         (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2095         if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
2096             DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2097                 reqp->destroy = TRUE;
2098                 mutex_exit(&wlp->dwl_lock);
2099                 return (CRYPTO_DEVICE_ERROR);
2100         }
2101 
2102         dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
2103         wlp->dwl_count++;
2104         wlp->dwl_lastsubmit = ddi_get_lbolt();
2105         reqp->dr_wlp = wlp;
2106 
2107         if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
2108                 /* we are fully loaded now, let kCF know */
2109 
2110                 wlp->dwl_flowctl++;
2111                 wlp->dwl_busy = 1;
2112 
2113                 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
2114         }
2115 
2116         if (dosched) {
2117 #ifdef  SCHEDDELAY
2118                 /* possibly wait for more work to arrive */
2119                 if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
2120                         dca_schedule(dca, mcr);
2121                 } else if (!wlp->dwl_schedtid) {
2122                         /* wait 1 msec for more work before doing it */
2123                         wlp->dwl_schedtid = timeout(dca_schedtimeout,
2124                             (void *)wlp, drv_usectohz(MSEC));
2125                 }
2126 #else
2127                 dca_schedule(dca, mcr);
2128 #endif
2129         }
2130         mutex_exit(&wlp->dwl_lock);
2131 
2132         return (CRYPTO_QUEUED);
2133 }
2134 
/*
 * Post as many MCRs as possible to the device for the given worklist.
 * For each MCR, requests are pulled off the wait queue, their context
 * and buffer-chain addresses are written into the MCR, and the MCR's
 * physical address is written to the appropriate CSR.  Stops when the
 * device signals FULL, the wait queue empties, or an FM fault is seen.
 * Must be called with the worklist's dwl_lock held.
 */
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;
	int		full;
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	/* select the CSR and FULL status bit for this MCR */
	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		/* device cannot accept another MCR right now */
		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		/* fill the MCR with up to dwl_reqspermcr requests */
		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				/* drained to the low-water mark; tell kCF */
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		/* hand the MCR to the device by writing its paddr */
		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			/* device owns it now; track it on the run queue */
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}
2315 
2316 /*
2317  * Reclaim completed work, called in interrupt context.
2318  */
2319 void
2320 dca_reclaim(dca_t *dca, int mcr)
2321 {
2322         dca_worklist_t  *wlp = WORKLIST(dca, mcr);
2323         dca_work_t      *workp;
2324         ushort_t        flags;
2325         int             nreclaimed = 0;
2326         int             i;
2327 
2328         DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
2329         ASSERT(mutex_owned(&wlp->dwl_lock));
2330         /*
2331          * For each MCR in the submitted (runq), we check to see if
2332          * it has been processed.  If so, then we note each individual
2333          * job in the MCR, and and do the completion processing for
2334          * each of such job.
2335          */
2336         for (;;) {
2337 
2338                 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2339                 if (workp == NULL) {
2340                         break;
2341                 }
2342 
2343                 /* only sync the MCR flags, since that's all we need */
2344                 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
2345                     DDI_DMA_SYNC_FORKERNEL);
2346                 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2347                     DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2348                         dca_rmqueue((dca_listnode_t *)workp);
2349                         dca_destroywork(workp);
2350                         return;
2351                 }
2352 
2353                 flags = GETMCR16(workp, MCR_FLAGS);
2354                 if ((flags & MCRFLAG_FINISHED) == 0) {
2355                         /* chip is still working on it */
2356                         DBG(dca, DRECLAIM,
2357                             "chip still working on it (MCR%d)", mcr);
2358                         break;
2359                 }
2360 
2361                 /* its really for us, so remove it from the queue */
2362                 dca_rmqueue((dca_listnode_t *)workp);
2363 
2364                 /* if we were draining, signal on the cv */
2365                 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2366                         cv_signal(&wlp->dwl_cv);
2367                 }
2368 
2369                 /* update statistics, done under the lock */
2370                 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2371                         dca_request_t *reqp = workp->dw_reqs[i];
2372                         if (reqp == NULL) {
2373                                 continue;
2374                         }
2375                         if (reqp->dr_byte_stat >= 0) {
2376                                 dca->dca_stats[reqp->dr_byte_stat] +=
2377                                     reqp->dr_pkt_length;
2378                         }
2379                         if (reqp->dr_job_stat >= 0) {
2380                                 dca->dca_stats[reqp->dr_job_stat]++;
2381                         }
2382                 }
2383                 mutex_exit(&wlp->dwl_lock);
2384 
2385                 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2386                         dca_request_t *reqp = workp->dw_reqs[i];
2387 
2388                         if (reqp == NULL) {
2389                                 continue;
2390                         }
2391 
2392                         /* Do the callback. */
2393                         workp->dw_reqs[i] = NULL;
2394                         dca_done(reqp, CRYPTO_SUCCESS);
2395 
2396                         nreclaimed++;
2397                 }
2398 
2399                 /* now we can release the work */
2400                 dca_freework(workp);
2401 
2402                 mutex_enter(&wlp->dwl_lock);
2403         }
2404         DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
2405 }
2406 
2407 int
2408 dca_length(crypto_data_t *cdata)
2409 {
2410         return (cdata->cd_length);
2411 }
2412 
2413 /*
2414  * This is the callback function called from the interrupt when a kCF job
2415  * completes.  It does some driver-specific things, and then calls the
2416  * kCF-provided callback.  Finally, it cleans up the state for the work
2417  * request and drops the reference count to allow for DR.
2418  */
2419 void
2420 dca_done(dca_request_t *reqp, int err)
2421 {
2422         uint64_t        ena = 0;
2423 
2424         /* unbind any chains we were using */
2425         if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2426                 /* DMA failure */
2427                 ena = dca_ena(ena);
2428                 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2429                     DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2430                     "fault on buffer DMA handle");
2431                 if (err == CRYPTO_SUCCESS) {
2432                         err = CRYPTO_DEVICE_ERROR;
2433                 }
2434         }
2435 
2436         if (reqp->dr_callback != NULL) {
2437                 reqp->dr_callback(reqp, err);
2438         } else {
2439                 dca_freereq(reqp);
2440         }
2441 }
2442 
2443 /*
2444  * Call this when a failure is detected.  It will reset the chip,
2445  * log a message, alert kCF, and mark jobs in the runq as failed.
2446  */
2447 /* ARGSUSED */
2448 void
2449 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2450     uint64_t ena, int errno, char *mess, ...)
2451 {
2452         va_list ap;
2453         char    buf[256];
2454         int     mcr;
2455         char    *eclass;
2456         int     have_mutex;
2457 
2458         va_start(ap, mess);
2459         (void) vsprintf(buf, mess, ap);
2460         va_end(ap);
2461 
2462         eclass = dca_fma_eclass_string(dca->dca_model, index);
2463 
2464         if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2465             index != DCA_FM_ECLASS_NONE) {
2466                 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2467                     DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2468                     FM_EREPORT_VERS0, NULL);
2469 
2470                 /* Report the impact of the failure to the DDI. */
2471                 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2472         } else {
2473                 /* Just log the error string to the message log */
2474                 dca_error(dca, buf);
2475         }
2476 
2477         /*
2478          * Indicate a failure (keeps schedule from running).
2479          */
2480         dca->dca_flags |= DCA_FAILED;
2481 
2482         /*
2483          * Reset the chip.  This should also have as a side effect, the
2484          * disabling of all interrupts from the device.
2485          */
2486         (void) dca_reset(dca, 1);
2487 
2488         /*
2489          * Report the failure to kCF.
2490          */
2491         for (mcr = MCR1; mcr <= MCR2; mcr++) {
2492                 if (WORKLIST(dca, mcr)->dwl_prov) {
2493                         crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2494                             CRYPTO_PROVIDER_FAILED);
2495                 }
2496         }
2497 
2498         /*
2499          * Return jobs not sent to hardware back to kCF.
2500          */
2501         dca_rejectjobs(dca);
2502 
2503         /*
2504          * From this point on, no new work should be arriving, and the
2505          * chip should not be doing any active DMA.
2506          */
2507 
2508         /*
2509          * Now find all the work submitted to the device and fail
2510          * them.
2511          */
2512         for (mcr = MCR1; mcr <= MCR2; mcr++) {
2513                 dca_worklist_t  *wlp;
2514                 int             i;
2515 
2516                 wlp = WORKLIST(dca, mcr);
2517 
2518                 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2519                         continue;
2520                 }
2521                 for (;;) {
2522                         dca_work_t      *workp;
2523 
2524                         have_mutex = mutex_tryenter(&wlp->dwl_lock);
2525                         workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2526                         if (workp == NULL) {
2527                                 if (have_mutex)
2528                                         mutex_exit(&wlp->dwl_lock);
2529                                 break;
2530                         }
2531                         mutex_exit(&wlp->dwl_lock);
2532 
2533                         /*
2534                          * Free up requests
2535                          */
2536                         for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2537                                 dca_request_t *reqp = workp->dw_reqs[i];
2538                                 if (reqp) {
2539                                         dca_done(reqp, errno);
2540                                         workp->dw_reqs[i] = NULL;
2541                                 }
2542                         }
2543 
2544                         mutex_enter(&wlp->dwl_lock);
2545                         /*
2546                          * If waiting to drain, signal on the waiter.
2547                          */
2548                         if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2549                                 cv_signal(&wlp->dwl_cv);
2550                         }
2551 
2552                         /*
2553                          * Return the work and request structures to
2554                          * the free pool.
2555                          */
2556                         dca_freework(workp);
2557                         if (have_mutex)
2558                                 mutex_exit(&wlp->dwl_lock);
2559                 }
2560         }
2561 
2562 }
2563 
#ifdef	SCHEDDELAY
/*
 * Reschedule worklist as needed.
 */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t	*wlp = (dca_worklist_t *)arg;
	mutex_enter(&wlp->dwl_lock);
	/* timeout has fired; clear the id so a new one can be armed */
	wlp->dwl_schedtid = 0;
	/* push any pending work for this MCR to the device */
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
#endif
2578 
2579 /*
2580  * Check for stalled jobs.
2581  */
2582 void
2583 dca_jobtimeout(void *arg)
2584 {
2585         int             mcr;
2586         dca_t           *dca = (dca_t *)arg;
2587         int             hung = 0;
2588 
2589         for (mcr = MCR1; mcr <= MCR2; mcr++) {
2590                 dca_worklist_t  *wlp = WORKLIST(dca, mcr);
2591                 dca_work_t      *workp;
2592                 clock_t         when;
2593 
2594                 mutex_enter(&wlp->dwl_lock);
2595                 when = ddi_get_lbolt();
2596 
2597                 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2598                 if (workp == NULL) {
2599                         /* nothing sitting in the queue */
2600                         mutex_exit(&wlp->dwl_lock);
2601                         continue;
2602                 }
2603 
2604                 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2605                         /* request has been queued for less than STALETIME */
2606                         mutex_exit(&wlp->dwl_lock);
2607                         continue;
2608                 }
2609 
2610                 /* job has been sitting around for over 1 second, badness */
2611                 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2612                     mcr);
2613 
2614                 /* put it back in the queue, until we reset the chip */
2615                 hung++;
2616                 mutex_exit(&wlp->dwl_lock);
2617         }
2618 
2619         if (hung) {
2620                 dca_failure(dca, DDI_DEVICE_FAULT,
2621                     DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2622                     "timeout processing job.)");
2623         }
2624 
2625         /* reschedule ourself */
2626         mutex_enter(&dca->dca_intrlock);
2627         if (dca->dca_jobtid == 0) {
2628                 /* timeout has been canceled, prior to DR */
2629                 mutex_exit(&dca->dca_intrlock);
2630                 return;
2631         }
2632 
2633         /* check again in 1 second */
2634         dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2635             drv_usectohz(SECOND));
2636         mutex_exit(&dca->dca_intrlock);
2637 }
2638 
2639 /*
2640  * This returns all jobs back to kCF.  It assumes that processing
2641  * on the worklist has halted.
2642  */
void
dca_rejectjobs(dca_t *dca)
{
	int mcr;
	int have_mutex;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_request_t	*reqp;

		/* A NULL waitq prev pointer means the list was never set up */
		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		/*
		 * NOTE(review): tryenter presumably tolerates a caller
		 * (e.g. dca_failure()) already holding dwl_lock;
		 * have_mutex records whether we owe a final exit.
		 */
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			/* pull the next queued (not-yet-submitted) request */
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			mutex_exit(&wlp->dwl_lock);

			/* drop the lock while calling back into kCF */
			(void) dca_unbindchains(reqp);
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}
2679 
/*
 * Quiesce both MCR worklists (e.g. prior to DR).  Sets dwl_drain so the
 * scheduler stops issuing new work, then waits up to STALETIME for each
 * runq to empty.  Returns 0 on success, or EBUSY (after undoing the
 * drain state) if the device could not be drained.
 */
int
dca_drain(dca_t *dca)
{
	int mcr;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef	SCHEDDELAY
		timeout_id_t	tid;
#endif
		dca_worklist_t *wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to a second to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			/* completion paths cv_signal dwl_cv once runq empties */
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				/* still busy: roll back the drain and fail */
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef	SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
	}
	return (0);
}
2724 
2725 void
2726 dca_undrain(dca_t *dca)
2727 {
2728         int     mcr;
2729 
2730         for (mcr = MCR1; mcr <= MCR2; mcr++) {
2731                 dca_worklist_t  *wlp = WORKLIST(dca, mcr);
2732                 mutex_enter(&wlp->dwl_lock);
2733                 wlp->dwl_drain = 0;
2734                 dca_schedule(dca, mcr);
2735                 mutex_exit(&wlp->dwl_lock);
2736         }
2737 }
2738 
2739 /*
2740  * Duplicate the crypto_data_t structure, but point to the original
2741  * buffers.
2742  */
2743 int
2744 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2745 {
2746         ninput->cd_format = input->cd_format;
2747         ninput->cd_offset = input->cd_offset;
2748         ninput->cd_length = input->cd_length;
2749         ninput->cd_miscdata = input->cd_miscdata;
2750 
2751         switch (input->cd_format) {
2752         case CRYPTO_DATA_RAW:
2753                 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2754                 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2755                 break;
2756 
2757         case CRYPTO_DATA_UIO:
2758                 ninput->cd_uio = input->cd_uio;
2759                 break;
2760 
2761         case CRYPTO_DATA_MBLK:
2762                 ninput->cd_mp = input->cd_mp;
2763                 break;
2764 
2765         default:
2766                 DBG(NULL, DWARN,
2767                     "dca_dupcrypto: unrecognised crypto data format");
2768                 return (CRYPTO_FAILED);
2769         }
2770 
2771         return (CRYPTO_SUCCESS);
2772 }
2773 
2774 /*
2775  * Performs validation checks on the input and output data structures.
2776  */
2777 int
2778 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2779 {
2780         int     rv = CRYPTO_SUCCESS;
2781 
2782         switch (input->cd_format) {
2783         case CRYPTO_DATA_RAW:
2784                 break;
2785 
2786         case CRYPTO_DATA_UIO:
2787                 /* we support only kernel buffer */
2788                 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2789                         DBG(NULL, DWARN, "non kernel input uio buffer");
2790                         rv = CRYPTO_ARGUMENTS_BAD;
2791                 }
2792                 break;
2793 
2794         case CRYPTO_DATA_MBLK:
2795                 break;
2796 
2797         default:
2798                 DBG(NULL, DWARN, "unrecognised input crypto data format");
2799                 rv = CRYPTO_ARGUMENTS_BAD;
2800         }
2801 
2802         switch (output->cd_format) {
2803         case CRYPTO_DATA_RAW:
2804                 break;
2805 
2806         case CRYPTO_DATA_UIO:
2807                 /* we support only kernel buffer */
2808                 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2809                         DBG(NULL, DWARN, "non kernel output uio buffer");
2810                         rv = CRYPTO_ARGUMENTS_BAD;
2811                 }
2812                 break;
2813 
2814         case CRYPTO_DATA_MBLK:
2815                 break;
2816 
2817         default:
2818                 DBG(NULL, DWARN, "unrecognised output crypto data format");
2819                 rv = CRYPTO_ARGUMENTS_BAD;
2820         }
2821 
2822         return (rv);
2823 }
2824 
2825 /*
2826  * data: source crypto_data_t struct
2827  * off: offset into the source before commencing copy
2828  * count: the amount of data to copy
2829  * dest: destination buffer
2830  */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int rv = CRYPTO_SUCCESS;
	uio_t *uiop;
	uint_t vec_idx;
	size_t cur_len;
	mblk_t *mp;

	/*
	 * Unlike dca_gather(), this routine only peeks at the data:
	 * cd_offset and cd_length are left untouched.
	 */
	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset adjusted size of the buffer.
	 */
	/*
	 * NOTE(review): if off > cd_length the subtraction may wrap
	 * (unsigned arithmetic) — presumably callers guarantee
	 * off <= cd_length; confirm.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		/* Flat buffer: a single bcopy suffices. */
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			/* 'off' is nonzero only for the first iovec copied */
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			/* 'off' is nonzero only for the first mblk copied */
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
2946 
2947 
2948 /*
2949  * Performs the input, output or hard scatter/gather checks on the specified
2950  * crypto_data_t struct. Returns true if the data is scatter/gather in nature
2951  * ie fails the test.
2952  */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t *uiop;
	mblk_t *mp;
	int rv = FALSE;

	/* Unknown formats/params fall through and report FALSE (not s/g). */
	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			/* more than one iovec means scattered */
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			/* a chained mblk means scattered */
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and is of word multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			/* multiple iovecs fail immediately */
			if (uiop->uio_iovcnt > 1) {
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			/* a chained mblk fails immediately */
			if (mp->b_cont != NULL) {
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			/* only the first iovec is examined here */
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			/* only the first mblk is examined here */
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
3072 
3073 /*
3074  * Increments the cd_offset and decrements the cd_length as the data is
3075  * gathered from the crypto_data_t struct.
3076  * The data is reverse-copied into the dest buffer if the flag is true.
3077  */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int	rv = CRYPTO_SUCCESS;
	uint_t	vec_idx;
	uio_t	*uiop;
	off_t	off = in->cd_offset;
	size_t	cur_len;
	mblk_t	*mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		/* 'off' equals in->cd_offset at this point */
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* consume the bytes from the source descriptor */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			/* consume the bytes from the source descriptor */
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			/* consume the bytes from the source descriptor */
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3207 
3208 /*
3209  * Increments the cd_offset and decrements the cd_length as the data is
3210  * gathered from the crypto_data_t struct.
3211  */
3212 int
3213 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
3214     int count)
3215 {
3216         int     rv = CRYPTO_SUCCESS;
3217         caddr_t baddr;
3218         uint_t  vec_idx;
3219         uio_t   *uiop;
3220         off_t   off = in->cd_offset;
3221         size_t  cur_len;
3222         mblk_t  *mp;
3223 
3224         /* Process the residual first */
3225         if (*residlen > 0) {
3226                 uint_t  num = min(count, *residlen);
3227                 bcopy(resid, dest, num);
3228                 *residlen -= num;
3229                 if (*residlen > 0) {
3230                         /*
3231                          * Requested amount 'count' is less than what's in
3232                          * the residual, so shuffle any remaining resid to
3233                          * the front.
3234                          */
3235                         baddr = resid + num;
3236                         bcopy(baddr, resid, *residlen);
3237                 }
3238                 dest += num;
3239                 count -= num;
3240         }
3241 
3242         /* Now process what's in the crypto_data_t structs */
3243         switch (in->cd_format) {
3244         case CRYPTO_DATA_RAW:
3245                 if (count > in->cd_length) {
3246                         /*
3247                          * The caller specified a length greater than the
3248                          * size of the buffer.
3249                          */
3250                         return (CRYPTO_DATA_LEN_RANGE);
3251                 }
3252                 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3253                 in->cd_offset += count;
3254                 in->cd_length -= count;
3255                 break;
3256 
3257         case CRYPTO_DATA_UIO:
3258                 /*
3259                  * Jump to the first iovec containing data to be processed.
3260                  */
3261                 uiop = in->cd_uio;
3262                 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3263                     off >= uiop->uio_iov[vec_idx].iov_len;
3264                     off -= uiop->uio_iov[vec_idx++].iov_len)
3265                         ;
3266                 if (vec_idx == uiop->uio_iovcnt) {
3267                         /*
3268                          * The caller specified an offset that is larger than
3269                          * the total size of the buffers it provided.
3270                          */
3271                         return (CRYPTO_DATA_LEN_RANGE);
3272                 }
3273 
3274                 /*
3275                  * Now process the iovecs.
3276                  */
3277                 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3278                         cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3279                             off, count);
3280                         bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
3281                             cur_len);
3282                         count -= cur_len;
3283                         dest += cur_len;
3284                         in->cd_offset += cur_len;
3285                         in->cd_length -= cur_len;
3286                         vec_idx++;
3287                         off = 0;
3288                 }
3289 
3290                 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3291                         /*
3292                          * The end of the specified iovec's was reached but
3293                          * the length requested could not be processed
3294                          * (requested to digest more data than it provided).
3295                          */
3296                         return (CRYPTO_DATA_LEN_RANGE);
3297                 }
3298                 break;
3299 
3300         case CRYPTO_DATA_MBLK:
3301                 /*
3302                  * Jump to the first mblk_t containing data to be processed.
3303                  */
3304                 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3305                     off -= MBLKL(mp), mp = mp->b_cont)
3306                         ;
3307                 if (mp == NULL) {
3308                         /*
3309                          * The caller specified an offset that is larger than
3310                          * the total size of the buffers it provided.
3311                          */
3312                         return (CRYPTO_DATA_LEN_RANGE);
3313                 }
3314 
3315                 /*
3316                  * Now do the processing on the mblk chain.
3317                  */
3318                 while (mp != NULL && count > 0) {
3319                         cur_len = min(MBLKL(mp) - off, count);
3320                         bcopy((char *)(mp->b_rptr + off), dest, cur_len);
3321                         count -= cur_len;
3322                         dest += cur_len;
3323                         in->cd_offset += cur_len;
3324                         in->cd_length -= cur_len;
3325                         mp = mp->b_cont;
3326                         off = 0;
3327                 }
3328 
3329                 if (mp == NULL && count > 0) {
3330                         /*
3331                          * The end of the mblk was reached but the length
3332                          * requested could not be processed, (requested to
3333                          * digest more data than it provided).
3334                          */
3335                         return (CRYPTO_DATA_LEN_RANGE);
3336                 }
3337                 break;
3338 
3339         default:
3340                 DBG(NULL, DWARN,
3341                     "dca_resid_gather: unrecognised crypto data format");
3342                 rv = CRYPTO_ARGUMENTS_BAD;
3343         }
3344         return (rv);
3345 }
3346 
3347 /*
3348  * Appends the data to the crypto_data_t struct increasing cd_length.
3349  * cd_offset is left unchanged.
3350  * Data is reverse-copied if the flag is TRUE.
3351  */
3352 int
3353 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3354 {
3355         int     rv = CRYPTO_SUCCESS;
3356         off_t   offset = out->cd_offset + out->cd_length;
3357         uint_t  vec_idx;
3358         uio_t   *uiop;
3359         size_t  cur_len;
3360         mblk_t  *mp;
3361 
3362         switch (out->cd_format) {
3363         case CRYPTO_DATA_RAW:
3364                 if (out->cd_raw.iov_len - offset < count) {
3365                         /* Trying to write out more than space available. */
3366                         return (CRYPTO_DATA_LEN_RANGE);
3367                 }
3368                 if (reverse)
3369                         dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3370                             count, count);
3371                 else
3372                         bcopy(src, out->cd_raw.iov_base + offset, count);
3373                 out->cd_length += count;
3374                 break;
3375 
3376         case CRYPTO_DATA_UIO:
3377                 /*
3378                  * Jump to the first iovec that can be written to.
3379                  */
3380                 uiop = out->cd_uio;
3381                 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3382                     offset >= uiop->uio_iov[vec_idx].iov_len;
3383                     offset -= uiop->uio_iov[vec_idx++].iov_len)
3384                         ;
3385                 if (vec_idx == uiop->uio_iovcnt) {
3386                         /*
3387                          * The caller specified an offset that is larger than
3388                          * the total size of the buffers it provided.
3389                          */
3390                         return (CRYPTO_DATA_LEN_RANGE);
3391                 }
3392 
3393                 /*
3394                  * Now process the iovecs.
3395                  */
3396                 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3397                         cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3398                             offset, count);
3399                         count -= cur_len;
3400                         if (reverse) {
3401                                 dca_reverse((void*) (src+count),
3402                                     uiop->uio_iov[vec_idx].iov_base +
3403                                     offset, cur_len, cur_len);
3404                         } else {
3405                                 bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3406                                     offset, cur_len);
3407                                 src += cur_len;
3408                         }
3409                         out->cd_length += cur_len;
3410                         vec_idx++;
3411                         offset = 0;
3412                 }
3413 
3414                 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3415                         /*
3416                          * The end of the specified iovec's was reached but
3417                          * the length requested could not be processed
3418                          * (requested to write more data than space provided).
3419                          */
3420                         return (CRYPTO_DATA_LEN_RANGE);
3421                 }
3422                 break;
3423 
3424         case CRYPTO_DATA_MBLK:
3425                 /*
3426                  * Jump to the first mblk_t that can be written to.
3427                  */
3428                 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3429                     offset -= MBLKL(mp), mp = mp->b_cont)
3430                         ;
3431                 if (mp == NULL) {
3432                         /*
3433                          * The caller specified an offset that is larger than
3434                          * the total size of the buffers it provided.
3435                          */
3436                         return (CRYPTO_DATA_LEN_RANGE);
3437                 }
3438 
3439                 /*
3440                  * Now do the processing on the mblk chain.
3441                  */
3442                 while (mp != NULL && count > 0) {
3443                         cur_len = min(MBLKL(mp) - offset, count);
3444                         count -= cur_len;
3445                         if (reverse) {
3446                                 dca_reverse((void*) (src+count),
3447                                     (char *)(mp->b_rptr + offset), cur_len,
3448                                     cur_len);
3449                         } else {
3450                                 bcopy(src, (char *)(mp->b_rptr + offset),
3451                                     cur_len);
3452                                 src += cur_len;
3453                         }
3454                         out->cd_length += cur_len;
3455                         mp = mp->b_cont;
3456                         offset = 0;
3457                 }
3458 
3459                 if (mp == NULL && count > 0) {
3460                         /*
3461                          * The end of the mblk was reached but the length
3462                          * requested could not be processed, (requested to
3463                          * digest more data than it provided).
3464                          */
3465                         return (CRYPTO_DATA_LEN_RANGE);
3466                 }
3467                 break;
3468 
3469         default:
3470                 DBG(NULL, DWARN, "unrecognised crypto data format");
3471                 rv = CRYPTO_ARGUMENTS_BAD;
3472         }
3473         return (rv);
3474 }
3475 
3476 /*
3477  * Compare two byte arrays in reverse order.
3478  * Return 0 if they are identical, 1 otherwise.
3479  */
3480 int
3481 dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
3482 {
3483         int i;
3484         caddr_t src, dst;
3485 
3486         if (!n)
3487                 return (0);
3488 
3489         src = ((caddr_t)s1) + n - 1;
3490         dst = (caddr_t)s2;
3491         for (i = 0; i < n; i++) {
3492                 if (*src != *dst)
3493                         return (1);
3494                 src--;
3495                 dst++;
3496         }
3497 
3498         return (0);
3499 }
3500 
3501 
3502 /*
3503  * This calculates the size of a bignum in bits, specifically not counting
3504  * leading zero bits.  This size calculation must be done *before* any
3505  * endian reversal takes place (i.e. the numbers are in absolute big-endian
3506  * order.)
3507  */
3508 int
3509 dca_bitlen(unsigned char *bignum, int bytelen)
3510 {
3511         unsigned char   msbyte;
3512         int             i, j;
3513 
3514         for (i = 0; i < bytelen - 1; i++) {
3515                 if (bignum[i] != 0) {
3516                         break;
3517                 }
3518         }
3519         msbyte = bignum[i];
3520         for (j = 8; j > 1; j--) {
3521                 if (msbyte & 0x80) {
3522                         break;
3523                 }
3524                 msbyte <<= 1;
3525         }
3526         return ((8 * (bytelen - i - 1)) + j);
3527 }
3528 
3529 /*
3530  * This compares to bignums (in big-endian order).  It ignores leading
3531  * null bytes.  The result semantics follow bcmp, mempcmp, strcmp, etc.
3532  */
3533 int
3534 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3535 {
3536         while ((n1len > 1) && (*n1 == 0)) {
3537                 n1len--;
3538                 n1++;
3539         }
3540         while ((n2len > 1) && (*n2 == 0)) {
3541                 n2len--;
3542                 n2++;
3543         }
3544         if (n1len != n2len) {
3545                 return (n1len - n2len);
3546         }
3547         while ((n1len > 1) && (*n1 == *n2)) {
3548                 n1++;
3549                 n2++;
3550                 n1len--;
3551         }
3552         return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3553 }
3554 
3555 /*
3556  * Return array of key attributes.
3557  */
3558 crypto_object_attribute_t *
3559 dca_get_key_attr(crypto_key_t *key)
3560 {
3561         if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3562             (key->ck_count == 0)) {
3563                 return (NULL);
3564         }
3565 
3566         return (key->ck_attrs);
3567 }
3568 
3569 /*
3570  * If attribute type exists valp points to it's 32-bit value.
3571  */
3572 int
3573 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3574     uint64_t atype, uint32_t *valp)
3575 {
3576         crypto_object_attribute_t       *bap;
3577 
3578         bap = dca_find_attribute(attrp, atnum, atype);
3579         if (bap == NULL) {
3580                 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3581         }
3582 
3583         *valp = *bap->oa_value;
3584 
3585         return (CRYPTO_SUCCESS);
3586 }
3587 
3588 /*
3589  * If attribute type exists data contains the start address of the value,
3590  * and numelems contains it's length.
3591  */
3592 int
3593 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3594     uint64_t atype, void **data, unsigned int *numelems)
3595 {
3596         crypto_object_attribute_t       *bap;
3597 
3598         bap = dca_find_attribute(attrp, atnum, atype);
3599         if (bap == NULL) {
3600                 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3601         }
3602 
3603         *data = bap->oa_value;
3604         *numelems = bap->oa_value_len;
3605 
3606         return (CRYPTO_SUCCESS);
3607 }
3608 
3609 /*
3610  * Finds entry of specified name. If it is not found dca_find_attribute returns
3611  * NULL.
3612  */
3613 crypto_object_attribute_t *
3614 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3615     uint64_t atype)
3616 {
3617         while (atnum) {
3618                 if (attrp->oa_type == atype)
3619                         return (attrp);
3620                 atnum--;
3621                 attrp++;
3622         }
3623         return (NULL);
3624 }
3625 
3626 /*
3627  * Return the address of the first data buffer. If the data format is
3628  * unrecognised return NULL.
3629  */
3630 caddr_t
3631 dca_bufdaddr(crypto_data_t *data)
3632 {
3633         switch (data->cd_format) {
3634         case CRYPTO_DATA_RAW:
3635                 return (data->cd_raw.iov_base + data->cd_offset);
3636         case CRYPTO_DATA_UIO:
3637                 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3638         case CRYPTO_DATA_MBLK:
3639                 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3640         default:
3641                 DBG(NULL, DWARN,
3642                     "dca_bufdaddr: unrecognised crypto data format");
3643                 return (NULL);
3644         }
3645 }
3646 
3647 static caddr_t
3648 dca_bufdaddr_out(crypto_data_t *data)
3649 {
3650         size_t offset = data->cd_offset + data->cd_length;
3651 
3652         switch (data->cd_format) {
3653         case CRYPTO_DATA_RAW:
3654                 return (data->cd_raw.iov_base + offset);
3655         case CRYPTO_DATA_UIO:
3656                 return (data->cd_uio->uio_iov[0].iov_base + offset);
3657         case CRYPTO_DATA_MBLK:
3658                 return ((char *)data->cd_mp->b_rptr + offset);
3659         default:
3660                 DBG(NULL, DWARN,
3661                     "dca_bufdaddr_out: unrecognised crypto data format");
3662                 return (NULL);
3663         }
3664 }
3665 
3666 /*
3667  * Control entry points.
3668  */
3669 
/*
 * Report provider readiness to the framework; this driver always
 * reports CRYPTO_PROVIDER_READY.
 */
/* ARGSUSED */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
3676 
3677 /*
3678  * Cipher (encrypt/decrypt) entry points.
3679  */
3680 
/*
 * Initialize an encryption context for DES, 3DES, or RSA.  On success the
 * per-request private data is linked onto the softc's context list so it
 * can be found again (e.g. for cleanup).  Returns CRYPTO_MECHANISM_INVALID
 * for any other mechanism type.
 */
/* ARGSUSED */
static int
dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);

	/*
	 * Track the newly created context on the per-device list, under
	 * the list lock, only after successful initialization.
	 */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
3724 
/*
 * Single-part encrypt entry point.  A NULL ciphertext requests an
 * in-place operation: DR_INPLACE is set on the request and plaintext is
 * used for output as well.  On any failure other than CRYPTO_QUEUED,
 * CRYPTO_SUCCESS, or CRYPTO_BUFFER_TOO_SMALL (where the caller may
 * retry with a larger buffer), the output length is zeroed.
 */
/* ARGSUSED */
static int
dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt: started");

	/* handle inplace ops */
	if (!ciphertext) {
		dca_request_t *reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, plaintext, ciphertext, req,
		    DCA_RSA_ENC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* On hard failure, report zero bytes of output. */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);

	return (error);
}
3779 
3780 /* ARGSUSED */
3781 static int
3782 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3783     crypto_data_t *ciphertext, crypto_req_handle_t req)
3784 {
3785         int error = CRYPTO_FAILED;
3786         dca_t *softc;
3787         /* LINTED E_FUNC_SET_NOT_USED */
3788         int instance;
3789 
3790         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3791                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3792 
3793         /* extract softc and instance number from context */
3794         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3795         DBG(softc, DENTRY, "dca_encrypt_update: started");
3796 
3797         /* handle inplace ops */
3798         if (!ciphertext) {
3799                 dca_request_t *reqp = ctx->cc_provider_private;
3800                 reqp->dr_flags |= DR_INPLACE;
3801                 ciphertext = plaintext;
3802         }
3803 
3804         /* check mechanism */
3805         switch (DCA_MECH_FROM_CTX(ctx)) {
3806         case DES_CBC_MECH_INFO_TYPE:
3807                 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3808                     DR_ENCRYPT);
3809                 break;
3810         case DES3_CBC_MECH_INFO_TYPE:
3811                 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3812                     DR_ENCRYPT | DR_TRIPLE);
3813                 break;
3814         default:
3815                 /* Should never reach here */
3816                 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3817                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3818                 error = CRYPTO_MECHANISM_INVALID;
3819         }
3820 
3821         DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3822 
3823         return (error);
3824 }
3825 
3826 /* ARGSUSED */
3827 static int
3828 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3829     crypto_req_handle_t req)
3830 {
3831         int error = CRYPTO_FAILED;
3832         dca_t *softc;
3833         /* LINTED E_FUNC_SET_NOT_USED */
3834         int instance;
3835 
3836         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3837                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3838 
3839         /* extract softc and instance number from context */
3840         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3841         DBG(softc, DENTRY, "dca_encrypt_final: started");
3842 
3843         /* check mechanism */
3844         switch (DCA_MECH_FROM_CTX(ctx)) {
3845         case DES_CBC_MECH_INFO_TYPE:
3846                 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3847                 break;
3848         case DES3_CBC_MECH_INFO_TYPE:
3849                 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3850                 break;
3851         default:
3852                 /* Should never reach here */
3853                 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3854                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3855                 error = CRYPTO_MECHANISM_INVALID;
3856         }
3857 
3858         DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3859 
3860         return (error);
3861 }
3862 
/*
 * Atomic (single-call, no persistent context) encrypt.  Pre-built
 * context templates are not supported and are rejected outright.  A NULL
 * ciphertext requests an in-place operation.  Note that unlike
 * dca_encrypt(), CRYPTO_BUFFER_TOO_SMALL also zeroes the output length
 * here.
 */
/* ARGSUSED */
static int
dca_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_encrypt_atomic: started");

	/* This provider does not support context templates. */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* handle inplace ops */
	if (!ciphertext) {
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* On failure, report zero bytes of output. */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);

	return (error);
}
3914 
/*
 * Initialize a decryption context for DES, 3DES, or RSA.  Mirrors
 * dca_encrypt_init(): on success the per-request private data is linked
 * onto the softc's context list.  Returns CRYPTO_MECHANISM_INVALID for
 * any other mechanism type.
 */
/* ARGSUSED */
static int
dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);

	/*
	 * Track the newly created context on the per-device list, under
	 * the list lock, only after successful initialization.
	 */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
3958 
/*
 * Single-part decrypt entry point.  A NULL plaintext requests an
 * in-place operation: DR_INPLACE is set on the request and ciphertext is
 * used for output as well.  On any failure other than CRYPTO_QUEUED,
 * CRYPTO_SUCCESS, or CRYPTO_BUFFER_TOO_SMALL, the output length is
 * zeroed.
 */
/* ARGSUSED */
static int
dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt: started");

	/* handle inplace ops */
	if (!plaintext) {
		dca_request_t *reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		plaintext = ciphertext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, ciphertext, plaintext, req,
		    DCA_RSA_DEC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	/*
	 * On hard failure, report zero bytes of output.  The extra NULL
	 * check (absent in dca_encrypt) only matters if both buffers were
	 * passed in NULL.
	 */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		if (plaintext)
			plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);

	return (error);
}
4014 
4015 /* ARGSUSED */
4016 static int
4017 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
4018     crypto_data_t *plaintext, crypto_req_handle_t req)
4019 {
4020         int error = CRYPTO_FAILED;
4021         dca_t *softc;
4022         /* LINTED E_FUNC_SET_NOT_USED */
4023         int instance;
4024 
4025         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4026                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4027 
4028         /* extract softc and instance number from context */
4029         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4030         DBG(softc, DENTRY, "dca_decrypt_update: started");
4031 
4032         /* handle inplace ops */
4033         if (!plaintext) {
4034                 dca_request_t *reqp = ctx->cc_provider_private;
4035                 reqp->dr_flags |= DR_INPLACE;
4036                 plaintext = ciphertext;
4037         }
4038 
4039         /* check mechanism */
4040         switch (DCA_MECH_FROM_CTX(ctx)) {
4041         case DES_CBC_MECH_INFO_TYPE:
4042                 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4043                     DR_DECRYPT);
4044                 break;
4045         case DES3_CBC_MECH_INFO_TYPE:
4046                 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4047                     DR_DECRYPT | DR_TRIPLE);
4048                 break;
4049         default:
4050                 /* Should never reach here */
4051                 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
4052                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4053                 error = CRYPTO_MECHANISM_INVALID;
4054         }
4055 
4056         DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4057 
4058         return (error);
4059 }
4060 
4061 /* ARGSUSED */
4062 static int
4063 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4064     crypto_req_handle_t req)
4065 {
4066         int error = CRYPTO_FAILED;
4067         dca_t *softc;
4068         /* LINTED E_FUNC_SET_NOT_USED */
4069         int instance;
4070 
4071         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4072                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4073 
4074         /* extract softc and instance number from context */
4075         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4076         DBG(softc, DENTRY, "dca_decrypt_final: started");
4077 
4078         /* check mechanism */
4079         switch (DCA_MECH_FROM_CTX(ctx)) {
4080         case DES_CBC_MECH_INFO_TYPE:
4081                 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4082                 break;
4083         case DES3_CBC_MECH_INFO_TYPE:
4084                 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4085                 break;
4086         default:
4087                 /* Should never reach here */
4088                 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4089                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4090                 error = CRYPTO_MECHANISM_INVALID;
4091         }
4092 
4093         DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4094 
4095         return (error);
4096 }
4097 
/*
 * KCF SPI entry point: single-call (atomic) decrypt.  Dispatches to the
 * DES/3DES or RSA atomic helpers by mechanism; no context is created,
 * so a pre-computed template is rejected.
 */
/* ARGSUSED */
static int
dca_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_decrypt_atomic: started");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* handle inplace ops */
	if (!plaintext) {
		plaintext = ciphertext;
	}

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* unless the request was queued or succeeded, report no output */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);

	return (error);
}
4149 
4150 /*
4151  * Sign entry points.
4152  */
4153 
/*
 * KCF SPI entry point: initialize a sign context.  Only RSA and DSA are
 * dispatched; on success the new private context is enlisted so detach
 * (dca_free_context_list) can reclaim it.
 */
/* ARGSUSED */
static int
dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_init: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);

	/* track the context so dca_free_context_list() can find it */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
4196 
4197 static int
4198 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4199     crypto_data_t *signature, crypto_req_handle_t req)
4200 {
4201         int error = CRYPTO_FAILED;
4202         dca_t *softc;
4203         /* LINTED E_FUNC_SET_NOT_USED */
4204         int instance;
4205 
4206         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4207                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4208 
4209         /* extract softc and instance number from context */
4210         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4211         DBG(softc, DENTRY, "dca_sign: started\n");
4212 
4213         /* check mechanism */
4214         switch (DCA_MECH_FROM_CTX(ctx)) {
4215         case RSA_PKCS_MECH_INFO_TYPE:
4216         case RSA_X_509_MECH_INFO_TYPE:
4217                 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4218                 break;
4219         case DSA_MECH_INFO_TYPE:
4220                 error = dca_dsa_sign(ctx, data, signature, req);
4221                 break;
4222         default:
4223                 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4224                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4225                 error = CRYPTO_MECHANISM_INVALID;
4226         }
4227 
4228         DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4229 
4230         return (error);
4231 }
4232 
4233 /* ARGSUSED */
4234 static int
4235 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4236     crypto_req_handle_t req)
4237 {
4238         int error = CRYPTO_MECHANISM_INVALID;
4239         dca_t *softc;
4240         /* LINTED E_FUNC_SET_NOT_USED */
4241         int instance;
4242 
4243         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4244                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4245 
4246         /* extract softc and instance number from context */
4247         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4248         DBG(softc, DENTRY, "dca_sign_update: started\n");
4249 
4250         cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4251             "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4252 
4253         DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4254 
4255         return (error);
4256 }
4257 
4258 /* ARGSUSED */
4259 static int
4260 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4261     crypto_req_handle_t req)
4262 {
4263         int error = CRYPTO_MECHANISM_INVALID;
4264         dca_t *softc;
4265         /* LINTED E_FUNC_SET_NOT_USED */
4266         int instance;
4267 
4268         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4269                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4270 
4271         /* extract softc and instance number from context */
4272         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4273         DBG(softc, DENTRY, "dca_sign_final: started\n");
4274 
4275         cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4276             "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4277 
4278         DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4279 
4280         return (error);
4281 }
4282 
/*
 * KCF SPI entry point: single-call (atomic) sign.  RSA and DSA only;
 * no context is created, so a pre-computed template is rejected.
 */
static int
dca_sign_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_sign_atomic: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);

	return (error);
}
4318 
/*
 * KCF SPI entry point: initialize a sign-with-recovery context.  Only
 * the RSA mechanisms are supported; on success the private context is
 * enlisted so detach can reclaim it.
 */
/* ARGSUSED */
static int
dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);

	/* track the context so dca_free_context_list() can find it */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
4357 
/*
 * KCF SPI entry point: single-part sign-with-recovery using a context
 * prepared by dca_sign_recover_init().  RSA only.
 */
static int
dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);

	return (error);
}
4390 
/*
 * KCF SPI entry point: single-call (atomic) sign-with-recovery.  RSA
 * only; a pre-computed template is rejected.
 */
static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* instance is assigned but never read here (see LINTED above) */
	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);

	return (error);
}
4425 
4426 /*
4427  * Verify entry points.
4428  */
4429 
/*
 * KCF SPI entry point: initialize a verify context.  RSA and DSA only;
 * on success the private context is enlisted so detach can reclaim it.
 */
/* ARGSUSED */
static int
dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_init: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);

	/* track the context so dca_free_context_list() can find it */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
4472 
4473 static int
4474 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4475     crypto_req_handle_t req)
4476 {
4477         int error = CRYPTO_FAILED;
4478         dca_t *softc;
4479         /* LINTED E_FUNC_SET_NOT_USED */
4480         int instance;
4481 
4482         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4483                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4484 
4485         /* extract softc and instance number from context */
4486         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4487         DBG(softc, DENTRY, "dca_verify: started\n");
4488 
4489         /* check mechanism */
4490         switch (DCA_MECH_FROM_CTX(ctx)) {
4491         case RSA_PKCS_MECH_INFO_TYPE:
4492         case RSA_X_509_MECH_INFO_TYPE:
4493                 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4494                 break;
4495         case DSA_MECH_INFO_TYPE:
4496                 error = dca_dsa_verify(ctx, data, signature, req);
4497                 break;
4498         default:
4499                 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4500                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4501                 error = CRYPTO_MECHANISM_INVALID;
4502         }
4503 
4504         DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4505 
4506         return (error);
4507 }
4508 
4509 /* ARGSUSED */
4510 static int
4511 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4512     crypto_req_handle_t req)
4513 {
4514         int error = CRYPTO_MECHANISM_INVALID;
4515         dca_t *softc;
4516         /* LINTED E_FUNC_SET_NOT_USED */
4517         int instance;
4518 
4519         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4520                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4521 
4522         /* extract softc and instance number from context */
4523         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4524         DBG(softc, DENTRY, "dca_verify_update: started\n");
4525 
4526         cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4527             "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4528 
4529         DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4530 
4531         return (error);
4532 }
4533 
4534 /* ARGSUSED */
4535 static int
4536 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4537     crypto_req_handle_t req)
4538 {
4539         int error = CRYPTO_MECHANISM_INVALID;
4540         dca_t *softc;
4541         /* LINTED E_FUNC_SET_NOT_USED */
4542         int instance;
4543 
4544         if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4545                 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4546 
4547         /* extract softc and instance number from context */
4548         DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4549         DBG(softc, DENTRY, "dca_verify_final: started\n");
4550 
4551         cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4552             "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4553 
4554         DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4555 
4556         return (error);
4557 }
4558 
/*
 * KCF SPI entry point: single-call (atomic) verify.  RSA and DSA only;
 * a pre-computed template is rejected.  Note the operand order differs
 * between the two paths: RSA passes (signature, data), DSA passes
 * (data, signature).
 */
static int
dca_verify_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_atomic: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);

	return (error);
}
4594 
/*
 * KCF SPI entry point: initialize a verify-with-recovery context.  RSA
 * only; on success the private context is enlisted so detach can
 * reclaim it.
 */
/* ARGSUSED */
static int
dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		/* error was pre-set to CRYPTO_MECHANISM_INVALID above */
		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
	}

	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);

	/* track the context so dca_free_context_list() can find it */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
4632 
/*
 * KCF SPI entry point: single-part verify-with-recovery.  RSA only;
 * the signature is the input operand and the recovered data the output.
 */
static int
dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_data_t *data, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
		break;
	default:
		/* error was pre-set to CRYPTO_MECHANISM_INVALID above */
		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
	}

	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);

	return (error);
}
4664 
/*
 * KCF SPI entry point: single-call (atomic) verify-with-recovery.  RSA
 * only; a pre-computed template is rejected.
 */
static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");

	/* this provider does not pre-compute context templates */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY,
	    "dca_verify_recover_atomic: done, err = 0x%x", error);

	return (error);
}
4697 
4698 /*
4699  * Random number entry points.
4700  */
4701 
4702 /* ARGSUSED */
4703 static int
4704 dca_generate_random(crypto_provider_handle_t provider,
4705     crypto_session_id_t session_id,
4706     uchar_t *buf, size_t len, crypto_req_handle_t req)
4707 {
4708         int error = CRYPTO_FAILED;
4709         dca_t *softc = (dca_t *)provider;
4710         /* LINTED E_FUNC_SET_NOT_USED */
4711         int instance;
4712 
4713         instance = ddi_get_instance(softc->dca_dip);
4714         DBG(softc, DENTRY, "dca_generate_random: started");
4715 
4716         error = dca_rng(softc, buf, len, req);
4717 
4718         DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4719 
4720         return (error);
4721 }
4722 
4723 /*
4724  * Context management entry points.
4725  */
4726 
/*
 * KCF SPI entry point: release the private state of a crypto context.
 * The context is first unhooked from the per-instance list maintained
 * by the *_init entry points, then freed by mechanism type.
 */
int
dca_free_context(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_free_context: entered");

	/* nothing to free if init never attached private state */
	if (ctx->cc_provider_private == NULL)
		return (error);

	/* unhook from the instance's context list before freeing */
	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);

	error = dca_free_context_low(ctx);

	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);

	return (error);
}
4750 
4751 static int
4752 dca_free_context_low(crypto_ctx_t *ctx)
4753 {
4754         int error = CRYPTO_SUCCESS;
4755 
4756         /* check mechanism */
4757         switch (DCA_MECH_FROM_CTX(ctx)) {
4758         case DES_CBC_MECH_INFO_TYPE:
4759         case DES3_CBC_MECH_INFO_TYPE:
4760                 dca_3desctxfree(ctx);
4761                 break;
4762         case RSA_PKCS_MECH_INFO_TYPE:
4763         case RSA_X_509_MECH_INFO_TYPE:
4764                 dca_rsactxfree(ctx);
4765                 break;
4766         case DSA_MECH_INFO_TYPE:
4767                 dca_dsactxfree(ctx);
4768                 break;
4769         default:
4770                 /* Should never reach here */
4771                 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4772                     "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4773                 error = CRYPTO_MECHANISM_INVALID;
4774         }
4775 
4776         return (error);
4777 }
4778 
4779 
4780 /* Free any unfreed private context. It is called in detach. */
4781 static void
4782 dca_free_context_list(dca_t *dca)
4783 {
4784         dca_listnode_t  *node;
4785         crypto_ctx_t    ctx;
4786 
4787         (void) memset(&ctx, 0, sizeof (ctx));
4788         ctx.cc_provider = dca;
4789 
4790         while ((node = dca_delist2(&dca->dca_ctx_list,
4791             &dca->dca_ctx_list_lock)) != NULL) {
4792                 ctx.cc_provider_private = node;
4793                 (void) dca_free_context_low(&ctx);
4794         }
4795 }
4796 
/*
 * Extended-info callback for the symmetric-cipher slot: delegates to
 * ext_info_base() with the symmetric identity string.
 */
static int
ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
}
4803 
4804 static int
4805 ext_info_asym(crypto_provider_handle_t prov,
4806     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4807 {
4808         int rv;
4809 
4810         rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4811         /* The asymmetric cipher slot supports random */
4812         ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4813 
4814         return (rv);
4815 }
4816 
4817 /* ARGSUSED */
4818 static int
4819 ext_info_base(crypto_provider_handle_t prov,
4820     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4821 {
4822         dca_t   *dca = (dca_t *)prov;
4823         int len;
4824 
4825         /* Label */
4826         (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4827             ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4828         len = strlen((char *)ext_info->ei_label);
4829         (void) memset(ext_info->ei_label + len, ' ',
4830             CRYPTO_EXT_SIZE_LABEL - len);
4831 
4832         /* Manufacturer ID */
4833         (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4834             DCA_MANUFACTURER_ID);
4835         len = strlen((char *)ext_info->ei_manufacturerID);
4836         (void) memset(ext_info->ei_manufacturerID + len, ' ',
4837             CRYPTO_EXT_SIZE_MANUF - len);
4838 
4839         /* Model */
4840         (void) sprintf((char *)ext_info->ei_model, dca->dca_model);
4841 
4842         DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4843 
4844         len = strlen((char *)ext_info->ei_model);
4845         (void) memset(ext_info->ei_model + len, ' ',
4846             CRYPTO_EXT_SIZE_MODEL - len);
4847 
4848         /* Serial Number. Blank for Deimos */
4849         (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4850 
4851         ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4852 
4853         ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4854         ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4855         ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4856         ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4857         ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4858         ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4859         ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4860         ext_info->ei_hardware_version.cv_major = 0;
4861         ext_info->ei_hardware_version.cv_minor = 0;
4862         ext_info->ei_firmware_version.cv_major = 0;
4863         ext_info->ei_firmware_version.cv_minor = 0;
4864 
4865         /* Time. No need to be supplied for token without a clock */
4866         ext_info->ei_time[0] = '\000';
4867 
4868         return (CRYPTO_SUCCESS);
4869 }
4870 
/*
 * Register this instance's Fault Management capabilities with the I/O
 * fault services framework, honoring any "fm-capable" override from
 * dca.conf.  Access/DMA attributes are switched to the error-flagging
 * variants only when some FMA capability is enabled.
 */
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() =  0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
4920 
4921 
/*
 * Tear down the FMA registrations made by dca_fma_init(), in reverse
 * order: pci ereport resources, the error callback, then the fault
 * services registration itself.
 */
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}
4950 
4951 
4952 /*
4953  * The IO fault service error handling callback function
4954  */
4955 /*ARGSUSED*/
4956 static int
4957 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4958 {
4959         dca_t           *dca = (dca_t *)impl_data;
4960 
4961         pci_ereport_post(dip, err, NULL);
4962         if (err->fme_status == DDI_FM_FATAL) {
4963                 dca_failure(dca, DDI_DATAPATH_FAULT,
4964                     DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4965                     "fault PCI in FMA callback.");
4966         }
4967         return (err->fme_status);
4968 }
4969 
4970 
4971 static int
4972 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4973     dca_fma_eclass_t eclass_index)
4974 {
4975         ddi_fm_error_t  de;
4976         int             version = 0;
4977 
4978         ddi_fm_acc_err_get(handle, &de, version);
4979         if (de.fme_status != DDI_FM_OK) {
4980                 dca_failure(dca, DDI_DATAPATH_FAULT,
4981                     eclass_index, fm_ena_increment(de.fme_ena),
4982                     CRYPTO_DEVICE_ERROR, "");
4983                 return (DDI_FAILURE);
4984         }
4985 
4986         return (DDI_SUCCESS);
4987 }
4988 
4989 int
4990 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4991     dca_fma_eclass_t eclass_index)
4992 {
4993         ddi_fm_error_t  de;
4994         int             version = 0;
4995 
4996         ddi_fm_dma_err_get(handle, &de, version);
4997         if (de.fme_status != DDI_FM_OK) {
4998                 dca_failure(dca, DDI_DATAPATH_FAULT,
4999                     eclass_index, fm_ena_increment(de.fme_ena),
5000                     CRYPTO_DEVICE_ERROR, "");
5001                 return (DDI_FAILURE);
5002         }
5003         return (DDI_SUCCESS);
5004 }
5005 
5006 static uint64_t
5007 dca_ena(uint64_t ena)
5008 {
5009         if (ena == 0)
5010                 ena = fm_ena_generate(0, FM_ENA_FMT1);
5011         else
5012                 ena = fm_ena_increment(ena);
5013         return (ena);
5014 }
5015 
5016 static char *
5017 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
5018 {
5019         if (strstr(model, "500"))
5020                 return (dca_fma_eclass_sca500[index]);
5021         else
5022                 return (dca_fma_eclass_sca1000[index]);
5023 }