4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * This file is part of the core Kernel Cryptographic Framework.
28 * It implements the SPI functions exported to cryptographic
33 #include <sys/zfs_context.h>
34 #include <sys/crypto/common.h>
35 #include <sys/crypto/impl.h>
36 #include <sys/crypto/sched_impl.h>
37 #include <sys/crypto/spi.h>
/*
 * Tunables: thread count and minalloc/maxalloc values passed to
 * taskq_create() when a per-provider taskq is created for a hardware
 * provider in crypto_register_provider() below.
 */
40 * minalloc and maxalloc values to be used for taskq_create().
42 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
43 int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
44 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
/* Forward declarations for the file-local helpers defined below. */
46 static void remove_provider(kcf_provider_desc_t *);
47 static void process_logical_providers(crypto_provider_info_t *,
48 kcf_provider_desc_t *);
49 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
50 static int kcf_prov_kstat_update(kstat_t *, int);
51 static void delete_kstat(kcf_provider_desc_t *);
/*
 * Name/type template for the four per-provider kstat counters; bcopy()d
 * into each provider descriptor's pd_ks_data when the provider's kstat
 * is created in crypto_register_provider().
 */
53 static kcf_prov_stats_t kcf_stats_ks_data_template = {
54 { "kcf_ops_total", KSTAT_DATA_UINT64 },
55 { "kcf_ops_passed", KSTAT_DATA_UINT64 },
56 { "kcf_ops_failed", KSTAT_DATA_UINT64 },
57 { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
/* NOTE(review): the closing "};" appears to have been dropped by extraction. */
/*
 * Copy one ops sub-vector's target from src to dst, if the provider
 * supplied that sub-vector (non-NULL pointer).  Wrapped in
 * do { } while (0) so the macro expands to exactly one statement and
 * composes safely with if/else at the call site; the previous bare-if
 * form had a dangling-else hazard and left a stray semicolon in the
 * expansion.
 */
#define	KCF_SPI_COPY_OPS(src, dst, ops) do {				\
	if ((src)->ops != NULL)						\
		*((dst)->ops) = *((src)->ops);				\
} while (0)
64 * Copy an ops vector from src to dst. Used during provider registration
65 * to copy the ops vector from the provider info structure to the
66 * provider descriptor maintained by KCF.
67 * Copying the ops vector specified by the provider is needed since the
68 * framework does not require the provider info structure to be
72 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
74 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
75 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
76 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
77 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
78 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
79 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
80 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
81 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
82 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
83 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
84 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
85 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
86 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
87 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
91 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
93 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
97 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
99 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
103 * This routine is used to add cryptographic providers to the KEF framework.
104 * Providers pass a crypto_provider_info structure to crypto_register_provider()
105 * and get back a handle. The crypto_provider_info structure contains a
106 * list of mechanisms supported by the provider and an ops vector containing
107 * provider entry points. Hardware providers call this routine in their attach
108 * routines. Software providers call this routine in their _init() routine.
/*
 * Register a provider with the framework.  Validates the SPI version
 * and provider type in 'info', builds and populates a provider
 * descriptor (description string, ops vector, mechanism list), adds it
 * to the provider table, creates a taskq for hardware providers, opens
 * a kernel session for session-oriented providers, installs a
 * per-provider kstat, then marks the provider READY and notifies
 * interested clients.  On success the opaque KCF handle is returned
 * through 'handle'; returns CRYPTO_SUCCESS or a CRYPTO_* error.
 *
 * NOTE(review): this copy of the file appears to have lost lines
 * (local declarations such as the kstat name buffer, error labels,
 * else arms and closing braces), so the control flow below is
 * incomplete as shown — confirm against the upstream source before
 * changing any logic here.
 */
111 crypto_register_provider(crypto_provider_info_t *info,
112 crypto_kcf_provider_handle_t *handle)
116 kcf_provider_desc_t *prov_desc = NULL;
117 int ret = CRYPTO_ARGUMENTS_BAD;
/* Reject SPI versions newer than this framework understands. */
119 if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
120 return (CRYPTO_VERSION_MISMATCH);
123 * Check provider type, must be software, hardware, or logical.
125 if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
126 info->pi_provider_type != CRYPTO_SW_PROVIDER &&
127 info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
128 return (CRYPTO_ARGUMENTS_BAD);
131 * Allocate and initialize a new provider descriptor. We also
132 * hold it and release it when done.
134 prov_desc = kcf_alloc_provider_desc(info);
135 KCF_PROV_REFHOLD(prov_desc);
137 prov_desc->pd_prov_type = info->pi_provider_type;
139 /* provider-private handle, opaque to KCF */
140 prov_desc->pd_prov_handle = info->pi_provider_handle;
142 /* copy provider description string */
143 if (info->pi_provider_description != NULL) {
145 * pi_provider_descriptor is a string that can contain
146 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
147 * INCLUDING the terminating null character. A bcopy()
148 * is necessary here as pd_description should not have
149 * a null character. See comments in kcf_alloc_provider_desc()
150 * for details on pd_description field.
152 bcopy(info->pi_provider_description, prov_desc->pd_description,
153 MIN(strlen(info->pi_provider_description),
154 (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
/*
 * Logical providers carry no ops vector; for the others, v1 ops are
 * always copied and the v2/v3 sub-vectors only for newer SPI versions.
 */
157 if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
158 if (info->pi_ops_vector == NULL) {
161 copy_ops_vector_v1(info->pi_ops_vector,
162 prov_desc->pd_ops_vector);
163 if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
164 copy_ops_vector_v2(info->pi_ops_vector,
165 prov_desc->pd_ops_vector);
166 prov_desc->pd_flags = info->pi_flags;
168 if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
169 copy_ops_vector_v3(info->pi_ops_vector,
170 prov_desc->pd_ops_vector);
174 /* object_ops and nostore_key_ops are mutually exclusive */
175 if (prov_desc->pd_ops_vector->co_object_ops &&
176 prov_desc->pd_ops_vector->co_nostore_key_ops) {
180 /* process the mechanisms supported by the provider */
181 if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
185 * Add provider to providers tables, also sets the descriptor
188 if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
189 undo_register_provider(prov_desc, B_FALSE);
194 * We create a taskq only for a hardware provider. The global
195 * software queue is used for software providers. We handle ordering
196 * of multi-part requests in the taskq routine. So, it is safe to
197 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
198 * to keep some entries cached to improve performance.
200 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
201 prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
202 crypto_taskq_threads, minclsyspri,
203 crypto_taskq_minalloc, crypto_taskq_maxalloc,
206 prov_desc->pd_sched_info.ks_taskq = NULL;
208 /* no kernel session to logical providers */
209 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
211 * Open a session for session-oriented providers. This session
212 * is used for all kernel consumers. This is fine as a provider
213 * is required to support multiple thread access to a session.
214 * We can do this only after the taskq has been created as we
215 * do a kcf_submit_request() to open the session.
217 if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
218 kcf_req_params_t params;
/* NOTE(review): "¶ms" below looks like a mis-encoding of "&params". */
220 KCF_WRAP_SESSION_OPS_PARAMS(¶ms,
221 KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
222 CRYPTO_USER, NULL, 0, prov_desc);
223 ret = kcf_submit_request(prov_desc, NULL, NULL, ¶ms,
226 if (ret != CRYPTO_SUCCESS) {
227 undo_register_provider(prov_desc, B_TRUE);
234 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
236 * Create the kstat for this provider. There is a kstat
237 * installed for each successfully registered provider.
238 * This kstat is deleted, when the provider unregisters.
/* "NONAME" is a placeholder — presumably the module name is unavailable here. */
240 if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
241 ks_name = kmem_asprintf("%s_%s",
242 "NONAME", "provider_stats");
244 ks_name = kmem_asprintf("%s_%d_%u_%s",
245 "NONAME", 0, prov_desc->pd_prov_id,
249 prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
250 KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
251 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
/*
 * The kstat holds both a regular and an internal reference on the
 * descriptor via ks_private; both are released in delete_kstat().
 */
253 if (prov_desc->pd_kstat != NULL) {
254 bcopy(&kcf_stats_ks_data_template,
255 &prov_desc->pd_ks_data,
256 sizeof (kcf_stats_ks_data_template));
257 prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
258 KCF_PROV_REFHOLD(prov_desc);
259 KCF_PROV_IREFHOLD(prov_desc);
260 prov_desc->pd_kstat->ks_private = prov_desc;
261 prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
262 kstat_install(prov_desc->pd_kstat);
264 kmem_strfree(ks_name);
/* Cross-link a hardware provider with the logical providers it lists. */
267 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
268 process_logical_providers(info, prov_desc);
270 mutex_enter(&prov_desc->pd_lock);
271 prov_desc->pd_state = KCF_PROV_READY;
272 mutex_exit(&prov_desc->pd_lock);
273 kcf_do_notify(prov_desc, B_TRUE);
275 *handle = prov_desc->pd_kcf_prov_handle;
276 ret = CRYPTO_SUCCESS;
/* Drop the hold taken right after allocation above. */
279 KCF_PROV_REFRELE(prov_desc);
284 * This routine is used to notify the framework when a provider is being
285 * removed. Hardware providers call this routine in their detach routines.
286 * Software providers call this routine in their _fini() routine.
/*
 * Unregister the provider identified by 'handle'.  Returns CRYPTO_BUSY
 * if another thread is already disabling/removing the provider, or
 * (for software providers) if external clients still hold references;
 * CRYPTO_UNKNOWN_PROVIDER if the handle cannot be resolved.  Otherwise
 * marks the provider KCF_PROV_REMOVED, detaches it from the mechanism
 * and provider tables, waits for outstanding requests to drain,
 * notifies clients, and releases/frees the descriptor.
 *
 * NOTE(review): several lines (declarations such as mech_idx, braces,
 * else arms) appear dropped by extraction; compare with upstream
 * before editing.
 */
289 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
292 kcf_provider_desc_t *desc;
293 kcf_prov_state_t saved_state;
295 /* lookup provider descriptor */
296 if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
297 return (CRYPTO_UNKNOWN_PROVIDER);
299 mutex_enter(&desc->pd_lock);
301 * Check if any other thread is disabling or removing
302 * this provider. We return if this is the case.
304 if (desc->pd_state >= KCF_PROV_DISABLED) {
305 mutex_exit(&desc->pd_lock);
306 /* Release reference held by kcf_prov_tab_lookup(). */
307 KCF_PROV_REFRELE(desc);
308 return (CRYPTO_BUSY);
/* Save the state so it can be restored if the removal must be aborted. */
311 saved_state = desc->pd_state;
312 desc->pd_state = KCF_PROV_REMOVED;
314 if (saved_state == KCF_PROV_BUSY) {
316 * The per-provider taskq threads may be waiting. We
317 * signal them so that they can start failing requests.
319 cv_broadcast(&desc->pd_resume_cv);
322 if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
324 * Check if this provider is currently being used.
325 * pd_irefcnt is the number of holds from the internal
326 * structures. We add one to account for the above lookup.
328 if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
329 desc->pd_state = saved_state;
330 mutex_exit(&desc->pd_lock);
331 /* Release reference held by kcf_prov_tab_lookup(). */
332 KCF_PROV_REFRELE(desc);
334 * The administrator presumably will stop the clients
335 * thus removing the holds, when they get the busy
336 * return value. Any retry will succeed then.
338 return (CRYPTO_BUSY);
341 mutex_exit(&desc->pd_lock);
/* Detach hardware/logical providers from their cross-reference lists. */
343 if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
344 remove_provider(desc);
347 if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
348 /* remove the provider from the mechanisms tables */
349 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
351 kcf_remove_mech_provider(
352 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
356 /* remove provider from providers table */
357 if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
359 /* Release reference held by kcf_prov_tab_lookup(). */
360 KCF_PROV_REFRELE(desc);
361 return (CRYPTO_UNKNOWN_PROVIDER);
366 if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
367 /* Release reference held by kcf_prov_tab_lookup(). */
368 KCF_PROV_REFRELE(desc);
371 * Wait till the existing requests complete.
373 mutex_enter(&desc->pd_lock);
374 while (desc->pd_state != KCF_PROV_FREED)
375 cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
376 mutex_exit(&desc->pd_lock);
379 * Wait until requests that have been sent to the provider
382 mutex_enter(&desc->pd_lock);
383 while (desc->pd_irefcnt > 0)
384 cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
385 mutex_exit(&desc->pd_lock);
/* Broadcast the unregistration to interested clients. */
388 kcf_do_notify(desc, B_FALSE);
390 if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
392 * This is the only place where kcf_free_provider_desc()
393 * is called directly. KCF_PROV_REFRELE() should free the
394 * structure in all other places.
396 ASSERT(desc->pd_state == KCF_PROV_FREED &&
397 desc->pd_refcnt == 0);
398 kcf_free_provider_desc(desc);
400 KCF_PROV_REFRELE(desc);
403 return (CRYPTO_SUCCESS);
407 * This routine is used to notify the framework that the state of
408 * a cryptographic provider has changed. Valid state codes are:
410 * CRYPTO_PROVIDER_READY
411 * The provider indicates that it can process more requests. A provider
412 * will notify with this event if it previously has notified us with a
413 * CRYPTO_PROVIDER_BUSY.
415 * CRYPTO_PROVIDER_BUSY
416 * The provider can not take more requests.
418 * CRYPTO_PROVIDER_FAILED
419 * The provider encountered an internal error. The framework will not
420 * be sending any more requests to the provider. The provider may notify
421 * with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
423 * This routine can be called from user or interrupt context.
/*
 * State-change notification from a provider (READY/BUSY/FAILED); see
 * the block comment above for the meaning of each state code.  Logical
 * providers are rejected with a warning; providers whose verification
 * failed are ignored.
 *
 * NOTE(review): the enclosing "switch (state) {", several case labels,
 * break statements and default arms appear dropped by extraction.
 */
426 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
428 kcf_provider_desc_t *pd;
430 /* lookup the provider from the given handle */
431 if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
434 mutex_enter(&pd->pd_lock);
/* Ignore notifications from providers that failed verification. */
436 if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
439 if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
440 cmn_err(CE_WARN, "crypto_provider_notification: "
441 "logical provider (%x) ignored\n", handle);
445 case CRYPTO_PROVIDER_READY:
446 switch (pd->pd_state) {
448 pd->pd_state = KCF_PROV_READY;
450 * Signal the per-provider taskq threads that they
451 * can start submitting requests.
453 cv_broadcast(&pd->pd_resume_cv);
456 case KCF_PROV_FAILED:
458 * The provider recovered from the error. Let us
461 pd->pd_state = KCF_PROV_READY;
468 case CRYPTO_PROVIDER_BUSY:
469 switch (pd->pd_state) {
471 pd->pd_state = KCF_PROV_BUSY;
478 case CRYPTO_PROVIDER_FAILED:
480 * We note the failure and return. The per-provider taskq
481 * threads check this flag and start failing the
482 * requests, if it is set. See process_req_hwp() for details.
484 switch (pd->pd_state) {
486 pd->pd_state = KCF_PROV_FAILED;
490 pd->pd_state = KCF_PROV_FAILED;
492 * The per-provider taskq threads may be waiting. We
493 * signal them so that they can start failing requests.
495 cv_broadcast(&pd->pd_resume_cv);
/* Drop the lock and the reference taken by kcf_prov_tab_lookup(). */
505 mutex_exit(&pd->pd_lock);
506 KCF_PROV_REFRELE(pd);
510 * This routine is used to notify the framework the result of
511 * an asynchronous request handled by a provider. Valid error
512 * codes are the same as the CRYPTO_* errors defined in common.h.
514 * This routine can be called from user or interrupt context.
/*
 * Completion callback from a provider for an asynchronous request.
 * Bumps the provider's failure counter on error, drops the internal
 * provider reference associated with the request, and completes the
 * synchronous (kcf_sop_done) or asynchronous (kcf_aop_done) request
 * node.  'error' is a CRYPTO_* code from common.h.
 *
 * NOTE(review): the opening/closing braces and the "} else {" joining
 * the two arms appear dropped by extraction.
 */
517 crypto_op_notification(crypto_req_handle_t handle, int error)
519 kcf_call_type_t ctype;
524 if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
525 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
527 if (error != CRYPTO_SUCCESS)
528 sreq->sn_provider->pd_sched_info.ks_nfails++;
529 KCF_PROV_IREFRELE(sreq->sn_provider);
530 kcf_sop_done(sreq, error);
532 kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
534 ASSERT(ctype == CRYPTO_ASYNCH);
535 if (error != CRYPTO_SUCCESS)
536 areq->an_provider->pd_sched_info.ks_nfails++;
537 KCF_PROV_IREFRELE(areq->an_provider);
538 kcf_aop_done(areq, error);
543 * This routine is used by software providers to determine
544 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
545 * Note that hardware providers can always use KM_SLEEP. So,
546 * they do not need to call this routine.
548 * This routine can be called from user or interrupt context.
551 crypto_kmflag(crypto_req_handle_t handle)
553 return (REQHNDL2_KMFLAG(handle));
557 * Process the mechanism info structures specified by the provider
558 * during registration. A NULL crypto_provider_info_t indicates
559 * an already initialized provider descriptor.
561 * Mechanisms are not added to the kernel's mechanism table if the
562 * provider is a logical provider.
564 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS if one
565 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
566 * if the table of mechanisms is full.
/*
 * Copy/construct the descriptor's mechanism table from 'info' and add
 * the provider to each mechanism's KCF mech_entry chain.  Logical
 * providers only get the copy, not the per-mechanism registration.
 * A NULL 'info' means the descriptor is already initialized (see
 * redo_register_provider()).  Returns CRYPTO_SUCCESS,
 * CRYPTO_ARGUMENTS_BAD for a malformed mechanism (or a software
 * provider with all mechanisms disabled), or CRYPTO_HOST_MEMORY if the
 * mechanism table is full.
 *
 * NOTE(review): loop-index declarations, "goto"/"continue" statements,
 * else arms and closing braces appear dropped by extraction.
 */
569 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
573 int err = CRYPTO_SUCCESS;
574 kcf_prov_mech_desc_t *pmd;
575 int desc_use_count = 0;
576 int mcount = desc->pd_mech_list_count;
578 if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
580 ASSERT(info->pi_mechanisms != NULL);
581 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
582 sizeof (crypto_mech_info_t) * mcount);
584 return (CRYPTO_SUCCESS);
588 * Copy the mechanism list from the provider info to the provider
589 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
590 * element if the provider has random_ops since we keep an internal
591 * mechanism, SUN_RANDOM, in this case.
594 if (info->pi_ops_vector->co_random_ops != NULL) {
595 crypto_mech_info_t *rand_mi;
598 * Need the following check as it is possible to have
599 * a provider that implements just random_ops and has
600 * pi_mechanisms == NULL.
602 if (info->pi_mechanisms != NULL) {
603 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
604 sizeof (crypto_mech_info_t) * (mcount - 1));
/* The internal SUN_RANDOM mechanism occupies the extra last slot. */
606 rand_mi = &desc->pd_mechanisms[mcount - 1];
608 bzero(rand_mi, sizeof (crypto_mech_info_t));
609 (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
610 CRYPTO_MAX_MECH_NAME);
611 rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
613 ASSERT(info->pi_mechanisms != NULL);
614 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
615 sizeof (crypto_mech_info_t) * mcount);
620 * For each mechanism support by the provider, add the provider
621 * to the corresponding KCF mechanism mech_entry chain.
623 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
624 crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
/* Key-size units in bits and bytes at once is contradictory. */
626 if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
627 (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
628 err = CRYPTO_ARGUMENTS_BAD;
632 if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
633 mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
635 * We ask the provider to specify the limit
636 * per hash mechanism. But, in practice, a
637 * hardware limitation means all hash mechanisms
638 * will have the same maximum size allowed for
639 * input data. So, we make it a per provider
640 * limit to keep it simple.
642 if (mi->cm_max_input_length == 0) {
643 err = CRYPTO_ARGUMENTS_BAD;
646 desc->pd_hash_limit = mi->cm_max_input_length;
650 if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
657 /* The provider will be used for this mechanism */
662 * Don't allow multiple software providers with disabled mechanisms
663 * to register. Subsequent enabling of mechanisms will result in
664 * an unsupported configuration, i.e. multiple software providers
667 if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
668 return (CRYPTO_ARGUMENTS_BAD);
670 if (err == KCF_SUCCESS)
671 return (CRYPTO_SUCCESS);
674 * An error occurred while adding the mechanism, cleanup
/* Roll back the mech registrations done before the failing index. */
677 for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
678 kcf_remove_mech_provider(
679 desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
682 if (err == KCF_MECH_TAB_FULL)
683 return (CRYPTO_HOST_MEMORY);
685 return (CRYPTO_ARGUMENTS_BAD);
689 * Update routine for kstat. Only privileged users are allowed to
690 * access this information, since this information is sensitive.
691 * There are some cryptographic attacks (e.g. traffic analysis)
692 * which can use this information.
/*
 * kstat ks_update callback: refresh the per-provider counters from the
 * scheduling info in the descriptor stashed in ks_private.
 * "passed" is derived as total dispatches minus failures minus busy
 * returns.
 *
 * NOTE(review): the return statements (for the KSTAT_WRITE rejection
 * and the normal path) appear dropped by extraction — presumably
 * EACCES and 0 respectively; confirm against upstream.
 */
695 kcf_prov_kstat_update(kstat_t *ksp, int rw)
697 kcf_prov_stats_t *ks_data;
698 kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
700 if (rw == KSTAT_WRITE)
703 ks_data = ksp->ks_data;
705 ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
706 ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
707 ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
708 ks_data->ps_ops_passed.value.ui64 =
709 pd->pd_sched_info.ks_ndispatches -
710 pd->pd_sched_info.ks_nfails -
711 pd->pd_sched_info.ks_nbusy_rval;
718 * Utility routine called from failure paths in crypto_register_provider()
719 * and from crypto_load_soft_disabled().
/*
 * Roll back a (partial) registration: detach the provider from the
 * mechanism tables and, when 'remove_prov' is set, from the provider
 * table as well.  Called from failure paths in
 * crypto_register_provider() and from crypto_load_soft_disabled().
 *
 * NOTE(review): the 'if (remove_prov)' guard upstream places before the
 * table removal appears dropped by extraction, along with the mech_idx
 * declaration and loop braces.
 */
722 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
726 /* remove the provider from the mechanisms tables */
727 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
729 kcf_remove_mech_provider(
730 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
733 /* remove provider from providers table */
735 (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
739 * Utility routine called from crypto_load_soft_disabled(). Callers
740 * should have done a prior undo_register_provider().
/*
 * Redo the registration state undone by undo_register_provider():
 * re-register the provider's mechanisms (NULL info because the
 * descriptor is already initialized) and re-take the table holds.
 * kcf_prov_tab_add_provider() must not be called here since the
 * descriptor still has its entry in the provider table.
 */
743 redo_register_provider(kcf_provider_desc_t *pd)
745 /* process the mechanisms supported by the provider */
746 (void) init_prov_mechs(NULL, pd);
749 * Hold provider in providers table. We should not call
750 * kcf_prov_tab_add_provider() here as the provider descriptor
751 * is still valid which means it has an entry in the provider
754 KCF_PROV_REFHOLD(pd);
755 KCF_PROV_IREFHOLD(pd);
759 * Add provider (p1) to another provider's array of providers (p2).
760 * Hardware and logical providers use this array to cross-reference
764 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
766 kcf_provider_list_t *new;
768 new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
769 mutex_enter(&p2->pd_lock);
770 new->pl_next = p2->pd_provider_list;
771 p2->pd_provider_list = new;
772 KCF_PROV_IREFHOLD(p1);
773 new->pl_provider = p1;
774 mutex_exit(&p2->pd_lock);
778 * Remove provider (p1) from another provider's array of providers (p2).
779 * Hardware and logical providers use this array to cross-reference
/*
 * Unlink the list entry referencing p1 from p2's provider list, drop
 * the internal reference taken on p1 in add_provider_to_array(), and
 * free the entry.
 *
 * NOTE(review): the loop-exit 'break', the not-found early return, and
 * the '*prev = pl->pl_next;' unlink line appear dropped by extraction.
 */
783 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
786 kcf_provider_list_t *pl = NULL, **prev;
788 mutex_enter(&p2->pd_lock);
/* Walk the singly linked list keeping a pointer to the previous link. */
789 for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
790 pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
791 if (pl->pl_provider == p1) {
797 mutex_exit(&p2->pd_lock);
801 /* detach and free kcf_provider_list structure */
802 KCF_PROV_IREFRELE(p1);
804 kmem_free(pl, sizeof (*pl));
805 mutex_exit(&p2->pd_lock);
809 * Convert an array of logical provider handles (crypto_provider_id)
810 * stored in a crypto_provider_info structure into an array of provider
811 * descriptors (kcf_provider_desc_t) attached to a logical provider.
/*
 * Resolve each logical-provider handle listed in 'info' and cross-link
 * it with hardware provider 'hp': hp is added to each logical
 * provider's list (and marked a logical-provider member), and each
 * logical provider is added to hp's list so hp can detach itself on
 * unregistration.
 *
 * NOTE(review): the 'int i;' declaration and the NULL-lookup 'continue'
 * appear dropped by extraction.
 */
814 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
816 kcf_provider_desc_t *lp;
817 crypto_provider_id_t handle;
818 int count = info->pi_logical_provider_count;
821 /* add hardware provider to each logical provider */
822 for (i = 0; i < count; i++) {
823 handle = info->pi_logical_providers[i];
824 lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
828 add_provider_to_array(hp, lp);
829 hp->pd_flags |= KCF_LPROV_MEMBER;
832 * A hardware provider has to have the provider descriptor of
833 * every logical provider it belongs to, so it can be removed
834 * from the logical provider if the hardware provider
835 * unregisters from the framework.
837 add_provider_to_array(lp, hp);
/* Drop the reference taken by kcf_prov_tab_lookup() above. */
838 KCF_PROV_REFRELE(lp);
843 * This routine removes a provider from all of the logical or
844 * hardware providers it belongs to, and frees the provider's
845 * array of pointers to providers.
/*
 * Detach provider 'pp' from every provider on its pd_provider_list:
 * remove pp from each peer's list, clear the peer's KCF_LPROV_MEMBER
 * flag when it no longer belongs to any logical provider, drop the
 * internal reference and free each list entry.
 *
 * NOTE(review): the 'p = e->pl_provider;' / 'next = e->pl_next;'
 * assignments appear dropped by extraction.
 */
848 remove_provider(kcf_provider_desc_t *pp)
850 kcf_provider_desc_t *p;
851 kcf_provider_list_t *e, *next;
853 mutex_enter(&pp->pd_lock);
854 for (e = pp->pd_provider_list; e != NULL; e = next) {
856 remove_provider_from_array(pp, p);
857 if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
858 p->pd_provider_list == NULL)
859 p->pd_flags &= ~KCF_LPROV_MEMBER;
860 KCF_PROV_IREFRELE(p);
862 kmem_free(e, sizeof (*e));
864 pp->pd_provider_list = NULL;
865 mutex_exit(&pp->pd_lock);
869 * Dispatch events as needed for a provider. is_added flag tells
870 * whether the provider is registering or unregistering.
/*
 * Dispatch registration/unregistration events for 'prov_desc':
 * one CRYPTO_EVENT_MECHS_CHANGED notification per supported mechanism
 * (skipped for logical providers, which carry no mechanisms), followed
 * by a PROVIDER_REGISTERED/UNREGISTERED event — except for providers
 * that are merely members of a logical provider (KCF_LPROV_MEMBER).
 *
 * NOTE(review): the 'uint_t i;' declaration, the CRYPTO_MECH_REMOVED
 * arm of the ternary and several closing braces appear dropped by
 * extraction.
 */
873 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
876 crypto_notify_event_change_t ec;
878 ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
881 * Inform interested clients of the mechanisms becoming
882 * available/unavailable. We skip this for logical providers
883 * as they do not affect mechanisms.
885 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
886 ec.ec_provider_type = prov_desc->pd_prov_type;
887 ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
889 for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
890 (void) strlcpy(ec.ec_mech_name,
891 prov_desc->pd_mechanisms[i].cm_mech_name,
892 CRYPTO_MAX_MECH_NAME);
893 kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
899 * Inform interested clients about the new or departing provider.
900 * In case of a logical provider, we need to notify the event only
901 * for the logical provider and not for the underlying
902 * providers which are known by the KCF_LPROV_MEMBER bit.
904 if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
905 (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
906 kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
907 CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
/*
 * Destroy the provider's kstat (if one was installed) and release the
 * regular + internal descriptor references that were taken for
 * ks_private when the kstat was created in crypto_register_provider().
 */
912 delete_kstat(kcf_provider_desc_t *desc)
914 /* destroy the kstat created for this provider */
915 if (desc->pd_kstat != NULL) {
916 kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
918 /* release reference held by desc->pd_kstat->ks_private */
919 ASSERT(desc == kspd);
920 kstat_delete(kspd->pd_kstat);
921 desc->pd_kstat = NULL;
922 KCF_PROV_REFRELE(kspd);
923 KCF_PROV_IREFRELE(kspd);