/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>

static uu_avl_pool_t    uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t  uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The index mark changes on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define INDEX_MAX               (sizeof (uintptr_t) - 2)
#define INDEX_NEXT(m)           (((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define INDEX_DECODE(i)         ((i) & ~INDEX_MAX)
#define INDEX_ENCODE(p, n)      (((n) & ~INDEX_MAX) | (p)->ua_index)
#define INDEX_VALID(p, i)       (((i) & INDEX_MAX) == (p)->ua_index)
#define INDEX_CHECK(i)          (((i) & INDEX_MAX) != 0)

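/*
 * A worked example, assuming an _LP64 system: sizeof (uintptr_t) is 8,
 * so INDEX_MAX is 6 (binary 110) and INDEX_NEXT() cycles the mark
 * through 2 -> 4 -> 6 -> 2, never disturbing bit 0.  If the avl code
 * hands back a raw index of 0x1001 while ua_index is 4, then:
 *
 *      INDEX_ENCODE(ap, 0x1001) == 0x1005      (mark 4 fills bits 1-2)
 *      INDEX_DECODE(0x1005)     == 0x1001      (mark stripped, bit 0 kept)
 *      INDEX_VALID(ap, 0x1005)  != 0           (mark matches ua_index)
 *
 * After the next insert or remove bumps ua_index to 6, the stale index
 * fails INDEX_VALID() but still passes INDEX_CHECK(), so the panic
 * messages below can distinguish "outdated" from "invalid".
 */
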
/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that the node comes from the correct
 * pool.
 */
#define NODE_ARRAY(p, n)        ((uintptr_t *)((uintptr_t)(n) + \
                                    (p)->uap_nodeoffset))

#define POOL_TO_MARKER(pp)      (((uintptr_t)(pp) | 1))

#define DEAD_MARKER             0xc4

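/*
 * Sketch of the resulting node states (na points at the node's words,
 * as NODE_ARRAY() computes):
 *
 *      inactive (after uu_avl_node_init() or removal):
 *              na[0] == POOL_TO_MARKER(pp), na[1] == 0
 *      dead (after uu_avl_node_fini()):
 *              na[0] == na[1] == DEAD_MARKER
 *
 * While the element is in a tree, the avl code owns these words.
 */
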
uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
        uu_avl_pool_t *pp, *next, *prev;

        if (name == NULL ||
            uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
            nodeoffset + sizeof (uu_avl_node_t) > objsize ||
            compare_func == NULL) {
                uu_set_error(UU_ERROR_INVALID_ARGUMENT);
                return (NULL);
        }

        if (flags & ~UU_AVL_POOL_DEBUG) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        pp = uu_zalloc(sizeof (uu_avl_pool_t));
        if (pp == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        (void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
        pp->uap_nodeoffset = nodeoffset;
        pp->uap_objsize = objsize;
        pp->uap_cmp = compare_func;
        if (flags & UU_AVL_POOL_DEBUG)
                pp->uap_debug = 1;
        pp->uap_last_index = 0;

        (void) pthread_mutex_init(&pp->uap_lock, NULL);

        pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
        pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);

        (void) pthread_mutex_lock(&uu_apool_list_lock);
        pp->uap_next = next = &uu_null_apool;
        pp->uap_prev = prev = next->uap_prev;
        next->uap_prev = pp;
        prev->uap_next = pp;
        (void) pthread_mutex_unlock(&uu_apool_list_lock);

        return (pp);
}

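/*
 * A minimal usage sketch; the object type, compare function, and their
 * names are hypothetical, not part of libuutil:
 *
 *      struct node {
 *              int             n_key;
 *              uu_avl_node_t   n_avl;
 *      };
 *
 *      static int
 *      node_compare(const void *l, const void *r, void *private)
 *      {
 *              int lk = ((const struct node *)l)->n_key;
 *              int rk = ((const struct node *)r)->n_key;
 *
 *              return ((lk < rk)? -1 : (lk > rk)? 1 : 0);
 *      }
 *
 *      pool = uu_avl_pool_create("node_pool", sizeof (struct node),
 *          offsetof(struct node, n_avl), node_compare, UU_DEFAULT);
 */
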
void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
        if (pp->uap_debug) {
                if (pp->uap_null_avl.ua_next_enc !=
                    UU_PTR_ENCODE(&pp->uap_null_avl) ||
                    pp->uap_null_avl.ua_prev_enc !=
                    UU_PTR_ENCODE(&pp->uap_null_avl)) {
                        uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
                            "outstanding avls, or is corrupt.\n",
                            (int)sizeof (pp->uap_name), pp->uap_name,
                            (void *)pp);
                }
        }
        (void) pthread_mutex_lock(&uu_apool_list_lock);
        pp->uap_next->uap_prev = pp->uap_prev;
        pp->uap_prev->uap_next = pp->uap_next;
        (void) pthread_mutex_unlock(&uu_apool_list_lock);
        pp->uap_prev = NULL;
        pp->uap_next = NULL;
        uu_free(pp);
}

void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
        uintptr_t *na = (uintptr_t *)np;

        if (pp->uap_debug) {
                uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
                if (offset + sizeof (*np) > pp->uap_objsize) {
                        uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
                            "offset %ld doesn't fit in object (size %ld)\n",
                            base, (void *)np, (void *)pp, pp->uap_name,
                            (long)offset, (long)pp->uap_objsize);
                }
                if (offset != pp->uap_nodeoffset) {
                        uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
                            "offset %ld doesn't match pool's offset (%ld)\n",
                            base, (void *)np, (void *)pp, pp->uap_name,
                            (long)offset, (long)pp->uap_nodeoffset);
                }
        }

        na[0] = POOL_TO_MARKER(pp);
        na[1] = 0;
}

void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
        uintptr_t *na = (uintptr_t *)np;

        if (pp->uap_debug) {
                if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
                        uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
                            "node already fini'd\n",
                            base, (void *)np, (void *)pp, pp->uap_name);
                }
                if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
                        uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
                            "node corrupt, in tree, or in different pool\n",
                            base, (void *)np, (void *)pp, pp->uap_name);
                }
        }

        na[0] = DEAD_MARKER;
        na[1] = DEAD_MARKER;
        na[2] = DEAD_MARKER;
}

struct uu_avl_node_compare_info {
        uu_compare_fn_t *ac_compare;
        void            *ac_private;
        void            *ac_right;
        void            *ac_found;
};

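/*
 * Trampoline comparator handed to the underlying avl code.  avl_find()
 * passes our uu_avl_node_compare_info as its left argument; we forward
 * the real comparison to the user's function.  By never returning 0 we
 * force avl_find() to descend all the way to a leaf (so it always yields
 * an insertion index), remembering the first node that compared equal
 * in ac_found along the way.
 */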
static int
uu_avl_node_compare(const void *l, const void *r)
{
        struct uu_avl_node_compare_info *info =
            (struct uu_avl_node_compare_info *)l;

        int res = info->ac_compare(r, info->ac_right, info->ac_private);

        if (res == 0) {
                if (info->ac_found == NULL)
                        info->ac_found = (void *)r;
                return (-1);
        }
        if (res < 0)
                return (1);
        return (-1);
}

uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
        uu_avl_t *ap, *next, *prev;

        if (flags & ~UU_AVL_DEBUG) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        ap = uu_zalloc(sizeof (*ap));
        if (ap == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        ap->ua_pool = pp;
        ap->ua_parent_enc = UU_PTR_ENCODE(parent);
        ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
        ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

        avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
            pp->uap_nodeoffset);

        ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
        ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

        (void) pthread_mutex_lock(&pp->uap_lock);
        next = &pp->uap_null_avl;
        prev = UU_PTR_DECODE(next->ua_prev_enc);
        ap->ua_next_enc = UU_PTR_ENCODE(next);
        ap->ua_prev_enc = UU_PTR_ENCODE(prev);
        next->ua_prev_enc = UU_PTR_ENCODE(ap);
        prev->ua_next_enc = UU_PTR_ENCODE(ap);
        (void) pthread_mutex_unlock(&pp->uap_lock);

        return (ap);
}

void
uu_avl_destroy(uu_avl_t *ap)
{
        uu_avl_pool_t *pp = ap->ua_pool;

        if (ap->ua_debug) {
                if (avl_numnodes(&ap->ua_tree) != 0) {
                        uu_panic("uu_avl_destroy(%p): tree not empty\n",
                            (void *)ap);
                }
                if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
                    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
                        uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
                            (void *)ap);
                }
        }
        (void) pthread_mutex_lock(&pp->uap_lock);
        UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
        UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
        (void) pthread_mutex_unlock(&pp->uap_lock);
        ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
        ap->ua_next_enc = UU_PTR_ENCODE(NULL);

        ap->ua_pool = NULL;
        avl_destroy(&ap->ua_tree);

        uu_free(ap);
}

size_t
uu_avl_numnodes(uu_avl_t *ap)
{
        return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
        return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
        return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
        return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
        return (AVL_PREV(&ap->ua_tree, node));
}

static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
        uu_avl_walk_t *next, *prev;

        int robust = (flags & UU_WALK_ROBUST);
        int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

        (void) memset(wp, 0, sizeof (*wp));
        wp->uaw_avl = ap;
        wp->uaw_robust = robust;
        wp->uaw_dir = direction;

        if (direction > 0)
                wp->uaw_next_result = avl_first(&ap->ua_tree);
        else
                wp->uaw_next_result = avl_last(&ap->ua_tree);

        if (ap->ua_debug || robust) {
                wp->uaw_next = next = &ap->ua_null_walk;
                wp->uaw_prev = prev = next->uaw_prev;
                next->uaw_prev = wp;
                prev->uaw_next = wp;
        }
}

static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
        void *np = wp->uaw_next_result;

        avl_tree_t *t = &ap->ua_tree;

        if (np == NULL)
                return (NULL);

        wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
            AVL_PREV(t, np);

        return (np);
}

static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
        if (wp->uaw_next != NULL) {
                wp->uaw_next->uaw_prev = wp->uaw_prev;
                wp->uaw_prev->uaw_next = wp->uaw_next;
                wp->uaw_next = NULL;
                wp->uaw_prev = NULL;
        }
        wp->uaw_avl = NULL;
        wp->uaw_next_result = NULL;
}

uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
        uu_avl_walk_t *wp;

        if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        wp = uu_zalloc(sizeof (*wp));
        if (wp == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        _avl_walk_init(wp, ap, flags);
        return (wp);
}

void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
        return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
        _avl_walk_fini(wp);
        uu_free(wp);
}
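
/*
 * A minimal sketch of a robust walk that prunes entries; the element
 * type and helper functions here are hypothetical:
 *
 *      uu_avl_walk_t *wp;
 *      struct node *np;
 *
 *      if ((wp = uu_avl_walk_start(avl, UU_WALK_ROBUST)) == NULL)
 *              die("walk failed: %s", uu_strerror(uu_error()));
 *      while ((np = uu_avl_walk_next(wp)) != NULL) {
 *              if (node_is_stale(np)) {
 *                      uu_avl_remove(avl, np);         // safe: ROBUST walk
 *                      node_free(np);
 *              }
 *      }
 *      uu_avl_walk_end(wp);
 */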

int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
        void *e;
        uu_avl_walk_t my_walk;

        int status = UU_WALK_NEXT;

        if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (-1);
        }

        _avl_walk_init(&my_walk, ap, flags);
        while (status == UU_WALK_NEXT &&
            (e = _avl_walk_advance(&my_walk, ap)) != NULL)
                status = (*func)(e, private);
        _avl_walk_fini(&my_walk);

        if (status >= 0)
                return (0);
        uu_set_error(UU_ERROR_CALLBACK_FAILED);
        return (-1);
}

void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
        uu_avl_walk_t *wp;
        uu_avl_pool_t *pp = ap->ua_pool;
        uintptr_t *na = NODE_ARRAY(pp, elem);

        if (ap->ua_debug) {
                /*
                 * invalidate outstanding uu_avl_index_ts.
                 */
                ap->ua_index = INDEX_NEXT(ap->ua_index);
        }

        /*
         * Robust walkers must be advanced if we are removing the node
         * they are currently using.  In debug mode, non-robust walkers
         * are also on the walker list.
         */
        for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
            wp = wp->uaw_next) {
                if (wp->uaw_robust) {
                        if (elem == wp->uaw_next_result)
                                (void) _avl_walk_advance(wp, ap);
                } else if (wp->uaw_next_result != NULL) {
                        uu_panic("uu_avl_remove(%p, %p): active non-robust "
                            "walker\n", (void *)ap, elem);
                }
        }

        avl_remove(&ap->ua_tree, elem);

        na[0] = POOL_TO_MARKER(pp);
        na[1] = 0;
}

void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
        void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

        if (elem != NULL) {
                uu_avl_pool_t *pp = ap->ua_pool;
                uintptr_t *na = NODE_ARRAY(pp, elem);

                na[0] = POOL_TO_MARKER(pp);
                na[1] = 0;
        }
        return (elem);
}

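/*
 * A minimal teardown sketch (node_free() is hypothetical); the cookie
 * must start as NULL and be passed back unmodified on each call:
 *
 *      void *cookie = NULL;
 *      struct node *np;
 *
 *      while ((np = uu_avl_teardown(avl, &cookie)) != NULL)
 *              node_free(np);
 *      uu_avl_destroy(avl);
 */
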
void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
        struct uu_avl_node_compare_info info;
        void *result;

        info.ac_compare = ap->ua_pool->uap_cmp;
        info.ac_private = private;
        info.ac_right = elem;
        info.ac_found = NULL;

        result = avl_find(&ap->ua_tree, &info, out);
        if (out != NULL)
                *out = INDEX_ENCODE(ap, *out);

        if (ap->ua_debug && result != NULL)
                uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

        return (info.ac_found);
}

void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
        if (ap->ua_debug) {
                uu_avl_pool_t *pp = ap->ua_pool;
                uintptr_t *na = NODE_ARRAY(pp, elem);

                if (na[1] != 0)
                        uu_panic("uu_avl_insert(%p, %p, %p): node already "
                            "in tree, or corrupt\n",
                            (void *)ap, elem, (void *)idx);
                if (na[0] == 0)
                        uu_panic("uu_avl_insert(%p, %p, %p): node not "
                            "initialized\n",
                            (void *)ap, elem, (void *)idx);
                if (na[0] != POOL_TO_MARKER(pp))
                        uu_panic("uu_avl_insert(%p, %p, %p): node from "
                            "other pool, or corrupt\n",
                            (void *)ap, elem, (void *)idx);

                if (!INDEX_VALID(ap, idx))
                        uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
                            (void *)ap, elem, (void *)idx,
                            INDEX_CHECK(idx)? "outdated index" :
                            "invalid index");

                /*
                 * invalidate outstanding uu_avl_index_ts.
                 */
                ap->ua_index = INDEX_NEXT(ap->ua_index);
        }
        avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}

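/*
 * The usual find-or-insert idiom, sketched with a hypothetical element
 * np; the index returned by a failed uu_avl_find() stays usable until
 * the next insert or remove:
 *
 *      uu_avl_index_t idx;
 *
 *      if (uu_avl_find(avl, np, NULL, &idx) == NULL)
 *              uu_avl_insert(avl, np, idx);
 *      else
 *              ...             // duplicate already present
 */
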
void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
        if (ap->ua_debug && !INDEX_VALID(ap, idx))
                uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
                    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
                    "outdated index" : "invalid index");
        return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}

void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
        if (ap->ua_debug && !INDEX_VALID(ap, idx))
                uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
                    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
                    "outdated index" : "invalid index");
        return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}

/*
 * Called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
        uu_avl_pool_t *pp;

        (void) pthread_mutex_lock(&uu_apool_list_lock);
        for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
            pp = pp->uap_next)
                (void) pthread_mutex_lock(&pp->uap_lock);
}

void
uu_avl_release(void)
{
        uu_avl_pool_t *pp;

        for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
            pp = pp->uap_next)
                (void) pthread_mutex_unlock(&pp->uap_lock);
        (void) pthread_mutex_unlock(&uu_apool_list_lock);
}
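
/*
 * A sketch of how these hook into fork1()-safety; the actual
 * registration lives elsewhere in libuutil (uu_misc.c), roughly:
 *
 *      (void) pthread_atfork(uu_lockup, uu_release, uu_release);
 *
 * Holding every pool lock (and the list lock) across fork1() ensures
 * the child never inherits a lock frozen mid-operation.
 */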