Print this page
5042 stop using deprecated atomic functions


3590                 return (WALK_ERR);
3591         }
3592 
3593         eqdp = mdb_alloc(sizeof (eqd_walk_data_t), UM_SLEEP);
3594         wsp->walk_data = eqdp;
3595 
3596         eqdp->eqd_stack = mdb_zalloc(sizeof (uintptr_t) * eq.eq_qlen, UM_SLEEP);
3597         eqdp->eqd_buf = mdb_alloc(eq.eq_size, UM_SLEEP);
3598         eqdp->eqd_qlen = eq.eq_qlen;
3599         eqdp->eqd_qpos = 0;
3600         eqdp->eqd_size = eq.eq_size;
3601 
3602         /*
3603          * The newest elements in the queue are on the pending list, so we
3604          * push those on to our stack first.
3605          */
3606         eqd_push_list(eqdp, (uintptr_t)eq.eq_pend);
3607 
3608         /*
3609          * If eq_ptail is set, it may point to a subset of the errors on the
3610          * pending list in the event a casptr() failed; if ptail's data is
3611          * already in our stack, NULL out eq_ptail and ignore it.
3612          */
3613         if (eq.eq_ptail != NULL) {
3614                 for (i = 0; i < eqdp->eqd_qpos; i++) {
3615                         if (eqdp->eqd_stack[i] == (uintptr_t)eqe.eqe_data) {
3616                                 eq.eq_ptail = NULL;
3617                                 break;
3618                         }
3619                 }
3620         }
3621 
3622         /*
3623          * If eq_phead is set, it has the processing list in order from oldest
3624          * to newest.  Use this to recompute eq_ptail as best we can and then
3625          * we nicely fall into eqd_push_list() of eq_ptail below.
3626          */
3627         for (addr = eq.eq_phead; addr != NULL && mdb_vread(&eqe, sizeof (eqe),
3628             (uintptr_t)addr) == sizeof (eqe); addr = eqe.eqe_next)
3629                 eq.eq_ptail = addr;
3630 
3631         /*




3590                 return (WALK_ERR);
3591         }
3592 
3593         eqdp = mdb_alloc(sizeof (eqd_walk_data_t), UM_SLEEP);
3594         wsp->walk_data = eqdp;
3595 
3596         eqdp->eqd_stack = mdb_zalloc(sizeof (uintptr_t) * eq.eq_qlen, UM_SLEEP);
3597         eqdp->eqd_buf = mdb_alloc(eq.eq_size, UM_SLEEP);
3598         eqdp->eqd_qlen = eq.eq_qlen;
3599         eqdp->eqd_qpos = 0;
3600         eqdp->eqd_size = eq.eq_size;
3601 
3602         /*
3603          * The newest elements in the queue are on the pending list, so we
3604          * push those on to our stack first.
3605          */
3606         eqd_push_list(eqdp, (uintptr_t)eq.eq_pend);
3607 
3608         /*
3609          * If eq_ptail is set, it may point to a subset of the errors on the
3610          * pending list in the event an atomic_cas_ptr() failed; if ptail's
3611          * data is already in our stack, NULL out eq_ptail and ignore it.
3612          */
3613         if (eq.eq_ptail != NULL) {
3614                 for (i = 0; i < eqdp->eqd_qpos; i++) {
3615                         if (eqdp->eqd_stack[i] == (uintptr_t)eqe.eqe_data) {
3616                                 eq.eq_ptail = NULL;
3617                                 break;
3618                         }
3619                 }
3620         }
3621 
3622         /*
3623          * If eq_phead is set, it has the processing list in order from oldest
3624          * to newest.  Use this to recompute eq_ptail as best we can and then
3625          * we nicely fall into eqd_push_list() of eq_ptail below.
3626          */
3627         for (addr = eq.eq_phead; addr != NULL && mdb_vread(&eqe, sizeof (eqe),
3628             (uintptr_t)addr) == sizeof (eqe); addr = eqe.eqe_next)
3629                 eq.eq_ptail = addr;
3630 
3631         /*