515 dce = kmem_cache_alloc(dce_cache, KM_NOSLEEP);
516 if (dce == NULL) {
517 rw_exit(&dcb->dcb_lock);
518 return (NULL);
519 }
520 bzero(dce, sizeof (dce_t));
521 dce->dce_ipst = ipst; /* No netstack_hold */
522 dce->dce_v4addr = dst;
523 dce->dce_generation = DCE_GENERATION_INITIAL;
524 dce->dce_ipversion = IPV4_VERSION;
525 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
526 dce_refhold(dce); /* For the hash list */
527
528 /* Link into list */
529 if (dcb->dcb_dce != NULL)
530 dcb->dcb_dce->dce_ptpn = &dce->dce_next;
531 dce->dce_next = dcb->dcb_dce;
532 dce->dce_ptpn = &dcb->dcb_dce;
533 dcb->dcb_dce = dce;
534 dce->dce_bucket = dcb;
535 atomic_add_32(&dcb->dcb_cnt, 1);
536 dce_refhold(dce); /* For the caller */
537 rw_exit(&dcb->dcb_lock);
538
539 /* Initialize dce_ident to be different than for the last packet */
540 dce->dce_ident = ipst->ips_dce_default->dce_ident + 1;
541
542 dce_increment_generation(ipst->ips_dce_default);
543 return (dce);
544 }
545
546 /*
547 * Atomically looks for a non-default DCE, and if not found tries to create one.
548 * If there is no memory it returns NULL.
549 * When an entry is created we increase the generation number on
550 * the default DCE so that conn_ip_output will detect there is a new DCE.
551 * ifindex should only be used with link-local addresses.
552 */
dce_t *
dce_lookup_and_add_v6(const in6_addr_t *dst, uint_t ifindex, ip_stack_t *ipst)
{
	/*
	 * NOTE(review): original lines 556-586 (local declarations, the
	 * lookup under dcb_lock, and the kmem_cache_alloc of `dce') are
	 * not visible in this chunk; presumably they set up `dce' and
	 * `dcb' -- confirm against the full file.  Comments below cover
	 * only the visible creation tail.
	 */
	if (dce == NULL) {
		/* Allocation failed: drop the bucket lock and report it. */
		rw_exit(&dcb->dcb_lock);
		return (NULL);
	}
	bzero(dce, sizeof (dce_t));
	dce->dce_ipst = ipst;	/* No netstack_hold */
	dce->dce_v6addr = *dst;
	dce->dce_ifindex = ifindex;
	dce->dce_generation = DCE_GENERATION_INITIAL;
	dce->dce_ipversion = IPV6_VERSION;
	dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
	dce_refhold(dce);	/* For the hash list */

	/* Link into list (insert at the head of the bucket chain). */
	if (dcb->dcb_dce != NULL)
		dcb->dcb_dce->dce_ptpn = &dce->dce_next;
	dce->dce_next = dcb->dcb_dce;
	dce->dce_ptpn = &dcb->dcb_dce;
	dcb->dcb_dce = dce;
	dce->dce_bucket = dcb;
	atomic_add_32(&dcb->dcb_cnt, 1);
	dce_refhold(dce);	/* For the caller */
	rw_exit(&dcb->dcb_lock);

	/* Initialize dce_ident to be different than for the last packet */
	dce->dce_ident = ipst->ips_dce_default->dce_ident + 1;
	/*
	 * Bump the default DCE's generation so conn_ip_output notices
	 * that a new per-destination entry now exists.
	 */
	dce_increment_generation(ipst->ips_dce_default);
	return (dce);
}
616
617 /*
618 * Set/update uinfo. Creates a per-destination dce if none exists.
619 *
620 * Note that we do not bump the generation number here.
621 * New connections will find the new uinfo.
622 *
623 * The only use of this (tcp, sctp using iulp_t) is to set rtt+rtt_sd.
624 */
static void
dce_setuinfo(dce_t *dce, iulp_t *uinfo)
{
	/*
	 * NOTE(review): original lines 628-713 are elided from this chunk.
	 * The visible tail below references `dst', `ifindex' and `ipst',
	 * which are not parameters of dce_setuinfo, so it presumably
	 * belongs to a different function (likely dce_update_uinfo) --
	 * verify against the full file before relying on these comments.
	 */
	ipaddr_t dst4;

	if (IN6_IS_ADDR_V4MAPPED_ANY(dst)) {
		/* V4-mapped destination: unmap and take the IPv4 path. */
		IN6_V4MAPPED_TO_IPADDR(dst, dst4);
		return (dce_update_uinfo_v4(dst4, uinfo, ipst));
	} else {
		return (dce_update_uinfo_v6(dst, ifindex, uinfo, ipst));
	}
}
723
724 static void
725 dce_make_condemned(dce_t *dce)
726 {
727 ip_stack_t *ipst = dce->dce_ipst;
728
729 mutex_enter(&dce->dce_lock);
730 ASSERT(!DCE_IS_CONDEMNED(dce));
731 dce->dce_generation = DCE_GENERATION_CONDEMNED;
732 mutex_exit(&dce->dce_lock);
733 /* Count how many condemned dces for kmem_cache callback */
734 atomic_add_32(&ipst->ips_num_dce_condemned, 1);
735 }
736
737 /*
738 * Increment the generation avoiding the special condemned value
739 */
740 void
741 dce_increment_generation(dce_t *dce)
742 {
743 uint_t generation;
744
745 mutex_enter(&dce->dce_lock);
746 if (!DCE_IS_CONDEMNED(dce)) {
747 generation = dce->dce_generation + 1;
748 if (generation == DCE_GENERATION_CONDEMNED)
749 generation = DCE_GENERATION_INITIAL;
750 ASSERT(generation != DCE_GENERATION_VERIFY);
751 dce->dce_generation = generation;
752 }
753 mutex_exit(&dce->dce_lock);
754 }
776 dce_increment_generation(dce);
777 }
778 rw_exit(&dcb->dcb_lock);
779 }
780 dce_increment_generation(ipst->ips_dce_default);
781 }
782
783 /*
784 * Caller needs to do a dce_refrele since we can't do the
785 * dce_refrele under dcb_lock.
786 */
787 static void
788 dce_delete_locked(dcb_t *dcb, dce_t *dce)
789 {
790 dce->dce_bucket = NULL;
791 *dce->dce_ptpn = dce->dce_next;
792 if (dce->dce_next != NULL)
793 dce->dce_next->dce_ptpn = dce->dce_ptpn;
794 dce->dce_ptpn = NULL;
795 dce->dce_next = NULL;
796 atomic_add_32(&dcb->dcb_cnt, -1);
797 dce_make_condemned(dce);
798 }
799
800 static void
801 dce_inactive(dce_t *dce)
802 {
803 ip_stack_t *ipst = dce->dce_ipst;
804
805 ASSERT(!(dce->dce_flags & DCEF_DEFAULT));
806 ASSERT(dce->dce_ptpn == NULL);
807 ASSERT(dce->dce_bucket == NULL);
808
809 /* Count how many condemned dces for kmem_cache callback */
810 if (DCE_IS_CONDEMNED(dce))
811 atomic_add_32(&ipst->ips_num_dce_condemned, -1);
812
813 kmem_cache_free(dce_cache, dce);
814 }
815
816 void
817 dce_refrele(dce_t *dce)
818 {
819 ASSERT(dce->dce_refcnt != 0);
820 if (atomic_add_32_nv(&dce->dce_refcnt, -1) == 0)
821 dce_inactive(dce);
822 }
823
824 void
825 dce_refhold(dce_t *dce)
826 {
827 atomic_add_32(&dce->dce_refcnt, 1);
828 ASSERT(dce->dce_refcnt != 0);
829 }
830
831 /* No tracing support yet hence the same as the above functions */
832 void
833 dce_refrele_notr(dce_t *dce)
834 {
835 ASSERT(dce->dce_refcnt != 0);
836 if (atomic_add_32_nv(&dce->dce_refcnt, -1) == 0)
837 dce_inactive(dce);
838 }
839
840 void
841 dce_refhold_notr(dce_t *dce)
842 {
843 atomic_add_32(&dce->dce_refcnt, 1);
844 ASSERT(dce->dce_refcnt != 0);
845 }
846
847 /* Report both the IPv4 and IPv6 DCEs. */
848 mblk_t *
849 ip_snmp_get_mib2_ip_dce(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
850 {
851 struct opthdr *optp;
852 mblk_t *mp2ctl;
853 dest_cache_entry_t dest_cache;
854 mblk_t *mp_tail = NULL;
855 dce_t *dce;
856 dcb_t *dcb;
857 int i;
858 uint64_t current_time;
859
860 current_time = TICK_TO_SEC(ddi_get_lbolt64());
861
862 /*
863 * make a copy of the original message
|
515 dce = kmem_cache_alloc(dce_cache, KM_NOSLEEP);
516 if (dce == NULL) {
517 rw_exit(&dcb->dcb_lock);
518 return (NULL);
519 }
520 bzero(dce, sizeof (dce_t));
521 dce->dce_ipst = ipst; /* No netstack_hold */
522 dce->dce_v4addr = dst;
523 dce->dce_generation = DCE_GENERATION_INITIAL;
524 dce->dce_ipversion = IPV4_VERSION;
525 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
526 dce_refhold(dce); /* For the hash list */
527
528 /* Link into list */
529 if (dcb->dcb_dce != NULL)
530 dcb->dcb_dce->dce_ptpn = &dce->dce_next;
531 dce->dce_next = dcb->dcb_dce;
532 dce->dce_ptpn = &dcb->dcb_dce;
533 dcb->dcb_dce = dce;
534 dce->dce_bucket = dcb;
535 atomic_inc_32(&dcb->dcb_cnt);
536 dce_refhold(dce); /* For the caller */
537 rw_exit(&dcb->dcb_lock);
538
539 /* Initialize dce_ident to be different than for the last packet */
540 dce->dce_ident = ipst->ips_dce_default->dce_ident + 1;
541
542 dce_increment_generation(ipst->ips_dce_default);
543 return (dce);
544 }
545
546 /*
547 * Atomically looks for a non-default DCE, and if not found tries to create one.
548 * If there is no memory it returns NULL.
549 * When an entry is created we increase the generation number on
550 * the default DCE so that conn_ip_output will detect there is a new DCE.
551 * ifindex should only be used with link-local addresses.
552 */
dce_t *
dce_lookup_and_add_v6(const in6_addr_t *dst, uint_t ifindex, ip_stack_t *ipst)
{
	/*
	 * NOTE(review): original lines 556-586 (local declarations, the
	 * lookup under dcb_lock, and the kmem_cache_alloc of `dce') are
	 * not visible in this chunk; presumably they set up `dce' and
	 * `dcb' -- confirm against the full file.  Comments below cover
	 * only the visible creation tail.
	 */
	if (dce == NULL) {
		/* Allocation failed: drop the bucket lock and report it. */
		rw_exit(&dcb->dcb_lock);
		return (NULL);
	}
	bzero(dce, sizeof (dce_t));
	dce->dce_ipst = ipst;	/* No netstack_hold */
	dce->dce_v6addr = *dst;
	dce->dce_ifindex = ifindex;
	dce->dce_generation = DCE_GENERATION_INITIAL;
	dce->dce_ipversion = IPV6_VERSION;
	dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
	dce_refhold(dce);	/* For the hash list */

	/* Link into list (insert at the head of the bucket chain). */
	if (dcb->dcb_dce != NULL)
		dcb->dcb_dce->dce_ptpn = &dce->dce_next;
	dce->dce_next = dcb->dcb_dce;
	dce->dce_ptpn = &dcb->dcb_dce;
	dcb->dcb_dce = dce;
	dce->dce_bucket = dcb;
	atomic_inc_32(&dcb->dcb_cnt);
	dce_refhold(dce);	/* For the caller */
	rw_exit(&dcb->dcb_lock);

	/* Initialize dce_ident to be different than for the last packet */
	dce->dce_ident = ipst->ips_dce_default->dce_ident + 1;
	/*
	 * Bump the default DCE's generation so conn_ip_output notices
	 * that a new per-destination entry now exists.
	 */
	dce_increment_generation(ipst->ips_dce_default);
	return (dce);
}
616
617 /*
618 * Set/update uinfo. Creates a per-destination dce if none exists.
619 *
620 * Note that we do not bump the generation number here.
621 * New connections will find the new uinfo.
622 *
623 * The only use of this (tcp, sctp using iulp_t) is to set rtt+rtt_sd.
624 */
static void
dce_setuinfo(dce_t *dce, iulp_t *uinfo)
{
	/*
	 * NOTE(review): original lines 628-713 are elided from this chunk.
	 * The visible tail below references `dst', `ifindex' and `ipst',
	 * which are not parameters of dce_setuinfo, so it presumably
	 * belongs to a different function (likely dce_update_uinfo) --
	 * verify against the full file before relying on these comments.
	 */
	ipaddr_t dst4;

	if (IN6_IS_ADDR_V4MAPPED_ANY(dst)) {
		/* V4-mapped destination: unmap and take the IPv4 path. */
		IN6_V4MAPPED_TO_IPADDR(dst, dst4);
		return (dce_update_uinfo_v4(dst4, uinfo, ipst));
	} else {
		return (dce_update_uinfo_v6(dst, ifindex, uinfo, ipst));
	}
}
723
724 static void
725 dce_make_condemned(dce_t *dce)
726 {
727 ip_stack_t *ipst = dce->dce_ipst;
728
729 mutex_enter(&dce->dce_lock);
730 ASSERT(!DCE_IS_CONDEMNED(dce));
731 dce->dce_generation = DCE_GENERATION_CONDEMNED;
732 mutex_exit(&dce->dce_lock);
733 /* Count how many condemned dces for kmem_cache callback */
734 atomic_inc_32(&ipst->ips_num_dce_condemned);
735 }
736
737 /*
738 * Increment the generation avoiding the special condemned value
739 */
740 void
741 dce_increment_generation(dce_t *dce)
742 {
743 uint_t generation;
744
745 mutex_enter(&dce->dce_lock);
746 if (!DCE_IS_CONDEMNED(dce)) {
747 generation = dce->dce_generation + 1;
748 if (generation == DCE_GENERATION_CONDEMNED)
749 generation = DCE_GENERATION_INITIAL;
750 ASSERT(generation != DCE_GENERATION_VERIFY);
751 dce->dce_generation = generation;
752 }
753 mutex_exit(&dce->dce_lock);
754 }
776 dce_increment_generation(dce);
777 }
778 rw_exit(&dcb->dcb_lock);
779 }
780 dce_increment_generation(ipst->ips_dce_default);
781 }
782
783 /*
784 * Caller needs to do a dce_refrele since we can't do the
785 * dce_refrele under dcb_lock.
786 */
static void
dce_delete_locked(dcb_t *dcb, dce_t *dce)
{
	/* Unlink `dce' from its bucket chain; caller holds dcb_lock. */
	dce->dce_bucket = NULL;
	*dce->dce_ptpn = dce->dce_next;
	if (dce->dce_next != NULL)
		dce->dce_next->dce_ptpn = dce->dce_ptpn;
	dce->dce_ptpn = NULL;
	dce->dce_next = NULL;
	atomic_dec_32(&dcb->dcb_cnt);
	/* Mark condemned so concurrent holders see it is going away. */
	dce_make_condemned(dce);
}
799
800 static void
801 dce_inactive(dce_t *dce)
802 {
803 ip_stack_t *ipst = dce->dce_ipst;
804
805 ASSERT(!(dce->dce_flags & DCEF_DEFAULT));
806 ASSERT(dce->dce_ptpn == NULL);
807 ASSERT(dce->dce_bucket == NULL);
808
809 /* Count how many condemned dces for kmem_cache callback */
810 if (DCE_IS_CONDEMNED(dce))
811 atomic_dec_32(&ipst->ips_num_dce_condemned);
812
813 kmem_cache_free(dce_cache, dce);
814 }
815
816 void
817 dce_refrele(dce_t *dce)
818 {
819 ASSERT(dce->dce_refcnt != 0);
820 if (atomic_dec_32_nv(&dce->dce_refcnt) == 0)
821 dce_inactive(dce);
822 }
823
/* Take a reference on `dce'; paired with dce_refrele(). */
void
dce_refhold(dce_t *dce)
{
	atomic_inc_32(&dce->dce_refcnt);
	ASSERT(dce->dce_refcnt != 0);	/* Catch wrap-around */
}
830
831 /* No tracing support yet hence the same as the above functions */
832 void
833 dce_refrele_notr(dce_t *dce)
834 {
835 ASSERT(dce->dce_refcnt != 0);
836 if (atomic_dec_32_nv(&dce->dce_refcnt) == 0)
837 dce_inactive(dce);
838 }
839
/* Untraced hold; identical behavior to dce_refhold(). */
void
dce_refhold_notr(dce_t *dce)
{
	atomic_inc_32(&dce->dce_refcnt);
	ASSERT(dce->dce_refcnt != 0);	/* Catch wrap-around */
}
846
847 /* Report both the IPv4 and IPv6 DCEs. */
848 mblk_t *
849 ip_snmp_get_mib2_ip_dce(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
850 {
851 struct opthdr *optp;
852 mblk_t *mp2ctl;
853 dest_cache_entry_t dest_cache;
854 mblk_t *mp_tail = NULL;
855 dce_t *dce;
856 dcb_t *dcb;
857 int i;
858 uint64_t current_time;
859
860 current_time = TICK_TO_SEC(ddi_get_lbolt64());
861
862 /*
863 * make a copy of the original message
|