1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27         .file   "atomic.s"
  28 
  29 #include <sys/asm_linkage.h>
  30 
  31 #if defined(_KERNEL)
  32         /*
  33          * Legacy kernel interfaces; they will go away (eventually).
  34          */
  35         ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
  36         ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
  37         ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
  38         ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
  39         ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
  40         ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
  41         ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
  42 #endif
  43 
        /*
         * void atomic_inc_8(volatile uint8_t *target)
         * Atomically increment the 8-bit value at *target.  No return value.
         */
        ENTRY(atomic_inc_8)
        ALTENTRY(atomic_inc_uchar)
        movl    4(%esp), %eax   / %eax = target address
        lock
        incb    (%eax)          / atomic byte increment
        ret
        SET_SIZE(atomic_inc_uchar)
        SET_SIZE(atomic_inc_8)
  52 
        /*
         * void atomic_inc_16(volatile uint16_t *target)
         * Atomically increment the 16-bit value at *target.  No return value.
         */
        ENTRY(atomic_inc_16)
        ALTENTRY(atomic_inc_ushort)
        movl    4(%esp), %eax   / %eax = target address
        lock
        incw    (%eax)          / atomic word increment
        ret
        SET_SIZE(atomic_inc_ushort)
        SET_SIZE(atomic_inc_16)
  61 
        /*
         * void atomic_inc_32(volatile uint32_t *target)
         * Atomically increment the 32-bit value at *target.  No return value.
         * uint/ulong are both 32 bits on i386, hence the shared entry points.
         */
        ENTRY(atomic_inc_32)
        ALTENTRY(atomic_inc_uint)
        ALTENTRY(atomic_inc_ulong)
        movl    4(%esp), %eax   / %eax = target address
        lock
        incl    (%eax)          / atomic 32-bit increment
        ret
        SET_SIZE(atomic_inc_ulong)
        SET_SIZE(atomic_inc_uint)
        SET_SIZE(atomic_inc_32)
  72 
        /*
         * uint8_t atomic_inc_8_nv(volatile uint8_t *target)
         * Atomically increment the 8-bit value at *target and return the
         * new value (in %al; upper bits of %eax are cleared).
         */
        ENTRY(atomic_inc_8_nv)
        ALTENTRY(atomic_inc_uchar_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / clear upper bits of %eax
        incb    %al             / %al = 1
        lock
          xaddb %al, (%edx)     / %al = old value, inc (%edx)
        incb    %al     / return new value
        ret
        SET_SIZE(atomic_inc_uchar_nv)
        SET_SIZE(atomic_inc_8_nv)
  84 
        /*
         * uint16_t atomic_inc_16_nv(volatile uint16_t *target)
         * Atomically increment the 16-bit value at *target and return the
         * new value (in %ax; upper bits of %eax are cleared).
         */
        ENTRY(atomic_inc_16_nv)
        ALTENTRY(atomic_inc_ushort_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / clear upper bits of %eax
        incw    %ax             / %ax = 1
        lock
          xaddw %ax, (%edx)     / %ax = old value, inc (%edx)
        incw    %ax             / return new value
        ret
        SET_SIZE(atomic_inc_ushort_nv)
        SET_SIZE(atomic_inc_16_nv)
  96 
        /*
         * uint32_t atomic_inc_32_nv(volatile uint32_t *target)
         * Atomically increment the 32-bit value at *target and return the
         * new value in %eax.
         */
        ENTRY(atomic_inc_32_nv)
        ALTENTRY(atomic_inc_uint_nv)
        ALTENTRY(atomic_inc_ulong_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / %eax = 0
        incl    %eax            / %eax = 1
        lock
          xaddl %eax, (%edx)    / %eax = old value, inc (%edx)
        incl    %eax            / return new value
        ret
        SET_SIZE(atomic_inc_ulong_nv)
        SET_SIZE(atomic_inc_uint_nv)
        SET_SIZE(atomic_inc_32_nv)
 110 
 111         /*
 112          * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
 113          * separated, you need to also edit the libc i386 platform
 114          * specific mapfile and remove the NODYNSORT attribute
 115          * from atomic_inc_64_nv.
 116          */
        /*
         * uint64_t atomic_inc_64_nv(volatile uint64_t *target)
         * void atomic_inc_64(volatile uint64_t *target)
         * Atomically increment the 64-bit value at *target using a
         * cmpxchg8b compare-and-swap loop; the new value is returned
         * in %edx:%eax (callers of the void variant simply ignore it).
         */
        ENTRY(atomic_inc_64)
        ALTENTRY(atomic_inc_64_nv)
        pushl   %edi
        pushl   %ebx
        movl    12(%esp), %edi  / %edi = target address
        movl    (%edi), %eax
        movl    4(%edi), %edx   / %edx:%eax = old value
1:
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        incl    %ebx            / %ecx:%ebx = 1
        addl    %eax, %ebx
        adcl    %edx, %ecx      / add in the carry from the low-word add
        lock
        cmpxchg8b (%edi)        / try to stick it in
        jne     1b              / lost race: %edx:%eax reloaded, retry
        movl    %ebx, %eax
        movl    %ecx, %edx      / return new value
        popl    %ebx
        popl    %edi
        ret
        SET_SIZE(atomic_inc_64_nv)
        SET_SIZE(atomic_inc_64)
 140 
        /*
         * void atomic_dec_8(volatile uint8_t *target)
         * Atomically decrement the 8-bit value at *target.  No return value.
         */
        ENTRY(atomic_dec_8)
        ALTENTRY(atomic_dec_uchar)
        movl    4(%esp), %eax   / %eax = target address
        lock
        decb    (%eax)          / atomic byte decrement
        ret
        SET_SIZE(atomic_dec_uchar)
        SET_SIZE(atomic_dec_8)
 149 
        /*
         * void atomic_dec_16(volatile uint16_t *target)
         * Atomically decrement the 16-bit value at *target.  No return value.
         */
        ENTRY(atomic_dec_16)
        ALTENTRY(atomic_dec_ushort)
        movl    4(%esp), %eax   / %eax = target address
        lock
        decw    (%eax)          / atomic word decrement
        ret
        SET_SIZE(atomic_dec_ushort)
        SET_SIZE(atomic_dec_16)
 158 
        /*
         * void atomic_dec_32(volatile uint32_t *target)
         * Atomically decrement the 32-bit value at *target.  No return value.
         */
        ENTRY(atomic_dec_32)
        ALTENTRY(atomic_dec_uint)
        ALTENTRY(atomic_dec_ulong)
        movl    4(%esp), %eax   / %eax = target address
        lock
        decl    (%eax)          / atomic 32-bit decrement
        ret
        SET_SIZE(atomic_dec_ulong)
        SET_SIZE(atomic_dec_uint)
        SET_SIZE(atomic_dec_32)
 169 
        /*
         * uint8_t atomic_dec_8_nv(volatile uint8_t *target)
         * Atomically decrement the 8-bit value at *target and return the
         * new value (in %al; upper bits of %eax are cleared).
         */
        ENTRY(atomic_dec_8_nv)
        ALTENTRY(atomic_dec_uchar_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / zero upper bits of %eax
        decb    %al             / %al = -1
        lock
          xaddb %al, (%edx)     / %al = old value, dec (%edx)
        decb    %al             / return new value
        ret
        SET_SIZE(atomic_dec_uchar_nv)
        SET_SIZE(atomic_dec_8_nv)
 181 
        /*
         * uint16_t atomic_dec_16_nv(volatile uint16_t *target)
         * Atomically decrement the 16-bit value at *target and return the
         * new value (in %ax; upper bits of %eax are cleared).
         */
        ENTRY(atomic_dec_16_nv)
        ALTENTRY(atomic_dec_ushort_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / zero upper bits of %eax
        decw    %ax             / %ax = -1
        lock
          xaddw %ax, (%edx)     / %ax = old value, dec (%edx)
        decw    %ax             / return new value
        ret
        SET_SIZE(atomic_dec_ushort_nv)
        SET_SIZE(atomic_dec_16_nv)
 193 
        /*
         * uint32_t atomic_dec_32_nv(volatile uint32_t *target)
         * Atomically decrement the 32-bit value at *target and return the
         * new value in %eax.
         */
        ENTRY(atomic_dec_32_nv)
        ALTENTRY(atomic_dec_uint_nv)
        ALTENTRY(atomic_dec_ulong_nv)
        movl    4(%esp), %edx   / %edx = target address
        xorl    %eax, %eax      / %eax = 0
        decl    %eax            / %eax = -1
        lock
          xaddl %eax, (%edx)    / %eax = old value, dec (%edx)
        decl    %eax            / return new value
        ret
        SET_SIZE(atomic_dec_ulong_nv)
        SET_SIZE(atomic_dec_uint_nv)
        SET_SIZE(atomic_dec_32_nv)
 207 
 208         /*
 209          * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
 210          * separated, it is important to edit the libc i386 platform
 211          * specific mapfile and remove the NODYNSORT attribute
 212          * from atomic_dec_64_nv.
 213          */
        /*
         * uint64_t atomic_dec_64_nv(volatile uint64_t *target)
         * void atomic_dec_64(volatile uint64_t *target)
         * Atomically decrement the 64-bit value at *target by adding -1
         * via a cmpxchg8b compare-and-swap loop; the new value is
         * returned in %edx:%eax.
         */
        ENTRY(atomic_dec_64)
        ALTENTRY(atomic_dec_64_nv)
        pushl   %edi
        pushl   %ebx
        movl    12(%esp), %edi  / %edi = target address
        movl    (%edi), %eax
        movl    4(%edi), %edx   / %edx:%eax = old value
1:
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        not     %ecx
        not     %ebx            / %ecx:%ebx = -1
        addl    %eax, %ebx
        adcl    %edx, %ecx      / add in the carry from the low-word add
        lock
        cmpxchg8b (%edi)        / try to stick it in
        jne     1b              / lost race: %edx:%eax reloaded, retry
        movl    %ebx, %eax
        movl    %ecx, %edx      / return new value
        popl    %ebx
        popl    %edi
        ret
        SET_SIZE(atomic_dec_64_nv)
        SET_SIZE(atomic_dec_64)
 238 
        /*
         * void atomic_add_8(volatile uint8_t *target, int8_t delta)
         * Atomically add delta to the 8-bit value at *target.
         */
        ENTRY(atomic_add_8)
        ALTENTRY(atomic_add_char)
        movl    4(%esp), %eax   / %eax = target address
        movl    8(%esp), %ecx   / %cl = delta
        lock
        addb    %cl, (%eax)     / atomic byte add
        ret
        SET_SIZE(atomic_add_char)
        SET_SIZE(atomic_add_8)
 248 
        /*
         * void atomic_add_16(volatile uint16_t *target, int16_t delta)
         * Atomically add delta to the 16-bit value at *target.
         */
        ENTRY(atomic_add_16)
        ALTENTRY(atomic_add_short)
        movl    4(%esp), %eax   / %eax = target address
        movl    8(%esp), %ecx   / %cx = delta
        lock
        addw    %cx, (%eax)     / atomic word add
        ret
        SET_SIZE(atomic_add_short)
        SET_SIZE(atomic_add_16)
 258 
        /*
         * void atomic_add_32(volatile uint32_t *target, int32_t delta)
         * Atomically add delta to the 32-bit value at *target.
         * int/long/pointer are all 32 bits on i386, hence the aliases.
         */
        ENTRY(atomic_add_32)
        ALTENTRY(atomic_add_int)
        ALTENTRY(atomic_add_ptr)
        ALTENTRY(atomic_add_long)
        movl    4(%esp), %eax   / %eax = target address
        movl    8(%esp), %ecx   / %ecx = delta
        lock
        addl    %ecx, (%eax)    / atomic 32-bit add
        ret
        SET_SIZE(atomic_add_long)
        SET_SIZE(atomic_add_ptr)
        SET_SIZE(atomic_add_int)
        SET_SIZE(atomic_add_32)
 272 
        /*
         * void atomic_or_8(volatile uint8_t *target, uint8_t bits)
         * Atomically OR bits into the 8-bit value at *target.
         */
        ENTRY(atomic_or_8)
        ALTENTRY(atomic_or_uchar)
        movl    4(%esp), %eax   / %eax = target address
        movb    8(%esp), %cl    / %cl = bits to set
        lock
        orb     %cl, (%eax)     / atomic byte OR
        ret
        SET_SIZE(atomic_or_uchar)
        SET_SIZE(atomic_or_8)
 282 
        /*
         * void atomic_or_16(volatile uint16_t *target, uint16_t bits)
         * Atomically OR bits into the 16-bit value at *target.
         */
        ENTRY(atomic_or_16)
        ALTENTRY(atomic_or_ushort)
        movl    4(%esp), %eax   / %eax = target address
        movw    8(%esp), %cx    / %cx = bits to set
        lock
        orw     %cx, (%eax)     / atomic word OR
        ret
        SET_SIZE(atomic_or_ushort)
        SET_SIZE(atomic_or_16)
 292 
        /*
         * void atomic_or_32(volatile uint32_t *target, uint32_t bits)
         * Atomically OR bits into the 32-bit value at *target.
         */
        ENTRY(atomic_or_32)
        ALTENTRY(atomic_or_uint)
        ALTENTRY(atomic_or_ulong)
        movl    4(%esp), %eax   / %eax = target address
        movl    8(%esp), %ecx   / %ecx = bits to set
        lock
        orl     %ecx, (%eax)    / atomic 32-bit OR
        ret
        SET_SIZE(atomic_or_ulong)
        SET_SIZE(atomic_or_uint)
        SET_SIZE(atomic_or_32)
 304 
        /*
         * void atomic_and_8(volatile uint8_t *target, uint8_t bits)
         * Atomically AND bits into the 8-bit value at *target.
         */
        ENTRY(atomic_and_8)
        ALTENTRY(atomic_and_uchar)
        movl    4(%esp), %eax   / %eax = target address
        movb    8(%esp), %cl    / %cl = mask
        lock
        andb    %cl, (%eax)     / atomic byte AND
        ret
        SET_SIZE(atomic_and_uchar)
        SET_SIZE(atomic_and_8)
 314 
        /*
         * void atomic_and_16(volatile uint16_t *target, uint16_t bits)
         * Atomically AND bits into the 16-bit value at *target.
         */
        ENTRY(atomic_and_16)
        ALTENTRY(atomic_and_ushort)
        movl    4(%esp), %eax   / %eax = target address
        movw    8(%esp), %cx    / %cx = mask
        lock
        andw    %cx, (%eax)     / atomic word AND
        ret
        SET_SIZE(atomic_and_ushort)
        SET_SIZE(atomic_and_16)
 324 
        /*
         * void atomic_and_32(volatile uint32_t *target, uint32_t bits)
         * Atomically AND bits into the 32-bit value at *target.
         */
        ENTRY(atomic_and_32)
        ALTENTRY(atomic_and_uint)
        ALTENTRY(atomic_and_ulong)
        movl    4(%esp), %eax   / %eax = target address
        movl    8(%esp), %ecx   / %ecx = mask
        lock
        andl    %ecx, (%eax)    / atomic 32-bit AND
        ret
        SET_SIZE(atomic_and_ulong)
        SET_SIZE(atomic_and_uint)
        SET_SIZE(atomic_and_32)
 336 
        /*
         * uint8_t atomic_add_8_nv(volatile uint8_t *target, int8_t delta)
         * Atomically add delta to the 8-bit value at *target and return
         * the new value (old + delta) in %al.
         */
        ENTRY(atomic_add_8_nv)
        ALTENTRY(atomic_add_char_nv)
        movl    4(%esp), %edx   / %edx = target address
        movb    8(%esp), %cl    / %cl = delta
        movzbl  %cl, %eax       / %al = delta, zero extended
        lock
          xaddb %cl, (%edx)     / %cl = old value, (%edx) = sum
        addb    %cl, %al        / return old value plus delta
        ret
        SET_SIZE(atomic_add_char_nv)
        SET_SIZE(atomic_add_8_nv)
 348 
        /*
         * uint16_t atomic_add_16_nv(volatile uint16_t *target, int16_t delta)
         * Atomically add delta to the 16-bit value at *target and return
         * the new value (old + delta) in %ax.
         */
        ENTRY(atomic_add_16_nv)
        ALTENTRY(atomic_add_short_nv)
        movl    4(%esp), %edx   / %edx = target address
        movw    8(%esp), %cx    / %cx = delta
        movzwl  %cx, %eax       / %ax = delta, zero extended
        lock
          xaddw %cx, (%edx)     / %cx = old value, (%edx) = sum
        addw    %cx, %ax        / return old value plus delta
        ret
        SET_SIZE(atomic_add_short_nv)
        SET_SIZE(atomic_add_16_nv)
 360 
        /*
         * uint32_t atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
         * Atomically add delta to the 32-bit value at *target and return
         * the new value (old + delta) in %eax.
         */
        ENTRY(atomic_add_32_nv)
        ALTENTRY(atomic_add_int_nv)
        ALTENTRY(atomic_add_ptr_nv)
        ALTENTRY(atomic_add_long_nv)
        movl    4(%esp), %edx   / %edx = target address
        movl    8(%esp), %eax   / %eax = delta
        movl    %eax, %ecx      / %ecx = delta
        lock
          xaddl %eax, (%edx)    / %eax = old value, (%edx) = sum
        addl    %ecx, %eax      / return old value plus delta
        ret
        SET_SIZE(atomic_add_long_nv)
        SET_SIZE(atomic_add_ptr_nv)
        SET_SIZE(atomic_add_int_nv)
        SET_SIZE(atomic_add_32_nv)
 376 
 377         /*
 378          * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
 379          * separated, it is important to edit the libc i386 platform
 380          * specific mapfile and remove the NODYNSORT attribute
 381          * from atomic_add_64_nv.
 382          */
        /*
         * uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
         * void atomic_add_64(volatile uint64_t *target, int64_t delta)
         * Atomically add the 64-bit delta to *target using a cmpxchg8b
         * compare-and-swap loop; the new value is returned in %edx:%eax.
         */
        ENTRY(atomic_add_64)
        ALTENTRY(atomic_add_64_nv)
        pushl   %edi
        pushl   %ebx
        movl    12(%esp), %edi  / %edi = target address
        movl    (%edi), %eax
        movl    4(%edi), %edx   / %edx:%eax = old value
1:
        movl    16(%esp), %ebx
        movl    20(%esp), %ecx  / %ecx:%ebx = delta
        addl    %eax, %ebx
        adcl    %edx, %ecx      / %ecx:%ebx = new value
        lock
        cmpxchg8b (%edi)        / try to stick it in
        jne     1b              / lost race: %edx:%eax reloaded, retry
        movl    %ebx, %eax
        movl    %ecx, %edx      / return new value
        popl    %ebx
        popl    %edi
        ret
        SET_SIZE(atomic_add_64_nv)
        SET_SIZE(atomic_add_64)
 405 
        /*
         * uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t bits)
         * Atomically OR bits into the 8-bit value at *target via a
         * cmpxchgb loop and return the new value in %eax (zero-extended).
         */
        ENTRY(atomic_or_8_nv)
        ALTENTRY(atomic_or_uchar_nv)
        movl    4(%esp), %edx   / %edx = target address
        movb    (%edx), %al     / %al = old value
1:
        movl    8(%esp), %ecx   / %cl = bits to set
        orb     %al, %cl        / %cl = new value
        lock
        cmpxchgb %cl, (%edx)    / try to stick it in
        jne     1b              / lost race: %al reloaded, retry
        movzbl  %cl, %eax       / return new value
        ret
        SET_SIZE(atomic_or_uchar_nv)
        SET_SIZE(atomic_or_8_nv)
 420 
        /*
         * uint16_t atomic_or_16_nv(volatile uint16_t *target, uint16_t bits)
         * Atomically OR bits into the 16-bit value at *target via a
         * cmpxchgw loop and return the new value in %eax (zero-extended).
         */
        ENTRY(atomic_or_16_nv)
        ALTENTRY(atomic_or_ushort_nv)
        movl    4(%esp), %edx   / %edx = target address
        movw    (%edx), %ax     / %ax = old value
1:
        movl    8(%esp), %ecx   / %cx = bits to set
        orw     %ax, %cx        / %cx = new value
        lock
        cmpxchgw %cx, (%edx)    / try to stick it in
        jne     1b              / lost race: %ax reloaded, retry
        movzwl  %cx, %eax       / return new value
        ret
        SET_SIZE(atomic_or_ushort_nv)
        SET_SIZE(atomic_or_16_nv)
 435 
        /*
         * uint32_t atomic_or_32_nv(volatile uint32_t *target, uint32_t bits)
         * Atomically OR bits into the 32-bit value at *target via a
         * cmpxchgl loop and return the new value in %eax.
         */
        ENTRY(atomic_or_32_nv)
        ALTENTRY(atomic_or_uint_nv)
        ALTENTRY(atomic_or_ulong_nv)
        movl    4(%esp), %edx   / %edx = target address
        movl    (%edx), %eax    / %eax = old value
1:
        movl    8(%esp), %ecx   / %ecx = bits to set
        orl     %eax, %ecx      / %ecx = new value
        lock
        cmpxchgl %ecx, (%edx)   / try to stick it in
        jne     1b              / lost race: %eax reloaded, retry
        movl    %ecx, %eax      / return new value
        ret
        SET_SIZE(atomic_or_ulong_nv)
        SET_SIZE(atomic_or_uint_nv)
        SET_SIZE(atomic_or_32_nv)
 452 
 453         /*
 454          * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
 455          * separated, it is important to edit the libc i386 platform
 456          * specific mapfile and remove the NODYNSORT attribute
 457          * from atomic_or_64_nv.
 458          */
        /*
         * uint64_t atomic_or_64_nv(volatile uint64_t *target, uint64_t bits)
         * void atomic_or_64(volatile uint64_t *target, uint64_t bits)
         * Atomically OR the 64-bit bits into *target using a cmpxchg8b
         * compare-and-swap loop; the new value is returned in %edx:%eax.
         */
        ENTRY(atomic_or_64)
        ALTENTRY(atomic_or_64_nv)
        pushl   %edi
        pushl   %ebx
        movl    12(%esp), %edi  / %edi = target address
        movl    (%edi), %eax
        movl    4(%edi), %edx   / %edx:%eax = old value
1:
        movl    16(%esp), %ebx
        movl    20(%esp), %ecx  / %ecx:%ebx = bits to set
        orl     %eax, %ebx
        orl     %edx, %ecx      / %ecx:%ebx = new value
        lock
        cmpxchg8b (%edi)        / try to stick it in
        jne     1b              / lost race: %edx:%eax reloaded, retry
        movl    %ebx, %eax
        movl    %ecx, %edx      / return new value
        popl    %ebx
        popl    %edi
        ret
        SET_SIZE(atomic_or_64_nv)
        SET_SIZE(atomic_or_64)
 481 
        /*
         * uint8_t atomic_and_8_nv(volatile uint8_t *target, uint8_t bits)
         * Atomically AND bits into the 8-bit value at *target via a
         * cmpxchgb loop and return the new value in %eax (zero-extended).
         */
        ENTRY(atomic_and_8_nv)
        ALTENTRY(atomic_and_uchar_nv)
        movl    4(%esp), %edx   / %edx = target address
        movb    (%edx), %al     / %al = old value
1:
        movl    8(%esp), %ecx   / %cl = mask
        andb    %al, %cl        / %cl = new value
        lock
        cmpxchgb %cl, (%edx)    / try to stick it in
        jne     1b              / lost race: %al reloaded, retry
        movzbl  %cl, %eax       / return new value
        ret
        SET_SIZE(atomic_and_uchar_nv)
        SET_SIZE(atomic_and_8_nv)
 496 
        /*
         * uint16_t atomic_and_16_nv(volatile uint16_t *target, uint16_t bits)
         * Atomically AND bits into the 16-bit value at *target via a
         * cmpxchgw loop and return the new value in %eax (zero-extended).
         */
        ENTRY(atomic_and_16_nv)
        ALTENTRY(atomic_and_ushort_nv)
        movl    4(%esp), %edx   / %edx = target address
        movw    (%edx), %ax     / %ax = old value
1:
        movl    8(%esp), %ecx   / %cx = mask
        andw    %ax, %cx        / %cx = new value
        lock
        cmpxchgw %cx, (%edx)    / try to stick it in
        jne     1b              / lost race: %ax reloaded, retry
        movzwl  %cx, %eax       / return new value
        ret
        SET_SIZE(atomic_and_ushort_nv)
        SET_SIZE(atomic_and_16_nv)
 511 
        /*
         * uint32_t atomic_and_32_nv(volatile uint32_t *target, uint32_t bits)
         * Atomically AND bits into the 32-bit value at *target via a
         * cmpxchgl loop and return the new value in %eax.
         */
        ENTRY(atomic_and_32_nv)
        ALTENTRY(atomic_and_uint_nv)
        ALTENTRY(atomic_and_ulong_nv)
        movl    4(%esp), %edx   / %edx = target address
        movl    (%edx), %eax    / %eax = old value
1:
        movl    8(%esp), %ecx   / %ecx = mask
        andl    %eax, %ecx      / %ecx = new value
        lock
        cmpxchgl %ecx, (%edx)   / try to stick it in
        jne     1b              / lost race: %eax reloaded, retry
        movl    %ecx, %eax      / return new value
        ret
        SET_SIZE(atomic_and_ulong_nv)
        SET_SIZE(atomic_and_uint_nv)
        SET_SIZE(atomic_and_32_nv)
 528 
 529         /*
 530          * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
 531          * separated, it is important to edit the libc i386 platform
 532          * specific mapfile and remove the NODYNSORT attribute
 533          * from atomic_and_64_nv.
 534          */
        /*
         * uint64_t atomic_and_64_nv(volatile uint64_t *target, uint64_t bits)
         * void atomic_and_64(volatile uint64_t *target, uint64_t bits)
         * Atomically AND the 64-bit bits into *target using a cmpxchg8b
         * compare-and-swap loop; the new value is returned in %edx:%eax.
         */
        ENTRY(atomic_and_64)
        ALTENTRY(atomic_and_64_nv)
        pushl   %edi
        pushl   %ebx
        movl    12(%esp), %edi  / %edi = target address
        movl    (%edi), %eax
        movl    4(%edi), %edx   / %edx:%eax = old value
1:
        movl    16(%esp), %ebx
        movl    20(%esp), %ecx  / %ecx:%ebx = mask
        andl    %eax, %ebx
        andl    %edx, %ecx      / %ecx:%ebx = new value
        lock
        cmpxchg8b (%edi)        / try to stick it in
        jne     1b              / lost race: %edx:%eax reloaded, retry
        movl    %ebx, %eax
        movl    %ecx, %edx      / return new value
        popl    %ebx
        popl    %edi
        ret
        SET_SIZE(atomic_and_64_nv)
        SET_SIZE(atomic_and_64)
 557 
        /*
         * uint8_t atomic_cas_8(volatile uint8_t *target, uint8_t cmp,
         *     uint8_t newval)
         * If *target == cmp, atomically store newval; return the value
         * previously in *target (in %al, whether or not the swap occurred).
         */
        ENTRY(atomic_cas_8)
        ALTENTRY(atomic_cas_uchar)
        movl    4(%esp), %edx   / %edx = target address
        movzbl  8(%esp), %eax   / %al = compare value
        movb    12(%esp), %cl   / %cl = new value
        lock
        cmpxchgb %cl, (%edx)    / %al = old value either way
        ret
        SET_SIZE(atomic_cas_uchar)
        SET_SIZE(atomic_cas_8)
 568 
        /*
         * uint16_t atomic_cas_16(volatile uint16_t *target, uint16_t cmp,
         *     uint16_t newval)
         * If *target == cmp, atomically store newval; return the value
         * previously in *target (in %ax, whether or not the swap occurred).
         */
        ENTRY(atomic_cas_16)
        ALTENTRY(atomic_cas_ushort)
        movl    4(%esp), %edx   / %edx = target address
        movzwl  8(%esp), %eax   / %ax = compare value
        movw    12(%esp), %cx   / %cx = new value
        lock
        cmpxchgw %cx, (%edx)    / %ax = old value either way
        ret
        SET_SIZE(atomic_cas_ushort)
        SET_SIZE(atomic_cas_16)
 579 
        /*
         * uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
         *     uint32_t newval)
         * If *target == cmp, atomically store newval; return the value
         * previously in *target (in %eax, whether or not the swap occurred).
         * Also serves the ulong and pointer variants on i386.
         */
        ENTRY(atomic_cas_32)
        ALTENTRY(atomic_cas_uint)
        ALTENTRY(atomic_cas_ulong)
        ALTENTRY(atomic_cas_ptr)
        movl    4(%esp), %edx   / %edx = target address
        movl    8(%esp), %eax   / %eax = compare value
        movl    12(%esp), %ecx  / %ecx = new value
        lock
        cmpxchgl %ecx, (%edx)   / %eax = old value either way
        ret
        SET_SIZE(atomic_cas_ptr)
        SET_SIZE(atomic_cas_ulong)
        SET_SIZE(atomic_cas_uint)
        SET_SIZE(atomic_cas_32)
 594 
        /*
         * uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
         *     uint64_t newval)
         * 64-bit compare-and-swap.  cmpxchg8b compares %edx:%eax with
         * *target; on match it stores %ecx:%ebx, otherwise it loads the
         * current value into %edx:%eax.  Either way %edx:%eax holds the
         * old value on return.
         */
        ENTRY(atomic_cas_64)
        pushl   %ebx
        pushl   %esi
        movl    12(%esp), %esi  / %esi = target address
        movl    16(%esp), %eax
        movl    20(%esp), %edx  / %edx:%eax = compare value
        movl    24(%esp), %ebx
        movl    28(%esp), %ecx  / %ecx:%ebx = new value
        lock
        cmpxchg8b (%esi)
        popl    %esi
        popl    %ebx
        ret
        SET_SIZE(atomic_cas_64)
 609 
        /*
         * uint8_t atomic_swap_8(volatile uint8_t *target, uint8_t newval)
         * Atomically store newval in *target and return the old value
         * in %eax (zero-extended).  The lock prefix is redundant for
         * xchg with a memory operand but harmless.
         */
        ENTRY(atomic_swap_8)
        ALTENTRY(atomic_swap_uchar)
        movl    4(%esp), %edx   / %edx = target address
        movzbl  8(%esp), %eax   / %al = new value
        lock
        xchgb   %al, (%edx)     / %al = old value
        ret
        SET_SIZE(atomic_swap_uchar)
        SET_SIZE(atomic_swap_8)
 619 
        /*
         * uint16_t atomic_swap_16(volatile uint16_t *target, uint16_t newval)
         * Atomically store newval in *target and return the old value
         * in %eax (zero-extended).
         */
        ENTRY(atomic_swap_16)
        ALTENTRY(atomic_swap_ushort)
        movl    4(%esp), %edx   / %edx = target address
        movzwl  8(%esp), %eax   / %ax = new value
        lock
        xchgw   %ax, (%edx)     / %ax = old value
        ret
        SET_SIZE(atomic_swap_ushort)
        SET_SIZE(atomic_swap_16)
 629 
        /*
         * uint32_t atomic_swap_32(volatile uint32_t *target, uint32_t newval)
         * Atomically store newval in *target and return the old value
         * in %eax.  Also serves the uint/ulong/pointer variants on i386.
         */
        ENTRY(atomic_swap_32)
        ALTENTRY(atomic_swap_uint)
        ALTENTRY(atomic_swap_ptr)
        ALTENTRY(atomic_swap_ulong)
        movl    4(%esp), %edx   / %edx = target address
        movl    8(%esp), %eax   / %eax = new value
        lock
        xchgl   %eax, (%edx)    / %eax = old value
        ret
        SET_SIZE(atomic_swap_ulong)
        SET_SIZE(atomic_swap_ptr)
        SET_SIZE(atomic_swap_uint)
        SET_SIZE(atomic_swap_32)
 643 
        /*
         * uint64_t atomic_swap_64(volatile uint64_t *target, uint64_t newval)
         * Atomically store the 64-bit newval in *target via a cmpxchg8b
         * loop and return the old value in %edx:%eax (left there by the
         * final, successful cmpxchg8b).
         */
        ENTRY(atomic_swap_64)
        pushl   %esi
        pushl   %ebx
        movl    12(%esp), %esi  / %esi = target address
        movl    16(%esp), %ebx
        movl    20(%esp), %ecx  / %ecx:%ebx = new value
        movl    (%esi), %eax
        movl    4(%esi), %edx   / %edx:%eax = old value
1:
        lock
        cmpxchg8b (%esi)        / swap in new value if unchanged
        jne     1b              / lost race: %edx:%eax reloaded, retry
        popl    %ebx
        popl    %esi
        ret
        SET_SIZE(atomic_swap_64)
 660 
        /*
         * int atomic_set_long_excl(volatile ulong_t *target, uint_t bit)
         * Atomically test-and-set the given bit.  Returns 0 (in %eax) if
         * the bit was clear and has now been set, -1 if it was already set.
         */
        ENTRY(atomic_set_long_excl)
        movl    4(%esp), %edx   / %edx = target address
        movl    8(%esp), %ecx   / %ecx = bit id
        xorl    %eax, %eax      / assume success: return 0
        lock
        btsl    %ecx, (%edx)    / CF = previous bit value
        jnc     1f              / bit was clear: success
        decl    %eax            / return -1
1:
        ret
        SET_SIZE(atomic_set_long_excl)
 672 
        /*
         * int atomic_clear_long_excl(volatile ulong_t *target, uint_t bit)
         * Atomically test-and-clear the given bit.  Returns 0 (in %eax) if
         * the bit was set and has now been cleared, -1 if it was already
         * clear.
         */
        ENTRY(atomic_clear_long_excl)
        movl    4(%esp), %edx   / %edx = target address
        movl    8(%esp), %ecx   / %ecx = bit id
        xorl    %eax, %eax      / assume success: return 0
        lock
        btrl    %ecx, (%edx)    / CF = previous bit value
        jc      1f              / bit was set: success
        decl    %eax            / return -1
1:
        ret
        SET_SIZE(atomic_clear_long_excl)
 684 
 685 #if !defined(_KERNEL)
 686 
 687         /*
 688          * NOTE: membar_enter, membar_exit, membar_producer, and 
 689          * membar_consumer are all identical routines. We define them
 690          * separately, instead of using ALTENTRY definitions to alias them
 691          * together, so that DTrace and debuggers will see a unique address
 692          * for them, allowing more accurate tracing.
 693         */
 694 
 695 
        /*
         * void membar_enter(void)
         * Full memory barrier implemented as a locked no-op RMW on the
         * stack (the lock prefix serializes memory operations).
         */
        ENTRY(membar_enter)
        lock
        xorl    $0, (%esp)      / locked no-op acts as a full fence
        ret
        SET_SIZE(membar_enter)
 701 
        /*
         * void membar_exit(void)
         * Full memory barrier; identical implementation to membar_enter
         * (kept as a separate symbol for tracing -- see note above).
         */
        ENTRY(membar_exit)
        lock
        xorl    $0, (%esp)      / locked no-op acts as a full fence
        ret
        SET_SIZE(membar_exit)
 707 
        /*
         * void membar_producer(void)
         * Store barrier; implemented as a full fence via a locked no-op
         * RMW on the stack, which is sufficient (stronger than required).
         */
        ENTRY(membar_producer)
        lock
        xorl    $0, (%esp)      / locked no-op acts as a full fence
        ret
        SET_SIZE(membar_producer)
 713 
        /*
         * void membar_consumer(void)
         * Load barrier; implemented as a full fence via a locked no-op
         * RMW on the stack, which is sufficient (stronger than required).
         */
        ENTRY(membar_consumer)
        lock
        xorl    $0, (%esp)      / locked no-op acts as a full fence
        ret
        SET_SIZE(membar_consumer)
 719 
 720 #endif  /* !_KERNEL */