@@ -618,63 +618,40 @@
     return PlatformAdd<sizeof(D)>()(addend, dest, order);
   }
 };
 
 template<typename I, typename P>
 struct Atomic::AddImpl<
   I, P*,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
   P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
     return PlatformAdd<sizeof(P*)>()(addend, dest, order);
   }
 };
 
-// Most platforms do not support atomic add on a 2-byte value. However,
-// if the value occupies the most significant 16 bits of an aligned 32-bit
-// word, then we can do this with an atomic add of (add_value << 16)
-// to the 32-bit word.
-//
-// The least significant parts of this 32-bit word will never be affected, even
-// in case of overflow/underflow.
-//
-// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
-template<>
-struct Atomic::AddImpl<short, short> {
-  short operator()(short add_value, short volatile* dest, atomic_memory_order order) const {
-#ifdef VM_LITTLE_ENDIAN
-    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
-#else
-    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
-#endif
-    return (short)(new_value >> 16); // preserves sign
-  }
-};
-
 template<typename Derived>
 template<typename I, typename D>
 inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                   atomic_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
   D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
   return old + add_value;
 }
 
 template<typename Derived>
 template<typename I, typename D>
 inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                   atomic_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
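For context on the deleted AddImpl<short, short> specialization: its high-half trick can be reproduced in portable C++. The sketch below is illustrative only; it substitutes std::atomic<int32_t> for Atomic::add / PlatformAdd, and add_short_hi is a made-up name. It shows why the low 16 bits of the containing word are never disturbed: the addend's low half is zero, so the addition can never generate a carry out of bit 15, while overflow of the high half simply falls off bit 31.

#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the removed AddImpl<short, short>::operator().
// The short occupies the most significant 16 bits of *word.
short add_short_hi(short add_value, std::atomic<int32_t>* word) {
  // Shift in unsigned arithmetic to sidestep signed-shift pitfalls; the
  // result equals add_value << 16 in two's complement.
  int32_t addend = int32_t(uint32_t(uint16_t(add_value)) << 16);
  // fetch_add returns the OLD value, whereas Atomic::add returns the NEW
  // value, so re-add the addend before extracting the high half.
  int32_t new_value = word->fetch_add(addend) + addend;
  return short(new_value >> 16); // arithmetic shift preserves the sign
}

int main() {
  std::atomic<int32_t> word(0);
  assert(add_short_hi(5, &word) == 5);
  assert(add_short_hi(-7, &word) == -2);  // high half wraps below zero
  assert((word.load() & 0xFFFF) == 0);    // low half never touched
}

The HotSpot version additionally asserted the short's placement within the 32-bit word (byte offset 2 on little-endian, 0 on big-endian), which the ATOMIC_SHORT_PAIR layout macro guaranteed.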
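The retained FetchAndAdd and AddAndFetch wrappers encode two conventions behind the common Atomic::add front end: a platform primitive may return either the old value (x86-style XADD, wrapped by FetchAndAdd) or the new value (wrapped by AddAndFetch), and for a pointer destination the primitive adds raw bytes, so the wrapper pre-scales the addend by sizeof(P). Below is a minimal sketch of the FetchAndAdd side under stated assumptions: FetchAndAddWrapper and StubPlatform are invented names, std::is_pointer / std::remove_pointer stand in for HotSpot's metaprogramming utilities, and the primitive is a non-atomic stand-in, since only the scaling and the old-to-new conversion are of interest here.

#include <cassert>
#include <cstddef>
#include <type_traits>

// Hypothetical analogue of Atomic::FetchAndAdd<Derived>::operator().
template<typename Derived>
struct FetchAndAddWrapper {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    I addend = add_value;
    // The primitive adds raw bytes, so pre-scale pointer addends.
    if (std::is_pointer<D>::value) {
      addend *= sizeof(typename std::remove_pointer<D>::type);
    }
    D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
    // Convert old-value to new-value semantics. The *unscaled* add_value
    // is re-added: for pointer D, ordinary pointer arithmetic rescales it.
    return old + add_value;
  }
};

// Non-atomic stand-in for a platform's byte-oriented XADD-style primitive.
struct StubPlatform : FetchAndAddWrapper<StubPlatform> {
  template<typename I, typename D>
  D fetch_and_add(I addend, D volatile* dest) const {
    D old = *dest;
    *dest = static_cast<D>(old + addend);
    return old;
  }
  template<typename I, typename P>
  P* fetch_and_add(I addend, P* volatile* dest) const {  // addend is in bytes
    P* old = *dest;
    *dest = reinterpret_cast<P*>(reinterpret_cast<char*>(old) + addend);
    return old;
  }
};

int main() {
  StubPlatform add;

  int volatile counter = 40;
  assert(add(2, &counter) == 42);  // new-value semantics

  long data[4] = {10, 11, 12, 13};
  long* volatile cursor = data;
  long* now = add(3, &cursor);     // advances 3 elements, not 3 bytes
  assert(now == data + 3 && cursor == data + 3);
}

AddAndFetch needs no such conversion: its primitive already returns the new value, so only the pointer scaling remains, as in the tail of the hunk above.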