Commit 3dd9f217 authored by thakis@chromium.org

base atomicops: Drop SSE2 detection, we always require SSE2 starting in m35.

BUG=348761,94925
NOTRY=true

Review URL: https://codereview.chromium.org/291993003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@271506 0039d316-1c4b-4281-b951-d872f2087c98
parent bbd32301
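The net effect, visible in the hunks below, is that the 32-bit x86 barrier paths no longer branch on a runtime has_sse2 flag: the formerly 64-bit-only implementations that issue mfence unconditionally now apply everywhere. A minimal sketch of the resulting shape (simplified and self-contained for illustration; the real code sits in Chromium's base atomicops headers behind additional macros):

#include <stdint.h>

typedef int32_t Atomic32;

// With SSE2 guaranteed, mfence is always available as the full barrier.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

// Acquire_Store, as in the diff: a plain store followed by a full barrier.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}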
@@ -40,7 +40,6 @@
 // default values should hopefully be pretty safe.
 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
   false,          // bug can't exist before process spawns multiple threads
-  false,          // no SSE2
 };

 namespace {

@@ -82,9 +81,6 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
   } else {
     AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
   }
-  // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
-  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
 }

 class AtomicOpsx86Initializer {
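The hunks above are from the .cc file that populates the CPU-feature struct; the remaining hunks are from the corresponding header with the inline barrier implementations. For reference, the deleted detection tested EDX bit 26 of CPUID leaf 1, which is the architecturally defined SSE2 feature flag. A standalone sketch of the same check, using GCC/Clang's <cpuid.h> helper rather than the hand-rolled cpuid sequence in the .cc file (hypothetical example, not the removed code):

#include <cpuid.h>   // __get_cpuid; GCC/Clang only
#include <cstdio>

// Returns true if CPUID leaf 1 reports SSE2 (EDX bit 26).
static bool CpuHasSse2() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return false;            // leaf 1 unsupported (ancient CPU)
  return (edx >> 26) & 1;    // same test the deleted line performed
}

int main() {
  std::printf("SSE2: %s\n", CpuHasSse2() ? "yes" : "no");
}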
@@ -17,7 +17,6 @@
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
-  bool has_sse2;             // Processor has SSE2.
 };

 BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
     AtomicOps_Internalx86CPUFeatures;
@@ -92,10 +91,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
 inline void MemoryBarrier() {
   __asm__ __volatile__("mfence" : : : "memory");
 }

@@ -105,28 +100,6 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
 }

-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-    // acts as a barrier on PIII
-  }
-}
-
-#endif
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;  // An x86 store acts as a release barrier.
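The removed #else branch documents the pre-SSE2 trick: when mfence is unavailable (e.g. on a Pentium III), a LOCKed read-modify-write on a dummy location serves as the full barrier, because locked instructions act as full fences on x86. A minimal sketch of that fallback idea in isolation (illustrative only, not the deleted Chromium helper):

#include <stdint.h>

typedef int32_t Atomic32;

// Full barrier without mfence: xchg with a memory operand carries an implicit
// LOCK prefix, and locked instructions fence all earlier loads and stores.
inline void LegacyMemoryBarrier() {
  Atomic32 dummy = 0;
  Atomic32 value = 0;
  __asm__ __volatile__("xchgl %1, %0"
                       : "+r"(value), "+m"(dummy)
                       : /* no pure inputs */
                       : "memory");   // also a compiler barrier
}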