Fix incorrectly disabled SSE2 WebAudio optimizations for Windows.

MSVC doesn't set the __SSE__ or __SSE2__ defines:

http://msdn.microsoft.com/en-us/library/b0084kay.aspx

BUG=349320
TEST=compiles

Review URL: https://codereview.chromium.org/307963005

git-svn-id: svn://svn.chromium.org/blink/trunk@175173 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 149b1e3f
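
The change replaces the compiler-specific __SSE2__ guard with the architecture macros from wtf/CPU.h, since every x86 and x86-64 target these files build for can use SSE2. A rough sketch of the idea (simplified, illustrative macro definitions only; the real ones live in wtf/CPU.h):

```cpp
// Illustrative sketch only, not the literal wtf/CPU.h source.
// MSVC defines _M_IX86 / _M_X64 for x86 targets but never __SSE2__, so keying
// off the target architecture keeps the SSE2 paths enabled on Windows.
#if defined(_M_IX86) || defined(__i386__)
#define WTF_CPU_X86 1
#endif
#if defined(_M_X64) || defined(__x86_64__)
#define WTF_CPU_X86_64 1
#endif

// Inside #if conditions an undefined identifier evaluates to 0, so this reads
// as "is this architecture flag set".
#define CPU(WTF_FEATURE) (WTF_CPU_##WTF_FEATURE)

#if CPU(X86) || CPU(X86_64)
#include <emmintrin.h> // SSE2 intrinsics are available on these targets.
#endif
```

x86-64 guarantees SSE2, and Chromium's 32-bit x86 builds already require it as a baseline, so gating on the architecture rather than on a compiler flag is safe.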
platform/audio/SincResampler.cpp

@@ -33,9 +33,10 @@
 #include "platform/audio/SincResampler.h"
 #include "platform/audio/AudioBus.h"
+#include "wtf/CPU.h"
 #include "wtf/MathExtras.h"
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
 #include <emmintrin.h>
 #endif

@@ -262,7 +263,7 @@ void SincResampler::process(AudioSourceProvider* sourceProvider, float* destinat
 {
     float input;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
     while ((reinterpret_cast<uintptr_t>(inputP) & 0x0F) && n) {
         CONVOLVE_ONE_SAMPLE
platform/audio/VectorMath.cpp

@@ -35,7 +35,7 @@
 #include <Accelerate/Accelerate.h>
 #endif
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
 #include <emmintrin.h>
 #endif

@@ -135,7 +135,7 @@ void vsma(const float* sourceP, int sourceStride, const float* scale, float* des
 {
     int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride == 1) && (destStride == 1)) {
         float k = *scale;

@@ -208,7 +208,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* de
 {
     int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride == 1) && (destStride == 1)) {
         float k = *scale;

@@ -279,7 +279,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* de
         sourceP += sourceStride;
         destP += destStride;
     }
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     }
 #endif
 }

@@ -288,7 +288,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int s
 {
     int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride1 ==1) && (sourceStride2 == 1) && (destStride == 1)) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<size_t>(source1P) & 0x0F) && n) {

@@ -391,7 +391,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int s
         source2P += sourceStride2;
         destP += destStride;
     }
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     }
 #endif
 }

@@ -401,7 +401,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int s
     int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if ((sourceStride1 == 1) && (sourceStride2 == 1) && (destStride == 1)) {
         // If the source1P address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(source1P) & 0x0F) && n) {

@@ -474,7 +474,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int s
 void zvmul(const float* real1P, const float* imag1P, const float* real2P, const float* imag2P, float* realDestP, float* imagDestP, size_t framesToProcess)
 {
     unsigned i = 0;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     // Only use the SSE optimization in the very common case that all addresses are 16-byte aligned.
     // Otherwise, fall through to the scalar code below.
     if (!(reinterpret_cast<uintptr_t>(real1P) & 0x0F)

@@ -532,7 +532,7 @@ void vsvesq(const float* sourceP, int sourceStride, float* sumP, size_t framesTo
     int n = framesToProcess;
     float sum = 0;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if (sourceStride == 1) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {

@@ -597,7 +597,7 @@ void vmaxmgv(const float* sourceP, int sourceStride, float* maxP, size_t framesT
     int n = framesToProcess;
     float max = 0;
-#ifdef __SSE2__
+#if CPU(X86) || CPU(X86_64)
     if (sourceStride == 1) {
         // If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
         while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
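
All of the guarded blocks above share the same shape: process frames with scalar code until the source pointer reaches a 16-byte boundary (at most three frames), run the aligned bulk through SSE2 four floats at a time, then finish any remainder with scalar code. A minimal sketch of that pattern, using a hypothetical scaleBuffer() helper rather than the actual VectorMath.cpp code:

```cpp
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical illustration of the head/SIMD/tail pattern used by the guarded
// blocks above; the real vsmul() in VectorMath.cpp differs in details.
static void scaleBuffer(const float* sourceP, float scale, float* destP, size_t framesToProcess)
{
    size_t n = framesToProcess;

    // Head: at most three frames until sourceP reaches 16-byte alignment.
    while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
        *destP++ = scale * *sourceP++;
        --n;
    }

    // Bulk: four floats per iteration with SSE2. sourceP is now aligned, so
    // the aligned load is safe; destP may not be aligned, so store unaligned.
    const __m128 mScale = _mm_set1_ps(scale);
    size_t tail = n % 4;
    for (size_t i = 0; i < n / 4; ++i) {
        _mm_storeu_ps(destP, _mm_mul_ps(_mm_load_ps(sourceP), mScale));
        sourceP += 4;
        destP += 4;
    }

    // Tail: whatever remains after the vectorized groups.
    n = tail;
    while (n--)
        *destP++ = scale * *sourceP++;
}
```

With the architecture-based guard, this fast path now also compiles under MSVC instead of silently falling back to the scalar loop on Windows.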