Commit d8b90842 authored by barraclough@apple.com's avatar barraclough@apple.com

Bug 56273 - Add three operand forms to MacroAssembler operations.

Reviewed by Sam Weinig.

Adding for X86(_64) for now, should be rolled out to other backends as necessary.
These may allow more efficient code generation in some cases, avoiding the need
for unnecessary register-register move instructions.

* assembler/AbstractMacroAssembler.h:
(JSC::AbstractMacroAssembler::Jump::link):
(JSC::AbstractMacroAssembler::Jump::linkTo):
    - marked these methods const.
(JSC::AbstractMacroAssembler::Jump::isSet):
    - add a method to check whether a Jump object has been set to
      reference an instruction, or is in a null, unset state. 
* assembler/MacroAssemblerCodeRef.h:
(JSC::FunctionPtr::FunctionPtr):
    - add non-explicit constructor, for FunctionPtr's to C/C++ functions.
* assembler/MacroAssemblerX86Common.h:
(JSC::MacroAssemblerX86Common::and32):
(JSC::MacroAssemblerX86Common::lshift32):
(JSC::MacroAssemblerX86Common::or32):
(JSC::MacroAssemblerX86Common::rshift32):
(JSC::MacroAssemblerX86Common::urshift32):
(JSC::MacroAssemblerX86Common::xor32):
(JSC::MacroAssemblerX86Common::moveDouble):
(JSC::MacroAssemblerX86Common::addDouble):
(JSC::MacroAssemblerX86Common::divDouble):
(JSC::MacroAssemblerX86Common::subDouble):
(JSC::MacroAssemblerX86Common::mulDouble):
(JSC::MacroAssemblerX86Common::branchTruncateDoubleToInt32):
(JSC::MacroAssemblerX86Common::branchTest32):
(JSC::MacroAssemblerX86Common::branchTest8):
(JSC::MacroAssemblerX86Common::branchAdd32):
(JSC::MacroAssemblerX86Common::branchMul32):
(JSC::MacroAssemblerX86Common::branchSub32):
    - add three operand forms of these instructions.
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::addDouble):
(JSC::MacroAssemblerX86_64::convertInt32ToDouble):
(JSC::MacroAssemblerX86_64::loadPtr):
(JSC::MacroAssemblerX86_64::branchTestPtr):
* assembler/X86Assembler.h:
(JSC::X86Assembler::JmpSrc::isSet):
    - add a method to check whether a JmpSrc object has been set to
      reference an instruction, or is in a null, unset state. 
(JSC::X86Assembler::movsd_rr):
    - added FP register-register move.
(JSC::X86Assembler::linkJump):
    - Add an assert to check jumps aren't linked more than once.
* jit/JITInlineMethods.h:
(JSC::JIT::emitLoadInt32ToDouble):
    - load integers to the FPU via registers on x86-64.



git-svn-id: svn://svn.chromium.org/blink/trunk@80972 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent cdf56833
2011-03-13 Gavin Barraclough <barraclough@apple.com>
Reviewed by Sam Weinig.
Bug 56273 - Add three operand forms to MacroAssembler operations.
Adding for X86(_64) for now, should be rolled out to other backends as necessary.
These may allow more efficient code generation in some cases, avoiding the need
for unnecessary register-register move instructions.
* assembler/AbstractMacroAssembler.h:
(JSC::AbstractMacroAssembler::Jump::link):
(JSC::AbstractMacroAssembler::Jump::linkTo):
- marked these methods const.
(JSC::AbstractMacroAssembler::Jump::isSet):
- add a method to check whether a Jump object has been set to
reference an instruction, or is in a null, unset state.
* assembler/MacroAssemblerCodeRef.h:
(JSC::FunctionPtr::FunctionPtr):
- add non-explicit constructor, for FunctionPtr's to C/C++ functions.
* assembler/MacroAssemblerX86Common.h:
(JSC::MacroAssemblerX86Common::and32):
(JSC::MacroAssemblerX86Common::lshift32):
(JSC::MacroAssemblerX86Common::or32):
(JSC::MacroAssemblerX86Common::rshift32):
(JSC::MacroAssemblerX86Common::urshift32):
(JSC::MacroAssemblerX86Common::xor32):
(JSC::MacroAssemblerX86Common::moveDouble):
(JSC::MacroAssemblerX86Common::addDouble):
(JSC::MacroAssemblerX86Common::divDouble):
(JSC::MacroAssemblerX86Common::subDouble):
(JSC::MacroAssemblerX86Common::mulDouble):
(JSC::MacroAssemblerX86Common::branchTruncateDoubleToInt32):
(JSC::MacroAssemblerX86Common::branchTest32):
(JSC::MacroAssemblerX86Common::branchTest8):
(JSC::MacroAssemblerX86Common::branchAdd32):
(JSC::MacroAssemblerX86Common::branchMul32):
(JSC::MacroAssemblerX86Common::branchSub32):
- add three operand forms of these instructions.
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::addDouble):
(JSC::MacroAssemblerX86_64::convertInt32ToDouble):
(JSC::MacroAssemblerX86_64::loadPtr):
(JSC::MacroAssemblerX86_64::branchTestPtr):
* assembler/X86Assembler.h:
(JSC::X86Assembler::JmpSrc::isSet):
- add a method to check whether a JmpSrc object has been set to
reference an instruction, or is in a null, unset state.
(JSC::X86Assembler::movsd_rr):
- added FP register-register move.
(JSC::X86Assembler::linkJump):
- Add an assert to check jumps aren't linked more than once.
* jit/JITInlineMethods.h:
(JSC::JIT::emitLoadInt32ToDouble):
- load integers to the FPU via registers on x86-64.
2011-03-13 Gavin Barraclough <barraclough@apple.com>
ARM build fix.
......
...@@ -358,16 +358,18 @@ public: ...@@ -358,16 +358,18 @@ public:
{ {
} }
void link(AbstractMacroAssembler<AssemblerType>* masm) void link(AbstractMacroAssembler<AssemblerType>* masm) const
{ {
masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label()); masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
} }
void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
{ {
masm->m_assembler.linkJump(m_jmp, label.m_label); masm->m_assembler.linkJump(m_jmp, label.m_label);
} }
bool isSet() const { return m_jmp.isSet(); }
private: private:
JmpSrc m_jmp; JmpSrc m_jmp;
}; };
......
...@@ -65,15 +65,47 @@ public: ...@@ -65,15 +65,47 @@ public:
{ {
} }
template<typename returnType>
FunctionPtr(returnType(*value)())
: m_value((void*)value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
template<typename returnType, typename argType1>
FunctionPtr(returnType(*value)(argType1))
: m_value((void*)value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
template<typename returnType, typename argType1, typename argType2>
FunctionPtr(returnType(*value)(argType1, argType2))
: m_value((void*)value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
template<typename returnType, typename argType1, typename argType2, typename argType3>
FunctionPtr(returnType(*value)(argType1, argType2, argType3))
: m_value((void*)value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
: m_value((void*)value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
template<typename FunctionType> template<typename FunctionType>
explicit FunctionPtr(FunctionType* value) explicit FunctionPtr(FunctionType* value)
#if COMPILER(RVCT) // Using a C-ctyle cast here to avoid compiler error on RVTC:
// RVTC compiler needs C-style cast as it fails with the following error // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
// Error: #694: reinterpret_cast cannot cast away const or other type qualifiers // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
: m_value((void*)(value)) : m_value((void*)value)
#else
: m_value(reinterpret_cast<void*>(value))
#endif
{ {
ASSERT_VALID_CODE_POINTER(m_value); ASSERT_VALID_CODE_POINTER(m_value);
} }
......
...@@ -137,31 +137,59 @@ public: ...@@ -137,31 +137,59 @@ public:
m_assembler.andl_im(imm.m_value, address.offset, address.base); m_assembler.andl_im(imm.m_value, address.offset, address.base);
} }
void lshift32(Imm32 imm, RegisterID dest) void and32(RegisterID op1, RegisterID op2, RegisterID dest)
{
if (op1 == op2)
zeroExtend32ToPtr(op1, dest);
else if (op1 == dest)
and32(op2, dest);
else {
move(op2, dest);
and32(op1, dest);
}
}
void and32(Imm32 imm, RegisterID src, RegisterID dest)
{ {
m_assembler.shll_i8r(imm.m_value, dest); move(src, dest);
and32(imm, dest);
} }
void lshift32(RegisterID shift_amount, RegisterID dest) void lshift32(RegisterID shift_amount, RegisterID dest)
{ {
// On x86 we can only shift by ecx; if asked to shift by another register we'll ASSERT(shift_amount != dest);
// need rejig the shift amount into ecx first, and restore the registers afterwards.
if (shift_amount != X86Registers::ecx) {
swap(shift_amount, X86Registers::ecx);
// E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" if (shift_amount == X86Registers::ecx)
if (dest == shift_amount)
m_assembler.shll_CLr(X86Registers::ecx);
// E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
else if (dest == X86Registers::ecx)
m_assembler.shll_CLr(shift_amount);
// E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.shll_CLr(dest);
swap(shift_amount, X86Registers::ecx);
} else
m_assembler.shll_CLr(dest); m_assembler.shll_CLr(dest);
else {
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need rejig the shift amount into ecx first, and restore the registers afterwards.
// If we dest is ecx, then shift the swapped register!
swap(shift_amount, X86Registers::ecx);
m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
swap(shift_amount, X86Registers::ecx);
}
}
void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
{
ASSERT(shift_amount != dest);
if (src != dest)
move(src, dest);
lshift32(shift_amount, dest);
}
void lshift32(Imm32 imm, RegisterID dest)
{
m_assembler.shll_i8r(imm.m_value, dest);
}
void lshift32(RegisterID src, Imm32 imm, RegisterID dest)
{
if (src != dest)
move(src, dest);
lshift32(imm, dest);
} }
void mul32(RegisterID src, RegisterID dest) void mul32(RegisterID src, RegisterID dest)
...@@ -224,26 +252,47 @@ public: ...@@ -224,26 +252,47 @@ public:
m_assembler.orl_im(imm.m_value, address.offset, address.base); m_assembler.orl_im(imm.m_value, address.offset, address.base);
} }
void or32(RegisterID op1, RegisterID op2, RegisterID dest)
{
if (op1 == op2)
zeroExtend32ToPtr(op1, dest);
else if (op1 == dest)
or32(op2, dest);
else {
move(op2, dest);
or32(op1, dest);
}
}
void or32(Imm32 imm, RegisterID src, RegisterID dest)
{
move(src, dest);
or32(imm, dest);
}
void rshift32(RegisterID shift_amount, RegisterID dest) void rshift32(RegisterID shift_amount, RegisterID dest)
{ {
// On x86 we can only shift by ecx; if asked to shift by another register we'll ASSERT(shift_amount != dest);
// need rejig the shift amount into ecx first, and restore the registers afterwards.
if (shift_amount != X86Registers::ecx) {
swap(shift_amount, X86Registers::ecx);
// E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" if (shift_amount == X86Registers::ecx)
if (dest == shift_amount)
m_assembler.sarl_CLr(X86Registers::ecx);
// E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
else if (dest == X86Registers::ecx)
m_assembler.sarl_CLr(shift_amount);
// E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.sarl_CLr(dest);
swap(shift_amount, X86Registers::ecx);
} else
m_assembler.sarl_CLr(dest); m_assembler.sarl_CLr(dest);
else {
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need rejig the shift amount into ecx first, and restore the registers afterwards.
// If we dest is ecx, then shift the swapped register!
swap(shift_amount, X86Registers::ecx);
m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
swap(shift_amount, X86Registers::ecx);
}
}
void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
{
ASSERT(shift_amount != dest);
if (src != dest)
move(src, dest);
rshift32(shift_amount, dest);
} }
void rshift32(Imm32 imm, RegisterID dest) void rshift32(Imm32 imm, RegisterID dest)
...@@ -251,33 +300,50 @@ public: ...@@ -251,33 +300,50 @@ public:
m_assembler.sarl_i8r(imm.m_value, dest); m_assembler.sarl_i8r(imm.m_value, dest);
} }
void rshift32(RegisterID src, Imm32 imm, RegisterID dest)
{
if (src != dest)
move(src, dest);
rshift32(imm, dest);
}
void urshift32(RegisterID shift_amount, RegisterID dest) void urshift32(RegisterID shift_amount, RegisterID dest)
{ {
// On x86 we can only shift by ecx; if asked to shift by another register we'll ASSERT(shift_amount != dest);
// need rejig the shift amount into ecx first, and restore the registers afterwards.
if (shift_amount != X86Registers::ecx) { if (shift_amount == X86Registers::ecx)
m_assembler.shrl_CLr(dest);
else {
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need rejig the shift amount into ecx first, and restore the registers afterwards.
// If we dest is ecx, then shift the swapped register!
swap(shift_amount, X86Registers::ecx); swap(shift_amount, X86Registers::ecx);
m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
// E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
if (dest == shift_amount)
m_assembler.shrl_CLr(X86Registers::ecx);
// E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
else if (dest == X86Registers::ecx)
m_assembler.shrl_CLr(shift_amount);
// E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.shrl_CLr(dest);
swap(shift_amount, X86Registers::ecx); swap(shift_amount, X86Registers::ecx);
} else }
m_assembler.shrl_CLr(dest);
} }
void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
{
ASSERT(shift_amount != dest);
if (src != dest)
move(src, dest);
urshift32(shift_amount, dest);
}
void urshift32(Imm32 imm, RegisterID dest) void urshift32(Imm32 imm, RegisterID dest)
{ {
m_assembler.shrl_i8r(imm.m_value, dest); m_assembler.shrl_i8r(imm.m_value, dest);
} }
void urshift32(RegisterID src, Imm32 imm, RegisterID dest)
{
if (src != dest)
move(src, dest);
urshift32(imm, dest);
}
void sub32(RegisterID src, RegisterID dest) void sub32(RegisterID src, RegisterID dest)
{ {
m_assembler.subl_rr(src, dest); m_assembler.subl_rr(src, dest);
...@@ -329,6 +395,24 @@ public: ...@@ -329,6 +395,24 @@ public:
m_assembler.xorl_mr(src.offset, src.base, dest); m_assembler.xorl_mr(src.offset, src.base, dest);
} }
void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
{
if (op1 == op2)
move(Imm32(0), dest);
else if (op1 == dest)
xor32(op2, dest);
else {
move(op2, dest);
xor32(op1, dest);
}
}
void xor32(Imm32 imm, RegisterID src, RegisterID dest)
{
move(src, dest);
xor32(imm, dest);
}
void sqrtDouble(FPRegisterID src, FPRegisterID dst) void sqrtDouble(FPRegisterID src, FPRegisterID dst)
{ {
m_assembler.sqrtsd_rr(src, dst); m_assembler.sqrtsd_rr(src, dst);
...@@ -398,6 +482,13 @@ public: ...@@ -398,6 +482,13 @@ public:
// //
// Presently only supports SSE, not x87 floating point. // Presently only supports SSE, not x87 floating point.
void moveDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
if (src != dest)
m_assembler.movsd_rr(src, dest);
}
void loadDouble(ImplicitAddress address, FPRegisterID dest) void loadDouble(ImplicitAddress address, FPRegisterID dest)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
...@@ -416,6 +507,17 @@ public: ...@@ -416,6 +507,17 @@ public:
m_assembler.addsd_rr(src, dest); m_assembler.addsd_rr(src, dest);
} }
void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
{
ASSERT(isSSE2Present());
if (op1 == dest)
addDouble(op2, dest);
else {
moveDouble(op2, dest);
addDouble(op1, dest);
}
}
void addDouble(Address src, FPRegisterID dest) void addDouble(Address src, FPRegisterID dest)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
...@@ -428,6 +530,15 @@ public: ...@@ -428,6 +530,15 @@ public:
m_assembler.divsd_rr(src, dest); m_assembler.divsd_rr(src, dest);
} }
void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
{
// B := A / B is invalid.
ASSERT(op1 == dest || op2 != dest);
moveDouble(op1, dest);
divDouble(op2, dest);
}
void divDouble(Address src, FPRegisterID dest) void divDouble(Address src, FPRegisterID dest)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
...@@ -440,6 +551,15 @@ public: ...@@ -440,6 +551,15 @@ public:
m_assembler.subsd_rr(src, dest); m_assembler.subsd_rr(src, dest);
} }
void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
{
// B := A - B is invalid.
ASSERT(op1 == dest || op2 != dest);
moveDouble(op1, dest);
subDouble(op2, dest);
}
void subDouble(Address src, FPRegisterID dest) void subDouble(Address src, FPRegisterID dest)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
...@@ -452,6 +572,17 @@ public: ...@@ -452,6 +572,17 @@ public:
m_assembler.mulsd_rr(src, dest); m_assembler.mulsd_rr(src, dest);
} }
void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
{
ASSERT(isSSE2Present());
if (op1 == dest)
mulDouble(op2, dest);
else {
moveDouble(op2, dest);
mulDouble(op1, dest);
}
}
void mulDouble(Address src, FPRegisterID dest) void mulDouble(Address src, FPRegisterID dest)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
...@@ -501,11 +632,12 @@ public: ...@@ -501,11 +632,12 @@ public:
// If the result is not representable as a 32 bit value, branch. // If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits // May also branch for some values that are representable in 32 bits
// (specifically, in this case, INT_MIN). // (specifically, in this case, INT_MIN).
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest) enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
{ {
ASSERT(isSSE2Present()); ASSERT(isSSE2Present());
m_assembler.cvttsd2si_rr(src, dest); m_assembler.cvttsd2si_rr(src, dest);
return branch32(Equal, dest, Imm32(0x80000000)); return branch32(branchType ? NotEqual : Equal, dest, Imm32(0x80000000));
} }
// Convert 'src' to an integer, and places the resulting 'dest'. // Convert 'src' to an integer, and places the resulting 'dest'.
...@@ -728,14 +860,14 @@ public: ...@@ -728,14 +860,14 @@ public:
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask) Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
m_assembler.testl_rr(reg, mask); m_assembler.testl_rr(reg, mask);
return Jump(m_assembler.jCC(x86Condition(cond))); return Jump(m_assembler.jCC(x86Condition(cond)));
} }
Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
// if we are only interested in the low seven bits, this can be tested with a testb // if we are only interested in the low seven bits, this can be tested with a testb
if (mask.m_value == -1) if (mask.m_value == -1)
m_assembler.testl_rr(reg, reg); m_assembler.testl_rr(reg, reg);
...@@ -748,7 +880,7 @@ public: ...@@ -748,7 +880,7 @@ public:
Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1)) Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
if (mask.m_value == -1) if (mask.m_value == -1)
m_assembler.cmpl_im(0, address.offset, address.base); m_assembler.cmpl_im(0, address.offset, address.base);
else else
...@@ -758,7 +890,7 @@ public: ...@@ -758,7 +890,7 @@ public:
Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
if (mask.m_value == -1) if (mask.m_value == -1)
m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale); m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
else else
...@@ -768,7 +900,7 @@ public: ...@@ -768,7 +900,7 @@ public:
Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1)) Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
if (mask.m_value == -1) if (mask.m_value == -1)
m_assembler.cmpb_im(0, address.offset, address.base); m_assembler.cmpb_im(0, address.offset, address.base);
else else
...@@ -778,7 +910,7 @@ public: ...@@ -778,7 +910,7 @@ public:
Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{ {
ASSERT((cond == Zero) || (cond == NonZero)); ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
if (mask.m_value == -1) if (mask.m_value == -1)
m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale); m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
else else
...@@ -848,6 +980,20 @@ public: ...@@ -848,6 +980,20 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond))); return Jump(m_assembler.jCC(x86Condition(cond)));
} }
Jump branchAdd32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
if (src1 == dest)
return branchAdd32(cond, src2, dest);
move(src2, dest);
return branchAdd32(cond, src1, dest);
}
Jump branchAdd32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
move(src, dest);
return branchAdd32(cond, imm, dest);
}
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest) Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{ {
ASSERT(cond == Overflow); ASSERT(cond == Overflow);
...@@ -869,6 +1015,14 @@ public: ...@@ -869,6 +1015,14 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond))); return Jump(m_assembler.jCC(x86Condition(cond)));
} }
Jump branchMul32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
if (src1 == dest)
return branchMul32(cond, src2, dest);
move(src2, dest);
return branchMul32(cond, src1, dest);
}
Jump branchSub32(Condition cond, RegisterID src, RegisterID dest) Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
{ {
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
...@@ -904,6 +1058,15 @@ public: ...@@ -904,6 +1058,15 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond))); return Jump(m_assembler.jCC(x86Condition(cond)));
} }
Jump branchSub32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
// B := A - B is invalid.
ASSERT(src1 == dest || src2 != dest);
move(src1, dest);
return branchSub32(cond, src2, dest);
}
Jump branchNeg32(Condition cond, RegisterID srcDest) Jump branchNeg32(Condition cond, RegisterID srcDest)
{ {
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
......
...@@ -48,6 +48,7 @@ public: ...@@ -48,6 +48,7 @@ public:
using MacroAssemblerX86Common::load32; using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32; using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::call; using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::addDouble;
using MacroAssemblerX86Common::loadDouble; using MacroAssemblerX86Common::loadDouble;
using MacroAssemblerX86Common::convertInt32ToDouble; using MacroAssemblerX86Common::convertInt32ToDouble;
...@@ -92,9 +93,15 @@ public: ...@@ -92,9 +93,15 @@ public:
loadDouble(scratchRegister, dest); loadDouble(scratchRegister, dest);
} }
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) void addDouble(AbsoluteAddress address, FPRegisterID dest)
{ {
move(Imm32(*static_cast<const int32_t*>(src.m_ptr)), scratchRegister); move(ImmPtr(address.m_ptr), scratchRegister);
m_assembler.addsd_mr(0, scratchRegister, dest);
}
void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
{
move(imm, scratchRegister);
m_assembler.cvtsi2sd_rr(scratchRegister, dest); m_assembler.cvtsi2sd_rr(scratchRegister, dest);
} }
...@@ -227,7 +234,7 @@ public: ...@@ -227,7 +234,7 @@ public:
m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
} }
void loadPtr(void* address, RegisterID dest) void loadPtr(const void* address, RegisterID dest)
{ {
if (dest == X86Registers::eax) if (dest == X86Registers::eax)
m_assembler.movq_mEAX(address); m_assembler.movq_mEAX(address);
...@@ -351,6 +358,12 @@ public: ...@@ -351,6 +358,12 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond))); return Jump(m_assembler.jCC(x86Condition(cond)));
} }
Jump branchTestPtr(Condition cond, AbsoluteAddress address, Imm32 mask = Imm32(-1))
{
loadPtr(address.m_ptr, scratchRegister);
return branchTestPtr(cond, scratchRegister, mask);
}
Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1)) Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
{ {
if (mask.m_value == -1) if (mask.m_value == -1)
......
...@@ -228,6 +228,8 @@ public: ...@@ -228,6 +228,8 @@ public:
{ {
} }
bool isSet() const { return (m_offset != -1); }
private: private:
JmpSrc(int offset) JmpSrc(int offset)
: m_offset(offset) : m_offset(offset)
...@@ -1398,6 +1400,12 @@ public: ...@@ -1398,6 +1400,12 @@ public:
} }
#endif #endif
void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
void movsd_rm(XMMRegisterID src, int offset, RegisterID base) void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
{ {
m_formatter.prefix(PRE_SSE_F2); m_formatter.prefix(PRE_SSE_F2);
...@@ -1536,6 +1544,7 @@ public: ...@@ -1536,6 +1544,7 @@ public:
ASSERT(to.m_offset != -1); ASSERT(to.m_offset != -1);
char* code = reinterpret_cast<char*>(m_formatter.data()); char* code = reinterpret_cast<char*>(m_formatter.data());
ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
setRel32(code + from.m_offset, code + to.m_offset); setRel32(code + from.m_offset, code + to.m_offset);
} }
......
...@@ -710,8 +710,8 @@ inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value) ...@@ -710,8 +710,8 @@ inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value) inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{ {
if (m_codeBlock->isConstantRegisterIndex(index)) { if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index); ASSERT(isOperandConstantImmediateInt(index));
convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value); convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else } else
convertInt32ToDouble(addressFor(index), value); convertInt32ToDouble(addressFor(index), value);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment