name | code | asm | file
---|---|---|---
deqp::gls::ShaderExecUtil::generateFragShaderOutputDecl(std::ostream&, deqp::gls::ShaderExecUtil::ShaderSpec const&, bool, std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
static void generateFragShaderOutputDecl (std::ostream& src, const ShaderSpec& shaderSpec, bool useIntOutputs, const std::map<std::string, int>& outLocationMap, const std::string& outputPrefix)
{
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    for (int outNdx = 0; outNdx < (int)shaderSpec.outputs.size(); ++outNdx)
    {
        const Symbol& output = shaderSpec.outputs[outNdx];
        const int location = de::lookup(outLocationMap, output.name);
        const std::string outVarName = outputPrefix + output.name;
        glu::VariableDeclaration decl (output.varType, outVarName, glu::STORAGE_OUT, glu::INTERPOLATION_LAST, glu::Layout(location));

        TCU_CHECK_INTERNAL(output.varType.isBasicType());

        if (useIntOutputs && glu::isDataTypeFloatOrVec(output.varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output.varType.getBasicType());
            const glu::DataType uintBasicType = vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT;
            const glu::VarType uintType (uintBasicType, glu::PRECISION_HIGHP);

            decl.varType = uintType;
            src << decl << ";\n";
        }
        else if (glu::isDataTypeBoolOrBVec(output.varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output.varType.getBasicType());
            const glu::DataType intBasicType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            const glu::VarType intType (intBasicType, glu::PRECISION_HIGHP);

            decl.varType = intType;
            src << decl << ";\n";
        }
        else if (glu::isDataTypeMatrix(output.varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeMatrixNumRows(output.varType.getBasicType());
            const int numVecs = glu::getDataTypeMatrixNumColumns(output.varType.getBasicType());
            const glu::DataType uintBasicType = glu::getDataTypeUintVec(vecSize);
            const glu::VarType uintType (uintBasicType, glu::PRECISION_HIGHP);

            decl.varType = uintType;
            for (int vecNdx = 0; vecNdx < numVecs; ++vecNdx)
            {
                decl.name = outVarName + "_" + de::toString(vecNdx);
                decl.layout.location = location + vecNdx;
                src << decl << ";\n";
            }
        }
        else
            src << decl << ";\n";
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2b8, %rsp # imm = 0x2B8
movq %r8, 0xb0(%rsp)
movq %rcx, 0xa8(%rsp)
movl %edx, 0x1c(%rsp)
movq 0x20(%rsi), %r13
movq %rsi, 0xa0(%rsp)
movq 0x28(%rsi), %rax
subq %r13, %rax
shrq $0x3, %rax
imull $0xb6db6db7, %eax, %eax # imm = 0xB6DB6DB7
testl %eax, %eax
jle 0x878996
xorl %ecx, %ecx
leaq 0x140(%rsp), %rbx
movq %rdi, 0x10(%rsp)
movq %rcx, 0xb8(%rsp)
movl %ecx, %eax
imulq $0x38, %rax, %r14
leaq (%r14,%r13), %r15
movq 0xa8(%rsp), %rdi
movq %r15, %rsi
callq 0x878f72
movl (%rax), %ebp
movq %rbx, %r12
leaq 0x80(%rsp), %rbx
movq %rbx, %rdi
movq 0xb0(%rsp), %rsi
movq %r15, %rdx
callq 0x36611e
movq %r12, %rdi
movq %rbp, 0xc0(%rsp)
movl %ebp, %esi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movl $0xd, %r8d
movl $0x2, %r9d
callq 0x8d9d04
addq %r14, %r13
addq $0x20, %r13
movl $0x0, (%rsp)
leaq 0xc8(%rsp), %rdi
movq %r13, %rsi
movq %rbx, %rdx
movl $0x1, %ecx
movl $0x3, %r8d
movq %r12, %r9
callq 0x8d9d4c
cmpl $0x0, (%r13)
jne 0x8789a8
cmpb $0x0, 0x1c(%rsp)
leaq 0x70(%rsp), %r14
movq %r12, %rbx
je 0x878634
movl 0x28(%r15), %edi
leal -0x1(%rdi), %eax
cmpl $0x3, %eax
ja 0x878634
callq 0x8bb822
movl %eax, %edi
movl $0x1f, %eax
cmpl $0x2, %edi
movq 0x10(%rsp), %r12
leaq 0xc8(%rsp), %r15
jl 0x8785f4
callq 0x8bb880
movq %rbx, %rdi
movl %eax, %esi
movl $0x2, %edx
callq 0x8d997c
leaq 0xe8(%rsp), %rdi
movq %rbx, %rsi
callq 0x8d98c4
movq %r12, %rdi
movq %r15, %rsi
callq 0x8da397
movl $0x2, %edx
movq %rax, %rdi
leaq 0x20f136(%rip), %rsi # 0xa87763
callq 0x325e70
jmp 0x8786a1
movl 0x28(%r15), %edi
leal -0x23(%rdi), %eax
cmpl $0x3, %eax
ja 0x8786a9
callq 0x8bb822
movq 0x10(%rsp), %r12
leaq 0xc8(%rsp), %r15
movl %eax, %edi
movl $0x1b, %eax
cmpl $0x2, %edi
jl 0x878663
callq 0x8bb87c
movq %rbx, %rdi
movl %eax, %esi
movl $0x2, %edx
callq 0x8d997c
leaq 0xe8(%rsp), %rdi
movq %rbx, %rsi
callq 0x8d98c4
movq %r12, %rdi
movq %r15, %rsi
callq 0x8da397
movl $0x2, %edx
movq %rax, %rdi
leaq 0x20f0c7(%rip), %rsi # 0xa87763
callq 0x325e70
movq %rbx, %rdi
jmp 0x8788e8
leal -0x5(%rdi), %eax
cmpl $0x9, %eax
setae %al
leal -0x12(%rdi), %ecx
cmpl $0x9, %ecx
setae %cl
testb %cl, %al
movq 0x10(%rsp), %r12
leaq 0xc8(%rsp), %rsi
jne 0x878975
callq 0x8bb891
movl %eax, %r13d
movl 0x28(%r15), %edi
callq 0x8bb8a8
movl %eax, %r15d
movl %r13d, %edi
callq 0x8bb880
leaq 0x128(%rsp), %r13
movq %r13, %rdi
movl %eax, %esi
movl $0x2, %edx
callq 0x8d997c
leaq 0xe8(%rsp), %rdi
movq %r13, %rsi
callq 0x8d98c4
testl %r15d, %r15d
jle 0x8788e0
xorl %r13d, %r13d
movq %r14, 0x60(%rsp)
movq 0x80(%rsp), %rsi
movq 0x88(%rsp), %rdx
addq %rsi, %rdx
leaq 0x60(%rsp), %rbp
movq %rbp, %rdi
callq 0x334442
movq %rbp, %rdi
leaq 0x19bdba(%rip), %rsi # 0xa14508
callq 0x3259c0
movq %rbx, %rdi
callq 0x325e00
movq %rbx, %rdi
movl %r13d, %esi
callq 0x325530
leaq 0x40(%rsp), %rdi
leaq 0x148(%rsp), %rsi
callq 0x325660
movq %rbx, %rdi
movq 0x3e43a6(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1b0(%rsp), %rdi
callq 0x325a80
movq 0x60(%rsp), %rcx
movl $0xf, %esi
cmpq %r14, %rcx
je 0x8787a8
movq 0x70(%rsp), %rsi
movq 0x68(%rsp), %r8
movq 0x48(%rsp), %rdx
leaq (%rdx,%r8), %rax
cmpq %rsi, %rax
jbe 0x8787d6
movl $0xf, %esi
leaq 0x50(%rsp), %rdi
cmpq %rdi, 0x40(%rsp)
je 0x8787d1
movq 0x50(%rsp), %rsi
cmpq %rsi, %rax
jbe 0x8787e7
movq 0x40(%rsp), %rsi
leaq 0x60(%rsp), %rdi
callq 0x325af0
jmp 0x8787f5
leaq 0x40(%rsp), %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x325230
leaq 0x30(%rsp), %rsi
movq %rsi, 0x20(%rsp)
movq (%rax), %rdx
leaq 0x10(%rax), %rcx
cmpq %rcx, %rdx
je 0x87881a
movq %rdx, 0x20(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x30(%rsp)
jmp 0x878820
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq %rax, %rdx
addq $0x8, %rdx
movq 0x8(%rax), %rsi
movq %rsi, 0x28(%rsp)
movq %rcx, (%rax)
movq $0x0, (%rdx)
movb $0x0, (%rcx)
leaq 0x108(%rsp), %rdi
leaq 0x20(%rsp), %rsi
callq 0x325800
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x87886b
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rax
cmpq %rax, %rdi
je 0x878887
movq 0x50(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x60(%rsp), %rdi
cmpq %r14, %rdi
je 0x87889e
movq 0x70(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xc0(%rsp), %rax
addl %r13d, %eax
movl %eax, 0xc8(%rsp)
movq %r12, %rdi
leaq 0xc8(%rsp), %rsi
callq 0x8da397
movl $0x2, %edx
movq %rax, %rdi
leaq 0x20ee94(%rip), %rsi # 0xa87763
callq 0x325e70
incl %r13d
cmpl %r13d, %r15d
jne 0x87871f
leaq 0x128(%rsp), %rdi
callq 0x8d9a14
movq 0x108(%rsp), %rdi
leaq 0x118(%rsp), %rax
cmpq %rax, %rdi
je 0x878912
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0xe8(%rsp), %rdi
callq 0x8d9a14
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x878944
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xb8(%rsp), %rcx
incl %ecx
movq 0xa0(%rsp), %rax
movq 0x20(%rax), %r13
movq 0x28(%rax), %rax
subq %r13, %rax
shrq $0x3, %rax
imull $0xb6db6db7, %eax, %eax # imm = 0xB6DB6DB7
cmpl %eax, %ecx
jl 0x878512
jmp 0x878996
movq %r12, %rdi
callq 0x8da397
movl $0x2, %edx
movq %rax, %rdi
leaq 0x20edd7(%rip), %rsi # 0xa87763
callq 0x325e70
jmp 0x8788ed
addq $0x2b8, %rsp # imm = 0x2B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0x20ecbf(%rip), %rdx # 0xa8767b
leaq 0x20e832(%rip), %rcx # 0xa871f5
movq %rax, %rdi
xorl %esi, %esi
movl $0x135, %r8d # imm = 0x135
callq 0x99c1d4
leaq 0x3a26a6(%rip), %rsi # 0xc1b080
leaq -0x546c01(%rip), %rdx # 0x331de0
movq %r14, %rdi
callq 0x325940
jmp 0x878a04
jmp 0x878a04
jmp 0x878a9a
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
jmp 0x878a04
movq %rax, %rbx
jmp 0x878aaa
movq %rax, %rbx
movq %r14, %rdi
callq 0x325d40
jmp 0x878aaa
jmp 0x878a1e
movq %rax, %rbx
leaq 0x140(%rsp), %rdi
jmp 0x878aa5
movq %rax, %rbx
jmp 0x878ab7
jmp 0x878a58
jmp 0x878a9a
movq %rax, %rbx
movq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rax
cmpq %rax, %rdi
je 0x878a81
movq 0x50(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x878a81
movq %rax, %rbx
jmp 0x878a81
movq %rax, %rbx
movq 0x3e40c1(%rip), %rsi # 0xc5cb28
leaq 0x140(%rsp), %rdi
callq 0x325aa0
leaq 0x1b0(%rsp), %rdi
callq 0x325a80
movq 0x60(%rsp), %rdi
cmpq %r14, %rdi
je 0x878a9d
movq 0x70(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x878a9d
movq %rax, %rbx
leaq 0x128(%rsp), %rdi
callq 0x8d9a14
leaq 0xc8(%rsp), %rdi
callq 0x879592
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x878adc
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsShaderExecUtil.cpp
|
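When useIntOutputs is set, the generator above re-declares float outputs as highp uint (or uvecN) so the fragment shader can write floatBitsToUint(value) and the test can read the results back bit-exactly through an integer framebuffer. A minimal host-side sketch of the reverse bit cast, not part of the CTS sources (uintBitsToFloat here is a hypothetical stand-in for the GLSL built-in):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Reinterpret the 32 bits of a uint as a float -- the inverse of what the
// generated shader does with floatBitsToUint(). memcpy keeps it well-defined.
static float uintBitsToFloat(uint32_t bits)
{
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

int main(void)
{
    const uint32_t readback = 0x3f800000u;          // bit pattern of 1.0f
    std::printf("%f\n", uintBitsToFloat(readback)); // prints 1.000000
    return 0;
}
```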
deqp::gls::ShaderExecUtil::generateFragShaderOutAssign(std::ostream&, deqp::gls::ShaderExecUtil::ShaderSpec const&, bool, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
static void generateFragShaderOutAssign (std::ostream& src, const ShaderSpec& shaderSpec, bool useIntOutputs, const std::string& valuePrefix, const std::string& outputPrefix)
{
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        if (useIntOutputs && glu::isDataTypeFloatOrVec(output->varType.getBasicType()))
            src << "\to_" << output->name << " = floatBitsToUint(" << valuePrefix << output->name << ");\n";
        else if (glu::isDataTypeMatrix(output->varType.getBasicType()))
        {
            const int numVecs = glu::getDataTypeMatrixNumColumns(output->varType.getBasicType());

            for (int vecNdx = 0; vecNdx < numVecs; ++vecNdx)
                if (useIntOutputs)
                    src << "\t" << outputPrefix << output->name << "_" << vecNdx << " = floatBitsToUint(" << valuePrefix << output->name << "[" << vecNdx << "]);\n";
                else
                    src << "\t" << outputPrefix << output->name << "_" << vecNdx << " = " << valuePrefix << output->name << "[" << vecNdx << "];\n";
        }
        else if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;

            src << "\t" << outputPrefix << output->name << " = " << glu::getDataTypeName(intBaseType) << "(" << valuePrefix << output->name << ");\n";
        }
        else
            src << "\t" << outputPrefix << output->name << " = " << valuePrefix << output->name << ";\n";
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movl %edx, 0x4(%rsp)
movq 0x20(%rsi), %rbx
movq %rsi, 0x8(%rsp)
cmpq 0x28(%rsi), %rbx
je 0x878ea6
movq %r8, %rbp
movq %rcx, %r14
movq %rdi, %r12
movq %r8, 0x10(%rsp)
cmpb $0x0, 0x4(%rsp)
je 0x878b62
movl 0x28(%rbx), %eax
decl %eax
cmpl $0x3, %eax
ja 0x878b62
movl $0x3, %edx
movq %r12, %rdi
leaq 0x20eb61(%rip), %rsi # 0xa87698
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %r12, %rdi
callq 0x325e70
movq %rax, %r15
movl $0x13, %edx
movq %rax, %rdi
leaq 0x20eb3f(%rip), %rsi # 0xa8769c
jmp 0x878e5c
movl 0x28(%rbx), %edi
leal -0x5(%rdi), %eax
cmpl $0x9, %eax
setae %al
leal -0x12(%rdi), %ecx
cmpl $0x9, %ecx
setae %cl
testb %cl, %al
jne 0x878d1b
callq 0x8bb8a8
testl %eax, %eax
jle 0x878e93
movl %eax, %r13d
xorl %r15d, %r15d
movl $0x1, %edx
movq %r12, %rdi
leaq 0x1e6c2f(%rip), %rsi # 0xa5f7d0
callq 0x325e70
movq (%rbp), %rsi
cmpb $0x0, 0x4(%rsp)
je 0x878c5d
movq 0x8(%rbp), %rdx
movq %r12, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %rbp
movl $0x1, %edx
movq %rax, %rdi
leaq 0x19b926(%rip), %rsi # 0xa14508
callq 0x325e70
movq %rbp, %rdi
movl %r15d, %esi
callq 0x325530
movq %rax, %rbp
movl $0x13, %edx
movq %rax, %rdi
leaq 0x20ea98(%rip), %rsi # 0xa8769c
callq 0x325e70
movq (%r14), %rsi
movq 0x8(%r14), %rdx
movq %rbp, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %rbp
movl $0x1, %edx
movq %rax, %rdi
leaq 0x1d1518(%rip), %rsi # 0xa4a151
callq 0x325e70
movq %rbp, %rdi
movl %r15d, %esi
callq 0x325530
movl $0x4, %edx
movq %rax, %rdi
leaq 0x20ea58(%rip), %rsi # 0xa876b0
jmp 0x878d00
movq 0x8(%rbp), %rdx
movq %r12, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %rbp
movl $0x1, %edx
movq %rax, %rdi
leaq 0x19b87e(%rip), %rsi # 0xa14508
callq 0x325e70
movq %rbp, %rdi
movl %r15d, %esi
callq 0x325530
movq %rax, %rbp
movl $0x3, %edx
movq %rax, %rdi
leaq 0x1d2d29(%rip), %rsi # 0xa4b9d5
callq 0x325e70
movq (%r14), %rsi
movq 0x8(%r14), %rdx
movq %rbp, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %rbp
movl $0x1, %edx
movq %rax, %rdi
leaq 0x1d1470(%rip), %rsi # 0xa4a151
callq 0x325e70
movq %rbp, %rdi
movl %r15d, %esi
callq 0x325530
movl $0x3, %edx
movq %rax, %rdi
leaq 0x190e1a(%rip), %rsi # 0xa09b1a
callq 0x325e70
incl %r15d
cmpl %r15d, %r13d
movq 0x10(%rsp), %rbp
jne 0x878b92
jmp 0x878e93
leal -0x23(%rdi), %eax
cmpl $0x3, %eax
ja 0x878dba
callq 0x8bb822
movl $0x1b, %r13d
cmpl $0x2, %eax
jl 0x878d41
movl %eax, %edi
callq 0x8bb87c
movl %eax, %r13d
movl $0x1, %edx
movq %r12, %rdi
leaq 0x1e6a80(%rip), %rsi # 0xa5f7d0
callq 0x325e70
movq (%rbp), %rsi
movq 0x8(%rbp), %rdx
movq %r12, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %r15
movl $0x3, %edx
movq %rax, %rdi
leaq 0x1d2c4f(%rip), %rsi # 0xa4b9d5
callq 0x325e70
movl %r13d, %edi
callq 0x8bb810
testq %rax, %rax
je 0x878e33
movq %rax, %r13
movq %rax, %rdi
callq 0x325680
movq %r15, %rdi
movq %r13, %rsi
movq %rax, %rdx
callq 0x325e70
jmp 0x878e4d
movl $0x1, %edx
movq %r12, %rdi
leaq 0x1e6a07(%rip), %rsi # 0xa5f7d0
callq 0x325e70
movq (%rbp), %rsi
movq 0x8(%rbp), %rdx
movq %r12, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movq %rax, %r15
movl $0x3, %edx
movq %rax, %rdi
leaq 0x1d2bd6(%rip), %rsi # 0xa4b9d5
callq 0x325e70
movq (%r14), %rsi
movq 0x8(%r14), %rdx
movq %r15, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movl $0x2, %edx
movq %rax, %rdi
leaq 0x20e932(%rip), %rsi # 0xa87763
jmp 0x878e8e
movq (%r15), %rax
movq -0x18(%rax), %rax
movq %r15, %rdi
addq %rax, %rdi
movl 0x20(%r15,%rax), %esi
orl $0x1, %esi
callq 0x326070
movl $0x1, %edx
movq %r15, %rdi
leaq 0x1be3e6(%rip), %rsi # 0xa37242
callq 0x325e70
movq (%r14), %rsi
movq 0x8(%r14), %rdx
movq %r15, %rdi
callq 0x325e70
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %rax, %rdi
callq 0x325e70
movl $0x3, %edx
movq %rax, %rdi
leaq 0x20e8d4(%rip), %rsi # 0xa87762
callq 0x325e70
addq $0x38, %rbx
movq 0x8(%rsp), %rax
cmpq 0x28(%rax), %rbx
jne 0x878b17
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsShaderExecUtil.cpp
|
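For matrix outputs the generator emits one assignment per column, matching the per-column uvec declarations produced by generateFragShaderOutputDecl. A self-contained sketch of that branch (the "o_"/"in0_" prefixes are illustrative, not the CTS values):

```cpp
#include <sstream>
#include <string>
#include <iostream>

// Emit one assignment per matrix column, bit-casting when integer outputs are used.
static std::string emitMatrixAssigns(const std::string& name, int numCols, bool useIntOutputs)
{
    std::ostringstream src;
    for (int vecNdx = 0; vecNdx < numCols; ++vecNdx)
    {
        if (useIntOutputs)
            src << "\to_" << name << "_" << vecNdx << " = floatBitsToUint(in0_" << name << "[" << vecNdx << "]);\n";
        else
            src << "\to_" << name << "_" << vecNdx << " = in0_" << name << "[" << vecNdx << "];\n";
    }
    return src.str();
}

int main(void)
{
    // For a mat2 with integer outputs this prints:
    //     o_m_0 = floatBitsToUint(in0_m[0]);
    //     o_m_1 = floatBitsToUint(in0_m[1]);
    std::cout << emitMatrixAssigns("m", 2, true);
    return 0;
}
```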
std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>::mapped_type const& de::lookup<std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>>(std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>> const&, std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>::key_type const&)
|
const typename M::mapped_type& lookup (const M& map, const typename M::key_type& key)
{
    const typename M::mapped_type* ptr = tryLookup(map, key);
    if (ptr == DE_NULL)
        throw std::out_of_range("key not found in map");
    return *ptr;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
callq 0x879130
addq $0x8, %rbx
cmpq %rbx, %rax
je 0x878f93
addq $0x40, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
movl $0x10, %edi
callq 0x325650
movq %rax, %rbx
leaq 0x1eeaec(%rip), %rsi # 0xa67a93
movq %rax, %rdi
callq 0x3253d0
movq 0x3e393a(%rip), %rsi # 0xc5c8f0
movq 0x3e3d53(%rip), %rdx # 0xc5cd10
movq %rbx, %rdi
callq 0x325940
movq %rax, %r14
movq %rbx, %rdi
callq 0x325d40
movq %r14, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/delibs/decpp/deSTLUtil.hpp
|
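de::lookup is a thin throw-on-missing wrapper (tryLookup and DE_NULL are CTS internals). The same contract written against plain std::map, as a stand-alone sketch with a hypothetical name:

```cpp
#include <map>
#include <stdexcept>
#include <string>

// Find the key or throw -- equivalent behavior to de::lookup above.
template <typename M>
const typename M::mapped_type& lookupOrThrow(const M& m, const typename M::key_type& key)
{
    typename M::const_iterator it = m.find(key);
    if (it == m.end())
        throw std::out_of_range("key not found in map");
    return it->second;
}

int main(void)
{
    std::map<std::string, int> outLocationMap;
    outLocationMap["o_color"] = 0;
    return lookupOrThrow(outLocationMap, "o_color"); // returns 0; unknown keys throw
}
```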
bool de::insert<std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>>(std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>&, std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>::key_type const&, std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int, std::less<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const, int>>>::mapped_type const&)
|
bool insert (M& map, const typename M::key_type& key, const typename M::mapped_type& value)
{
    typename M::value_type entry(key, value);
    std::pair<typename M::iterator,bool> ret = map.insert(entry);
    return ret.second;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdx, %rbx
movq %rdi, %r14
leaq 0x10(%rsp), %r12
movq %r12, -0x10(%r12)
movq (%rsi), %rax
movq 0x8(%rsi), %rdx
addq %rax, %rdx
movq %rsp, %r15
movq %r15, %rdi
movq %rax, %rsi
callq 0x334442
movl (%rbx), %eax
movl %eax, 0x20(%r15)
movq %r14, %rdi
movq %r15, %rsi
callq 0x8791ba
movl %edx, %ebx
movq (%rsp), %rdi
cmpq %r12, %rdi
je 0x8790fd
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
andb $0x1, %bl
movl %ebx, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %rax, %rbx
movq (%rsp), %rdi
cmpq %r12, %rdi
je 0x879127
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/framework/delibs/decpp/deSTLUtil.hpp
|
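std::map::insert() never overwrites an existing key, so the helper's bool return reports whether the entry was actually added. A short usage sketch in plain standard C++:

```cpp
#include <map>
#include <string>
#include <cassert>

int main(void)
{
    std::map<std::string, int> outLocations;

    assert(outLocations.insert(std::make_pair("o_color", 0)).second);  // new key -> true
    assert(!outLocations.insert(std::make_pair("o_color", 7)).second); // duplicate -> false
    assert(outLocations["o_color"] == 0);                              // original value kept
    return 0;
}
```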
deqp::gls::TextureSamplerTest::setTextureState(glw::Functions const&, unsigned int, deqp::gls::TextureSamplerTest::SamplingState)
|
void TextureSamplerTest::setTextureState (const glw::Functions& gl, GLenum target, SamplingState state)
{
    gl.texParameteri(target, GL_TEXTURE_MIN_FILTER, state.minFilter);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameteri(target, GL_TEXTURE_MIN_FILTER, state.minFilter)");
    gl.texParameteri(target, GL_TEXTURE_MAG_FILTER, state.magFilter);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameteri(target, GL_TEXTURE_MAG_FILTER, state.magFilter)");
    gl.texParameteri(target, GL_TEXTURE_WRAP_S, state.wrapS);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameteri(target, GL_TEXTURE_WRAP_S, state.wrapS)");
    gl.texParameteri(target, GL_TEXTURE_WRAP_T, state.wrapT);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameteri(target, GL_TEXTURE_WRAP_T, state.wrapT)");
    gl.texParameteri(target, GL_TEXTURE_WRAP_R, state.wrapR);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameteri(target, GL_TEXTURE_WRAP_R, state.wrapR)");
    gl.texParameterf(target, GL_TEXTURE_MAX_LOD, state.maxLod);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameterf(target, GL_TEXTURE_MAX_LOD, state.maxLod)");
    gl.texParameterf(target, GL_TEXTURE_MIN_LOD, state.minLod);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexParameterf(target, GL_TEXTURE_MIN_LOD, state.minLod)");
}
|
pushq %rbp
pushq %r14
pushq %rbx
movl %esi, %ebp
movq %rdi, %rbx
movl 0x20(%rsp), %edx
movl %esi, %edi
movl $0x2801, %esi # imm = 0x2801
callq *0x1360(%rbx)
callq *0x800(%rbx)
leaq 0x20e007(%rip), %rsi # 0xa878d0
leaq 0x20e040(%rip), %r14 # 0xa87910
movl %eax, %edi
movq %r14, %rdx
movl $0x56, %ecx
callq 0x8b6518
movl 0x24(%rsp), %edx
movl %ebp, %edi
movl $0x2800, %esi # imm = 0x2800
callq *0x1360(%rbx)
callq *0x800(%rbx)
leaq 0x20e088(%rip), %rsi # 0xa87985
movl %eax, %edi
movq %r14, %rdx
movl $0x58, %ecx
callq 0x8b6518
movl 0x2c(%rsp), %edx
movl %ebp, %edi
movl $0x2802, %esi # imm = 0x2802
callq *0x1360(%rbx)
callq *0x800(%rbx)
leaq 0x20e09b(%rip), %rsi # 0xa879c5
movl %eax, %edi
movq %r14, %rdx
movl $0x5a, %ecx
callq 0x8b6518
movl 0x28(%rsp), %edx
movl %ebp, %edi
movl $0x2803, %esi # imm = 0x2803
callq *0x1360(%rbx)
callq *0x800(%rbx)
leaq 0x20e0a6(%rip), %rsi # 0xa879fd
movl %eax, %edi
movq %r14, %rdx
movl $0x5c, %ecx
callq 0x8b6518
movl 0x30(%rsp), %edx
movl %ebp, %edi
movl $0x8072, %esi # imm = 0x8072
callq *0x1360(%rbx)
callq *0x800(%rbx)
leaq 0x20e0b1(%rip), %rsi # 0xa87a35
movl %eax, %edi
movq %r14, %rdx
movl $0x5e, %ecx
callq 0x8b6518
movss 0x38(%rsp), %xmm0
movl %ebp, %edi
movl $0x813b, %esi # imm = 0x813B
callq *0x1350(%rbx)
callq *0x800(%rbx)
leaq 0x20e0ba(%rip), %rsi # 0xa87a6d
movl %eax, %edi
movq %r14, %rdx
movl $0x60, %ecx
callq 0x8b6518
movss 0x34(%rsp), %xmm0
movl %ebp, %edi
movl $0x813a, %esi # imm = 0x813A
callq *0x1350(%rbx)
callq *0x800(%rbx)
leaq 0x20e0c5(%rip), %rsi # 0xa87aa7
movl %eax, %edi
movq %r14, %rdx
movl $0x62, %ecx
popq %rbx
popq %r14
popq %rbp
jmp 0x8b6518
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
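Every GL call above is immediately followed by a getError() check. A minimal sketch of that call-then-check cadence; GLU_EXPECT_NO_ERROR is a CTS macro, and checkError/fakeGetError below are hypothetical stand-ins so the example runs without a GL context:

```cpp
#include <stdexcept>
#include <string>

static unsigned fakeGetError(void) { return 0; } // stand-in for gl.getError(); 0 == GL_NO_ERROR

// Throw with the failing call's description if the error code is non-zero.
static void checkError(unsigned err, const std::string& what)
{
    if (err != 0)
        throw std::runtime_error(what + " failed, error code " + std::to_string(err));
}

int main(void)
{
    // In the real code: gl.texParameteri(target, GL_TEXTURE_MIN_FILTER, state.minFilter);
    checkError(fakeGetError(), "glTexParameteri(target, GL_TEXTURE_MIN_FILTER, state.minFilter)");
    return 0;
}
```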
deqp::gls::TextureSamplerTest::selectVertexShader(unsigned int)
|
const char* TextureSamplerTest::selectVertexShader (GLenum target)
{
    switch (target)
    {
        case GL_TEXTURE_2D:
            return
                "${VTX_HDR}"
                "${VTX_IN} ${HIGHP} vec2 a_position;\n"
                "uniform ${HIGHP} float u_posScale;\n"
                "${VTX_OUT} ${MEDIUMP} vec2 v_texCoord;\n"
                "void main (void)\n"
                "{\n"
                "\tv_texCoord = a_position;\n"
                "\tgl_Position = vec4(u_posScale * a_position, 0.0, 1.0);\n"
                "}";
        case GL_TEXTURE_3D:
            return
                "${VTX_HDR}"
                "${VTX_IN} ${HIGHP} vec3 a_position;\n"
                "uniform ${HIGHP} float u_posScale;\n"
                "${VTX_OUT} ${MEDIUMP} vec3 v_texCoord;\n"
                "void main (void)\n"
                "{\n"
                "\tv_texCoord = a_position;\n"
                "\tgl_Position = vec4(u_posScale * a_position.xy, 0.0, 1.0);\n"
                "}";
        case GL_TEXTURE_CUBE_MAP:
            return
                "${VTX_HDR}"
                "${VTX_IN} ${HIGHP} vec4 a_position;\n"
                "uniform ${HIGHP} float u_posScale;\n"
                "${VTX_OUT} ${MEDIUMP} vec2 v_texCoord;\n"
                "void main (void)\n"
                "{\n"
                "\tv_texCoord = a_position.zw;\n"
                "\tgl_Position = vec4(u_posScale * a_position.xy, 0.0, 1.0);\n"
                "}";
        default:
            DE_ASSERT(false);
            return NULL;
    }
}
|
cmpl $0xde1, %edi # imm = 0xDE1
je 0x879b72
cmpl $0x8513, %edi # imm = 0x8513
je 0x879b6a
cmpl $0x806f, %edi # imm = 0x806F
jne 0x879b7a
leaq 0x20e216(%rip), %rax # 0xa87d7f
retq
leaq 0x20e2f0(%rip), %rax # 0xa87e61
retq
leaq 0x20e127(%rip), %rax # 0xa87ca0
retq
xorl %eax, %eax
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
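The returned sources are templates: ${VTX_HDR}, ${HIGHP} and friends are substituted per GLSL version before compilation (the CTS uses tcu::StringTemplate for this). A dependency-free substitution sketch, assuming every placeholder has a map entry:

```cpp
#include <map>
#include <string>
#include <iostream>

// Replace each ${NAME} token with its mapped value (hypothetical helper).
static std::string specialize(std::string tmpl, const std::map<std::string, std::string>& args)
{
    for (std::map<std::string, std::string>::const_iterator it = args.begin(); it != args.end(); ++it)
    {
        const std::string token = "${" + it->first + "}";
        for (std::string::size_type pos = tmpl.find(token); pos != std::string::npos; pos = tmpl.find(token, pos))
        {
            tmpl.replace(pos, token.size(), it->second);
            pos += it->second.size();
        }
    }
    return tmpl;
}

int main(void)
{
    std::map<std::string, std::string> args;
    args["VTX_IN"] = "in";
    args["HIGHP"]  = "highp";
    std::cout << specialize("${VTX_IN} ${HIGHP} vec2 a_position;\n", args); // "in highp vec2 a_position;"
    return 0;
}
```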
deqp::gls::TextureSamplerTest::createTexture(glw::Functions const&, unsigned int)
|
GLuint TextureSamplerTest::createTexture (const glw::Functions& gl, GLenum target)
{
    switch (target)
    {
        case GL_TEXTURE_2D:
            return createTexture2D(gl);
        case GL_TEXTURE_3D:
            return createTexture3D(gl);
        case GL_TEXTURE_CUBE_MAP:
            return createTextureCube(gl);
        default:
            DE_ASSERT(false);
            return (GLuint)-1;
    }
}
|
cmpl $0x8513, %esi # imm = 0x8513
je 0x87b50f
cmpl $0x806f, %esi # imm = 0x806F
je 0x87b088
cmpl $0xde1, %esi # imm = 0xDE1
jne 0x87b519
jmp 0x87aed6
pushq %rax
callq 0x87b240
addq $0x8, %rsp
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
deqp::gls::TextureSamplerTest::iterate()
|
tcu::TestCase::IterateResult TextureSamplerTest::iterate (void)
{
    tcu::TestLog& log = m_testCtx.getLog();

    tcu::Surface textureRef(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
    tcu::Surface samplerRef(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
    tcu::Surface textureResult(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
    tcu::Surface samplerResult(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);

    int x = m_random.getInt(0, m_renderCtx.getRenderTarget().getWidth() - VIEWPORT_WIDTH);
    int y = m_random.getInt(0, m_renderCtx.getRenderTarget().getHeight() - VIEWPORT_HEIGHT);

    renderReferences(textureRef, samplerRef, x, y);
    renderResults(textureResult, samplerResult, x, y);

    bool isOk = pixelThresholdCompare (log, "Sampler render result", "Result from rendering with sampler", samplerRef, samplerResult, tcu::RGBA(0, 0, 0, 0), tcu::COMPARE_LOG_RESULT);

    if (!pixelThresholdCompare (log, "Texture render result", "Result from rendering with texture state", textureRef, textureResult, tcu::RGBA(0, 0, 0, 0), tcu::COMPARE_LOG_RESULT))
        isOk = false;

    if (!isOk)
    {
        m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Fail");
        return STOP;
    }

    m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
    return STOP;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x70, %rsp
movq %rdi, %rbx
movq 0x8(%rdi), %rax
movq 0x10(%rax), %r14
leaq 0x58(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x40(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x28(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x10(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
leaq 0xbc(%rbx), %r15
movl (%rax), %ebp
movq %r15, %rdi
callq 0x9fa2ea
addl $-0x7f, %ebp
xorl %edx, %edx
divl %ebp
movl %edx, %ebp
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl 0x4(%rax), %r12d
movq %r15, %rdi
callq 0x9fa2ea
addl $-0x7f, %r12d
xorl %edx, %edx
divl %r12d
movl %edx, %r15d
leaq 0x58(%rsp), %rsi
leaq 0x40(%rsp), %rdx
movq %rbx, %rdi
movl %ebp, %ecx
movl %r15d, %r8d
callq 0x87b520
leaq 0x28(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movq %rbx, %rdi
movl %ebp, %ecx
movl %r15d, %r8d
callq 0x87b730
leaq 0xc(%rsp), %r9
movl $0x0, (%r9)
movl $0x1, (%rsp)
leaq 0x20caaf(%rip), %rsi # 0xa885fc
leaq 0x20cabe(%rip), %rdx # 0xa88612
leaq 0x40(%rsp), %rcx
leaq 0x10(%rsp), %r8
movq %r14, %rdi
callq 0x9a5367
movl %eax, %ebp
leaq 0xc(%rsp), %r9
movl $0x0, (%r9)
movl $0x1, (%rsp)
leaq 0x20cab3(%rip), %rsi # 0xa88635
leaq 0x20cac2(%rip), %rdx # 0xa8864b
leaq 0x58(%rsp), %rcx
leaq 0x28(%rsp), %r8
movq %r14, %rdi
callq 0x9a5367
andb %al, %bpl
movq 0x8(%rbx), %rdi
movl %ebp, %eax
xorb $0x1, %al
movzbl %al, %esi
leaq 0x1cfe39(%rip), %rax # 0xa4b9e9
leaq 0x18696a(%rip), %rdx # 0xa02521
testb %bpl, %bpl
cmovneq %rax, %rdx
callq 0x9a9c16
leaq 0x10(%rsp), %rdi
callq 0x9a9590
leaq 0x28(%rsp), %rdi
callq 0x9a9590
leaq 0x40(%rsp), %rdi
callq 0x9a9590
leaq 0x58(%rsp), %rdi
callq 0x9a9590
xorl %eax, %eax
addq $0x70, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
jmp 0x87bc11
jmp 0x87bc11
jmp 0x87bc11
movq %rax, %rbx
jmp 0x87bc1e
movq %rax, %rbx
jmp 0x87bc28
movq %rax, %rbx
jmp 0x87bc32
jmp 0x87bc11
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x9a9590
leaq 0x28(%rsp), %rdi
callq 0x9a9590
leaq 0x40(%rsp), %rdi
callq 0x9a9590
leaq 0x58(%rsp), %rdi
callq 0x9a9590
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
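iterate() passes tcu::RGBA(0, 0, 0, 0) as the threshold, so the sampler-based and texture-state-based renderings must match exactly. A stand-alone sketch of a zero-threshold comparison over packed RGBA8 pixels (a hypothetical helper, not the tcu::pixelThresholdCompare implementation):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// With a zero threshold, any channel difference in any pixel is a failure.
static bool exactCompare(const std::vector<uint32_t>& ref, const std::vector<uint32_t>& res)
{
    if (ref.size() != res.size())
        return false;
    for (std::size_t i = 0; i < ref.size(); ++i)
        if (ref[i] != res[i])
            return false;
    return true;
}

int main(void)
{
    std::vector<uint32_t> ref(128 * 128, 0xff808080u); // 128x128 of mid-grey pixels
    std::vector<uint32_t> res(ref);
    std::printf("%s\n", exactCompare(ref, res) ? "Pass" : "Fail");
    return 0;
}
```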
deqp::gls::MultiTextureSamplerTest::setSamplerState(glw::Functions const&, deqp::gls::MultiTextureSamplerTest::SamplingState, unsigned int)
|
void MultiTextureSamplerTest::setSamplerState (const glw::Functions& gl, SamplingState state, GLuint sampler)
{
    gl.samplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, state.minFilter);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, state.minFilter)");
    gl.samplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, state.magFilter);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, state.magFilter)");
    gl.samplerParameteri(sampler, GL_TEXTURE_WRAP_S, state.wrapS);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameteri(sampler, GL_TEXTURE_WRAP_S, state.wrapS)");
    gl.samplerParameteri(sampler, GL_TEXTURE_WRAP_T, state.wrapT);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameteri(sampler, GL_TEXTURE_WRAP_T, state.wrapT)");
    gl.samplerParameteri(sampler, GL_TEXTURE_WRAP_R, state.wrapR);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameteri(sampler, GL_TEXTURE_WRAP_R, state.wrapR)");
    gl.samplerParameterf(sampler, GL_TEXTURE_MAX_LOD, state.maxLod);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, state.maxLod)");
    gl.samplerParameterf(sampler, GL_TEXTURE_MIN_LOD, state.minLod);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glSamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, state.minLod)");
}
|
pushq %rbp
pushq %r14
pushq %rbx
movl %esi, %ebp
movq %rdi, %rbx
movl 0x20(%rsp), %edx
movl %esi, %edi
movl $0x2801, %esi # imm = 0x2801
callq *0x1280(%rbx)
callq *0x800(%rbx)
leaq 0x20bc70(%rip), %rsi # 0xa87ae1
leaq 0x20ba98(%rip), %r14 # 0xa87910
movl %eax, %edi
movq %r14, %rdx
movl $0x268, %ecx # imm = 0x268
callq 0x8b6518
movl 0x24(%rsp), %edx
movl %ebp, %edi
movl $0x2800, %esi # imm = 0x2800
callq *0x1280(%rbx)
callq *0x800(%rbx)
leaq 0x20bc81(%rip), %rsi # 0xa87b26
movl %eax, %edi
movq %r14, %rdx
movl $0x26a, %ecx # imm = 0x26A
callq 0x8b6518
movl 0x2c(%rsp), %edx
movl %ebp, %edi
movl $0x2802, %esi # imm = 0x2802
callq *0x1280(%rbx)
callq *0x800(%rbx)
leaq 0x20bc99(%rip), %rsi # 0xa87b6b
movl %eax, %edi
movq %r14, %rdx
movl $0x26c, %ecx # imm = 0x26C
callq 0x8b6518
movl 0x28(%rsp), %edx
movl %ebp, %edi
movl $0x2803, %esi # imm = 0x2803
callq *0x1280(%rbx)
callq *0x800(%rbx)
leaq 0x20bca9(%rip), %rsi # 0xa87ba8
movl %eax, %edi
movq %r14, %rdx
movl $0x26e, %ecx # imm = 0x26E
callq 0x8b6518
movl 0x30(%rsp), %edx
movl %ebp, %edi
movl $0x8072, %esi # imm = 0x8072
callq *0x1280(%rbx)
callq *0x800(%rbx)
leaq 0x20bcb9(%rip), %rsi # 0xa87be5
movl %eax, %edi
movq %r14, %rdx
movl $0x270, %ecx # imm = 0x270
callq 0x8b6518
movss 0x38(%rsp), %xmm0
movl %ebp, %edi
movl $0x813b, %esi # imm = 0x813B
callq *0x1270(%rbx)
callq *0x800(%rbx)
leaq 0x20bcc7(%rip), %rsi # 0xa87c22
movl %eax, %edi
movq %r14, %rdx
movl $0x272, %ecx # imm = 0x272
callq 0x8b6518
movss 0x34(%rsp), %xmm0
movl %ebp, %edi
movl $0x813a, %esi # imm = 0x813A
callq *0x1270(%rbx)
callq *0x800(%rbx)
leaq 0x20bcd7(%rip), %rsi # 0xa87c61
movl %eax, %edi
movq %r14, %rdx
movl $0x274, %ecx # imm = 0x274
popq %rbx
popq %r14
popq %rbp
jmp 0x8b6518
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
deqp::gls::MultiTextureSamplerTest::render()
|
void MultiTextureSamplerTest::render (void)
{
    const glw::Functions& gl = m_renderCtx.getFunctions();

    GLuint samplerLoc1 = (GLuint)-1;
    GLuint samplerLoc2 = (GLuint)-1;
    GLuint scaleLoc = (GLuint)-1;

    gl.useProgram(m_program->getProgram());
    GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram(m_program->getProgram())");

    samplerLoc1 = glGetUniformLocation(m_program->getProgram(), "u_sampler1");
    TCU_CHECK(samplerLoc1 != (GLuint)-1);

    samplerLoc2 = glGetUniformLocation(m_program->getProgram(), "u_sampler2");
    TCU_CHECK(samplerLoc2 != (GLuint)-1);

    scaleLoc = glGetUniformLocation(m_program->getProgram(), "u_posScale");
    TCU_CHECK(scaleLoc != (GLuint)-1);

    gl.clearColor(0.5f, 0.5f, 0.5f, 1.0f);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glClearColor(0.5f, 0.5f, 0.5f, 1.0f)");

    gl.clear(GL_COLOR_BUFFER_BIT);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glClear(GL_COLOR_BUFFER_BIT)");

    gl.uniform1i(samplerLoc1, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1i(samplerLoc1, 0)");

    gl.uniform1i(samplerLoc2, 1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1i(samplerLoc2, 1)");

    gl.uniform1f(scaleLoc, 1.0f);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1f(scaleLoc, 1.0f)");

    switch (m_target)
    {
        case GL_TEXTURE_2D:
        {
            glu::VertexArrayBinding vertexArrays[] =
            {
                glu::VertexArrayBinding(glu::BindingPoint("a_position"), glu::VertexArrayPointer(glu::VTX_COMP_FLOAT, glu::VTX_COMP_CONVERT_NONE, 2, 6, 0, s_positions))
            };
            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));

            gl.uniform1f(scaleLoc, 0.25f);
            GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1f(scaleLoc, 0.25f)");

            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));
            break;
        }

        case GL_TEXTURE_3D:
        {
            glu::VertexArrayBinding vertexArrays[] =
            {
                glu::VertexArrayBinding(glu::BindingPoint("a_position"), glu::VertexArrayPointer(glu::VTX_COMP_FLOAT, glu::VTX_COMP_CONVERT_NONE, 3, 6, 0, s_positions3D))
            };
            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));

            gl.uniform1f(scaleLoc, 0.25f);
            GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1f(scaleLoc, 0.25f)");

            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));
            break;
        }

        case GL_TEXTURE_CUBE_MAP:
        {
            glu::VertexArrayBinding vertexArrays[] =
            {
                glu::VertexArrayBinding(glu::BindingPoint("a_position"), glu::VertexArrayPointer(glu::VTX_COMP_FLOAT, glu::VTX_COMP_CONVERT_NONE, 4, 6, 0, s_positionsCube))
            };
            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));

            gl.uniform1f(scaleLoc, 0.25f);
            GLU_EXPECT_NO_ERROR(gl.getError(), "glUniform1f(scaleLoc, 0.25f)");

            glu::draw(m_renderCtx, m_program->getProgram(), DE_LENGTH_OF_ARRAY(vertexArrays), vertexArrays, glu::PrimitiveList(glu::PRIMITIVETYPE_TRIANGLES, 6));
            break;
        }

        default:
            DE_ASSERT(false);
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rdi, %rbx
movq 0x70(%rdi), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %r14
movq 0x78(%rbx), %rax
movl 0x98(%rax), %edi
callq *0x1680(%r14)
callq *0x800(%r14)
leaq 0x19d02e(%rip), %rsi # 0xa19c78
leaq 0x20acbf(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x318, %ecx # imm = 0x318
callq 0x8b6518
movq 0x78(%rbx), %rax
movl 0x98(%rax), %edi
leaq 0x20be17(%rip), %rsi # 0xa88a85
callq 0x959316
cmpl $-0x1, %eax
je 0x87d283
movl %eax, %r12d
movq 0x78(%rbx), %rax
movl 0x98(%rax), %edi
leaq 0x20be1a(%rip), %rsi # 0xa88aaa
callq 0x959316
cmpl $-0x1, %eax
je 0x87d2b0
movl %eax, %r15d
movq 0x78(%rbx), %rax
movl 0x98(%rax), %edi
leaq 0x20b554(%rip), %rsi # 0xa88206
callq 0x959316
cmpl $-0x1, %eax
je 0x87d2dd
movl %eax, %ebp
movss 0x17efd2(%rip), %xmm0 # 0x9fbc9c
movss 0x17efce(%rip), %xmm3 # 0x9fbca0
movaps %xmm0, %xmm1
movaps %xmm0, %xmm2
callq *0x1c0(%r14)
callq *0x800(%r14)
leaq 0x20b53b(%rip), %rsi # 0xa88228
leaq 0x20ac1c(%rip), %r13 # 0xa87910
movl %eax, %edi
movq %r13, %rdx
movl $0x324, %ecx # imm = 0x324
callq 0x8b6518
movl $0x4000, %edi # imm = 0x4000
callq *0x188(%r14)
callq *0x800(%r14)
leaq 0x19ced1(%rip), %rsi # 0xa19bee
movl %eax, %edi
movq %r13, %rdx
movl $0x327, %ecx # imm = 0x327
callq 0x8b6518
movl %r12d, %edi
xorl %esi, %esi
callq *0x14f0(%r14)
callq *0x800(%r14)
leaq 0x20bd89(%rip), %rsi # 0xa88acf
movl %eax, %edi
movq %r13, %rdx
movl $0x32a, %ecx # imm = 0x32A
callq 0x8b6518
movl %r15d, %edi
movl $0x1, %esi
callq *0x14f0(%r14)
callq *0x800(%r14)
leaq 0x20bd79(%rip), %rsi # 0xa88aeb
movl %eax, %edi
movq %r13, %rdx
movl $0x32d, %ecx # imm = 0x32D
callq 0x8b6518
movl %ebp, %edi
movss 0x17ef15(%rip), %xmm0 # 0x9fbca0
callq *0x14e0(%r14)
callq *0x800(%r14)
leaq 0x20b4c8(%rip), %rsi # 0xa88268
movl %eax, %edi
movq %r13, %rdx
movl $0x330, %ecx # imm = 0x330
callq 0x8b6518
movl 0x80(%rbx), %eax
cmpl $0x8513, %eax # imm = 0x8513
je 0x87d0dc
cmpl $0x806f, %eax # imm = 0x806F
je 0x87cf59
cmpl $0xde1, %eax # imm = 0xDE1
jne 0x87d271
leaq 0x40(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x184848(%rip), %rsi # 0xa0162f
leaq 0x18484b(%rip), %rdx # 0xa01639
leaq 0x30(%rsp), %rdi
callq 0x334070
leaq 0x8(%rsp), %rdi
movl $0x1, -0x8(%rdi)
leaq 0x18(%rsp), %r13
movq %r13, -0x10(%r13)
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl $0x0, 0x28(%rsp)
movl (%rsp), %eax
leaq 0x58(%rsp), %rdi
movl %eax, -0x8(%rdi)
leaq 0x68(%rsp), %r15
movq %r15, -0x10(%r15)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl 0x28(%rsp), %eax
movl %eax, 0x78(%rsp)
movaps 0x20aa44(%rip), %xmm0 # 0xa878a0
movaps %xmm0, 0x80(%rsp)
movl $0x0, 0x90(%rsp)
leaq 0x20bdca(%rip), %rax # 0xa88c40
movq %rax, 0x98(%rsp)
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x87ce95
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
cmpq %r12, %rdi
je 0x87ceac
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movabsq $0x600000000, %r12 # imm = 0x600000000
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
movss 0x17eda4(%rip), %xmm0 # 0x9fbc98
movl %ebp, %edi
callq *0x14e0(%r14)
callq *0x800(%r14)
leaq 0x20b379(%rip), %rsi # 0xa88284
leaq 0x20a9fe(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x33e, %ecx # imm = 0x33E
callq 0x8b6518
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
jmp 0x87d25a
leaq 0x40(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x1846c5(%rip), %rsi # 0xa0162f
leaq 0x1846c8(%rip), %rdx # 0xa01639
leaq 0x30(%rsp), %rdi
callq 0x334070
leaq 0x8(%rsp), %rdi
movl $0x1, -0x8(%rdi)
leaq 0x18(%rsp), %r13
movq %r13, -0x10(%r13)
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl $0x0, 0x28(%rsp)
movl (%rsp), %eax
leaq 0x58(%rsp), %rdi
movl %eax, -0x8(%rdi)
leaq 0x68(%rsp), %r15
movq %r15, -0x10(%r15)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl 0x28(%rsp), %eax
movl %eax, 0x78(%rsp)
movaps 0x20a8b1(%rip), %xmm0 # 0xa87890
movaps %xmm0, 0x80(%rsp)
movl $0x0, 0x90(%rsp)
leaq 0x20bc77(%rip), %rax # 0xa88c70
movq %rax, 0x98(%rsp)
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x87d018
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
cmpq %r12, %rdi
je 0x87d02f
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movabsq $0x600000000, %r12 # imm = 0x600000000
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
movss 0x17ec21(%rip), %xmm0 # 0x9fbc98
movl %ebp, %edi
callq *0x14e0(%r14)
callq *0x800(%r14)
leaq 0x20b1f6(%rip), %rsi # 0xa88284
leaq 0x20a87b(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x34f, %ecx # imm = 0x34F
callq 0x8b6518
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
jmp 0x87d25a
leaq 0x40(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x184542(%rip), %rsi # 0xa0162f
leaq 0x184545(%rip), %rdx # 0xa01639
leaq 0x30(%rsp), %rdi
callq 0x334070
leaq 0x8(%rsp), %rdi
movl $0x1, -0x8(%rdi)
leaq 0x18(%rsp), %r13
movq %r13, -0x10(%r13)
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl $0x0, 0x28(%rsp)
movl (%rsp), %eax
leaq 0x58(%rsp), %rdi
movl %eax, -0x8(%rdi)
leaq 0x68(%rsp), %r15
movq %r15, -0x10(%r15)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
movl 0x28(%rsp), %eax
movl %eax, 0x78(%rsp)
movaps 0x20a71e(%rip), %xmm0 # 0xa87880
movaps %xmm0, 0x80(%rsp)
movl $0x0, 0x90(%rsp)
leaq 0x20bb44(%rip), %rax # 0xa88cc0
movq %rax, 0x98(%rsp)
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x87d19b
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
cmpq %r12, %rdi
je 0x87d1b2
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movabsq $0x600000000, %r12 # imm = 0x600000000
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
movss 0x17ea9e(%rip), %xmm0 # 0x9fbc98
movl %ebp, %edi
callq *0x14e0(%r14)
callq *0x800(%r14)
leaq 0x20b073(%rip), %rsi # 0xa88284
leaq 0x20a6f8(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x360, %ecx # imm = 0x360
callq 0x8b6518
movq 0x70(%rbx), %rdi
movq 0x78(%rbx), %rax
movl 0x98(%rax), %esi
movq %rsp, %r8
movq %r12, (%r8)
movl $0x3, 0x8(%r8)
movq $0x0, 0x10(%r8)
leaq 0x50(%rsp), %rcx
movl $0x1, %edx
xorl %r9d, %r9d
callq 0x8b7848
movq 0x58(%rsp), %rdi
cmpq %r15, %rdi
je 0x87d271
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0x20b7f9(%rip), %rdx # 0xa88a90
leaq 0x20a672(%rip), %rcx # 0xa87910
movq %rax, %rdi
xorl %esi, %esi
movl $0x31b, %r8d # imm = 0x31B
callq 0x99c17e
jmp 0x87d308
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0x20b7f1(%rip), %rdx # 0xa88ab5
leaq 0x20a645(%rip), %rcx # 0xa87910
movq %rax, %rdi
xorl %esi, %esi
movl $0x31e, %r8d # imm = 0x31E
callq 0x99c17e
jmp 0x87d308
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0x20af20(%rip), %rdx # 0xa88211
leaq 0x20a618(%rip), %rcx # 0xa87910
movq %rax, %rdi
xorl %esi, %esi
movl $0x321, %r8d # imm = 0x321
callq 0x99c17e
leaq 0x39ddb1(%rip), %rsi # 0xc1b0c0
leaq -0x54b536(%rip), %rdx # 0x331de0
movq %r14, %rdi
callq 0x325940
jmp 0x87d380
jmp 0x87d380
jmp 0x87d380
jmp 0x87d380
jmp 0x87d380
jmp 0x87d380
jmp 0x87d32e
jmp 0x87d32e
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x87d351
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x87d351
jmp 0x87d34e
jmp 0x87d34e
movq %rax, %rbx
movq 0x30(%rsp), %rdi
cmpq %r12, %rdi
je 0x87d39a
movq 0x40(%rsp), %rsi
jmp 0x87d392
jmp 0x87d366
jmp 0x87d366
movq %rax, %rbx
jmp 0x87d39a
jmp 0x87d36f
jmp 0x87d36f
movq %rax, %rbx
movq %r14, %rdi
callq 0x325d40
jmp 0x87d39a
jmp 0x87d380
jmp 0x87d380
movq %rax, %rbx
movq 0x58(%rsp), %rdi
cmpq %r15, %rdi
je 0x87d39a
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
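render() draws the same six vertices twice, first with u_posScale = 1.0 and then 0.25; the vertex shaders compute gl_Position = vec4(u_posScale * a_position.xy, 0.0, 1.0), so the second pass covers a smaller quad centered in clip space. A tiny sketch of that scaling with illustrative data:

```cpp
#include <cstdio>

struct Vec2 { float x, y; };

// Mirrors the shader's position transform; z = 0 and w = 1 are constant there.
static Vec2 scalePosition(Vec2 p, float posScale)
{
    Vec2 r = { posScale * p.x, posScale * p.y };
    return r;
}

int main(void)
{
    const Vec2 corner = { 1.0f, -1.0f };           // full-screen quad corner
    const Vec2 scaled = scalePosition(corner, 0.25f);
    std::printf("(%g, %g)\n", scaled.x, scaled.y); // (0.25, -0.25)
    return 0;
}
```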
deqp::gls::MultiTextureSamplerTest::createTextureCube(glw::Functions const&, int)
|
GLuint MultiTextureSamplerTest::createTextureCube (const glw::Functions& gl, int id)
{
    GLuint texture = (GLuint)-1;
    tcu::TextureCube refTexture (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), CUBEMAP_SIZE);

    gl.genTextures(1, &texture);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glGenTextures(1, &texture)");

    refTexture.allocLevel(tcu::CUBEFACE_POSITIVE_X, 0);
    refTexture.allocLevel(tcu::CUBEFACE_POSITIVE_Y, 0);
    refTexture.allocLevel(tcu::CUBEFACE_POSITIVE_Z, 0);
    refTexture.allocLevel(tcu::CUBEFACE_NEGATIVE_X, 0);
    refTexture.allocLevel(tcu::CUBEFACE_NEGATIVE_Y, 0);
    refTexture.allocLevel(tcu::CUBEFACE_NEGATIVE_Z, 0);

    switch (id)
    {
        case 0:
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_X), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_Y), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_Z), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_X), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_Y), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_Z), tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f));
            break;
        case 1:
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_X), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_Y), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_POSITIVE_Z), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_X), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_Y), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            tcu::fillWithComponentGradients(refTexture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_Z), tcu::Vec4(0.5f, 0.5f, 0.5f, 0.5f), tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
            break;
        default:
            DE_ASSERT(false);
    }

    gl.bindTexture(GL_TEXTURE_CUBE_MAP, texture);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(GL_TEXTURE_CUBE_MAP, texture)");

    for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
    {
        const deUint32 target = glu::getGLCubeFace((tcu::CubeFace)face);
        gl.texImage2D(target, 0, GL_RGBA8, refTexture.getSize(), refTexture.getSize(), 0, GL_RGBA, GL_UNSIGNED_BYTE, refTexture.getLevelFace(0, (tcu::CubeFace)face).getDataPtr());
    }
    GLU_EXPECT_NO_ERROR(gl.getError(), "glTexImage2D(GL_TEXTURE_CUBE_MAP_...) failed");

    gl.generateMipmap(GL_TEXTURE_CUBE_MAP);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glGenerateMipmap(GL_TEXTURE_CUBE_MAP)");

    gl.bindTexture(GL_TEXTURE_CUBE_MAP, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(GL_TEXTURE_CUBE_MAP, 0)");

    return texture;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x198, %rsp # imm = 0x198
movl %esi, %ebp
movq %rdi, %rbx
leaq 0x2c(%rsp), %r14
movl $0xffffffff, (%r14) # imm = 0xFFFFFFFF
movabsq $0x300000008, %rax # imm = 0x300000008
movq %rsp, %rsi
movq %rax, (%rsi)
leaq 0x30(%rsp), %rdi
movl $0x20, %edx
callq 0x9b6f1a
movl $0x1, %edi
movq %r14, %rsi
callq *0x6f8(%rbx)
callq *0x800(%rbx)
leaq 0x20aad5(%rip), %rsi # 0xa882a1
leaq 0x20a13d(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x3c0, %ecx # imm = 0x3C0
callq 0x8b6518
leaq 0x30(%rsp), %rdi
movl $0x1, %esi
xorl %edx, %edx
callq 0x9b7232
leaq 0x30(%rsp), %rdi
movl $0x3, %esi
xorl %edx, %edx
callq 0x9b7232
leaq 0x30(%rsp), %rdi
movl $0x5, %esi
xorl %edx, %edx
callq 0x9b7232
leaq 0x30(%rsp), %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x9b7232
leaq 0x30(%rsp), %rdi
movl $0x2, %esi
xorl %edx, %edx
callq 0x9b7232
leaq 0x30(%rsp), %rdi
movl $0x4, %esi
xorl %edx, %edx
callq 0x9b7232
testl %ebp, %ebp
je 0x87d94e
cmpl $0x1, %ebp
jne 0x87da2c
movq 0xe8(%rsp), %rdi
movaps 0x1c23be(%rip), %xmm0 # 0xa3fc20
movq %rsp, %rsi
movups %xmm0, (%rsi)
movaps 0x183401(%rip), %xmm0 # 0xa00c70
leaq 0x1c(%rsp), %rdx
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x118(%rsp), %rdi
movq %rsp, %rsi
movaps 0x1c2392(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1833d3(%rip), %xmm0 # 0xa00c70
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x148(%rsp), %rdi
movq %rsp, %rsi
movaps 0x1c2369(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1833aa(%rip), %xmm0 # 0xa00c70
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0xd0(%rsp), %rdi
movq %rsp, %rsi
movaps 0x1c2340(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x183381(%rip), %xmm0 # 0xa00c70
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x100(%rsp), %rdi
movq %rsp, %rsi
movaps 0x1c2317(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x183358(%rip), %xmm0 # 0xa00c70
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x130(%rsp), %rdi
movq %rsp, %rsi
movaps 0x1c22ee(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x18332f(%rip), %xmm0 # 0xa00c70
movups %xmm0, (%rdx)
callq 0x9bba73
jmp 0x87da2c
movq 0xe8(%rsp), %rdi
xorps %xmm0, %xmm0
movq %rsp, %rsi
movaps %xmm0, (%rsi)
movaps 0x1c22ba(%rip), %xmm0 # 0xa3fc20
leaq 0x1c(%rsp), %rdx
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x118(%rsp), %rdi
movq %rsp, %rsi
xorps %xmm0, %xmm0
movaps %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1c2290(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x148(%rsp), %rdi
xorps %xmm0, %xmm0
movq %rsp, %rsi
movaps %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1c226b(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0xd0(%rsp), %rdi
movq %rsp, %rsi
xorps %xmm0, %xmm0
movaps %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1c2246(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x100(%rsp), %rdi
xorps %xmm0, %xmm0
movq %rsp, %rsi
movaps %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1c2221(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rdx)
callq 0x9bba73
movq 0x130(%rsp), %rdi
movq %rsp, %rsi
xorps %xmm0, %xmm0
movaps %xmm0, (%rsi)
leaq 0x1c(%rsp), %rdx
movaps 0x1c21fc(%rip), %xmm0 # 0xa3fc20
movups %xmm0, (%rdx)
callq 0x9bba73
movl 0x2c(%rsp), %esi
movl $0x8513, %edi # imm = 0x8513
callq *0xb8(%rbx)
callq *0x800(%rbx)
leaq 0x20aa47(%rip), %rsi # 0xa8848f
leaq 0x209ec1(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x3e2, %ecx # imm = 0x3E2
callq 0x8b6518
leaq 0xd0(%rsp), %r15
xorl %r14d, %r14d
movl %r14d, %edi
callq 0x8c404a
movq 0x1310(%rbx), %r10
movl 0x38(%rsp), %ecx
movq (%r15), %r11
subq $0x8, %rsp
movl %eax, %edi
xorl %esi, %esi
movl $0x8058, %edx # imm = 0x8058
movl %ecx, %r8d
xorl %r9d, %r9d
pushq 0x20(%r11)
pushq $0x1401 # imm = 0x1401
pushq $0x1908 # imm = 0x1908
callq *%r10
addq $0x20, %rsp
incq %r14
addq $0x18, %r15
cmpq $0x6, %r14
jne 0x87da66
callq *0x800(%rbx)
leaq 0x20a9fd(%rip), %rsi # 0xa884bb
leaq 0x209e4b(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x3e9, %ecx # imm = 0x3E9
callq 0x8b6518
movl $0x8513, %edi # imm = 0x8513
callq *0x710(%rbx)
callq *0x800(%rbx)
leaq 0x20a9ff(%rip), %rsi # 0xa884e8
leaq 0x209e20(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x3ec, %ecx # imm = 0x3EC
callq 0x8b6518
movl $0x8513, %edi # imm = 0x8513
xorl %esi, %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
leaq 0x20b011(%rip), %rsi # 0xa88b27
leaq 0x209df3(%rip), %rdx # 0xa87910
movl %eax, %edi
movl $0x3ee, %ecx # imm = 0x3EE
callq 0x8b6518
movl 0x2c(%rsp), %ebx
leaq 0x30(%rsp), %rdi
callq 0x9b7522
movl %ebx, %eax
addq $0x198, %rsp # imm = 0x198
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
jmp 0x87db61
movq %rax, %rbx
leaq 0x30(%rsp), %rdi
callq 0x9b7522
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
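The id parameter selects the gradient range, [0.0, 0.5] for id 0 and [0.5, 1.0] for id 1, so the two cube maps bound to the two texture units produce visibly different output. A hypothetical helper capturing that selection:

```cpp
#include <cassert>

struct GradientRange { float minVal, maxVal; };

// id 0 -> gradients in [0.0, 0.5]; id 1 -> gradients in [0.5, 1.0].
static GradientRange gradientRangeForId(int id)
{
    assert(id == 0 || id == 1); // mirrors the DE_ASSERT(false) default branch
    GradientRange r;
    r.minVal = (id == 0) ? 0.0f : 0.5f;
    r.maxVal = (id == 0) ? 0.5f : 1.0f;
    return r;
}

int main(void)
{
    assert(gradientRangeForId(0).maxVal == 0.5f);
    assert(gradientRangeForId(1).minVal == 0.5f);
    return 0;
}
```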
deqp::gls::MultiTextureSamplerTest::renderResults(tcu::Surface&, tcu::Surface&, int, int)
|
void MultiTextureSamplerTest::renderResults (tcu::Surface& textureResult, tcu::Surface& samplerResult, int x, int y)
{
    const glw::Functions& gl = m_renderCtx.getFunctions();

    GLuint texture1 = createTexture(gl, m_target, 0);
    GLuint texture2 = createTexture(gl, m_target, 1);
    GLuint sampler = -1;

    gl.viewport(x, y, VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glViewport(x, y, VIEWPORT_WIDTH, VIEWPORT_HEIGHT)");

    gl.genSamplers(1, &sampler);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glGenSamplers(1, &sampler)");
    TCU_CHECK(sampler != (GLuint)-1);

    gl.bindSampler(0, sampler);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindSampler(0, sampler)");
    gl.bindSampler(1, sampler);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindSampler(1, sampler)");

    // First set sampler state
    setSamplerState(gl, m_samplerState, sampler);

    // Set texture state
    gl.bindTexture(m_target, texture1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, texture1)");

    setTextureState(gl, m_target, m_textureState1);

    gl.bindTexture(m_target, texture2);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, texture2)");

    setTextureState(gl, m_target, m_textureState2);

    gl.activeTexture(GL_TEXTURE0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glActiveTexture(GL_TEXTURE0)");
    gl.bindTexture(m_target, texture1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, texture1)");

    gl.activeTexture(GL_TEXTURE1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glActiveTexture(GL_TEXTURE1)");
    gl.bindTexture(m_target, texture2);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, texture2)");

    // Render using sampler
    render();
    glu::readPixels(m_renderCtx, x, y, samplerResult.getAccess());

    gl.bindSampler(0, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindSampler(0, 0)");
    gl.bindSampler(1, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindSampler(1, 0)");

    render();
    glu::readPixels(m_renderCtx, x, y, textureResult.getAccess());

    gl.activeTexture(GL_TEXTURE0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glActiveTexture(GL_TEXTURE0)");
    gl.bindTexture(m_target, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, 0)");

    gl.activeTexture(GL_TEXTURE1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glActiveTexture(GL_TEXTURE1)");
    gl.bindTexture(m_target, 0);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glBindTexture(m_target, 0)");

    gl.deleteSamplers(1, &sampler);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glDeleteSamplers(1, &sampler)");
    gl.deleteTextures(1, &texture1);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glDeleteTextures(1, &texture1)");
    gl.deleteTextures(1, &texture2);
    GLU_EXPECT_NO_ERROR(gl.getError(), "glDeleteTextures(1, &texture2)");
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movl %r8d, %r12d
movl %ecx, %r13d
movq %rdx, %rbp
movq %rsi, 0x48(%rsp)
movq %rdi, %r14
movq 0x70(%rdi), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %rbx
movl 0x80(%r14), %esi
movq %rax, %rdi
xorl %edx, %edx
callq 0x87db76
movl %eax, 0x34(%rsp)
movl 0x80(%r14), %esi
movq %rbx, %rdi
movl $0x1, %edx
callq 0x87db76
movl %eax, 0x30(%rsp)
leaq 0x2c(%rsp), %r15
movl $0xffffffff, (%r15) # imm = 0xFFFFFFFF
movl %r13d, 0x38(%rsp)
movl %r13d, %edi
movl %r12d, 0x3c(%rsp)
movl %r12d, %esi
movl $0x80, %edx
movl $0x80, %ecx
callq *0x1a00(%rbx)
callq *0x800(%rbx)
leaq 0x20a511(%rip), %rsi # 0xa8850e
leaq 0x20990c(%rip), %r12 # 0xa87910
movl %eax, %edi
movq %r12, %rdx
movl $0x438, %ecx # imm = 0x438
callq 0x8b6518
movl $0x1, %edi
movq %r15, %rsi
callq *0x6f0(%rbx)
callq *0x800(%rbx)
leaq 0x20a551(%rip), %rsi # 0xa8857f
movl %eax, %edi
movq %r12, %rdx
movl $0x43b, %ecx # imm = 0x43B
callq 0x8b6518
movl (%r15), %esi
cmpl $-0x1, %esi
je 0x87e476
xorl %edi, %edi
callq *0xa8(%rbx)
callq *0x800(%rbx)
leaq 0x20a552(%rip), %rsi # 0xa885b0
leaq 0x2098ab(%rip), %r15 # 0xa87910
movl %eax, %edi
movq %r15, %rdx
movl $0x43f, %ecx # imm = 0x43F
callq 0x8b6518
movl 0x2c(%rsp), %esi
movl $0x1, %edi
callq *0xa8(%rbx)
callq *0x800(%rbx)
leaq 0x20ab1e(%rip), %rsi # 0xa88bae
movl %eax, %edi
movq %r15, %rdx
movl $0x441, %ecx # imm = 0x441
callq 0x8b6518
movups 0xbc(%r14), %xmm0
movups 0xc8(%r14), %xmm1
movups %xmm1, 0x7c(%rsp)
movaps %xmm0, 0x70(%rsp)
movl 0x2c(%rsp), %esi
movups 0x7c(%rsp), %xmm0
movups %xmm0, 0xc(%rsp)
movaps 0x70(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq 0x87be4a
movl 0x80(%r14), %edi
movl 0x34(%rsp), %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
leaq 0x20aa57(%rip), %r13 # 0xa88b4d
movl %eax, %edi
movq %r13, %rsi
movq %r15, %rdx
movl $0x448, %ecx # imm = 0x448
callq 0x8b6518
movl 0x80(%r14), %esi
movups 0x90(%r14), %xmm0
movups %xmm0, 0x5c(%rsp)
movups 0x84(%r14), %xmm0
movaps %xmm0, 0x50(%rsp)
movups 0x5c(%rsp), %xmm0
movups %xmm0, 0xc(%rsp)
movaps 0x50(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq 0x87bcf6
movl 0x80(%r14), %edi
movl 0x30(%rsp), %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
leaq 0x20aa2a(%rip), %r12 # 0xa88b8c
movl %eax, %edi
movq %r12, %rsi
movq %r15, %rdx
movl $0x44c, %ecx # imm = 0x44C
callq 0x8b6518
movl 0x80(%r14), %esi
movups 0xa0(%r14), %xmm0
movups 0xac(%r14), %xmm1
movups %xmm1, 0xc(%rsp)
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq 0x87bcf6
movl $0x84c0, %edi # imm = 0x84C0
callq *0x8(%rbx)
callq *0x800(%rbx)
leaq 0x2024dc(%rip), %rsi # 0xa8068d
movl %eax, %edi
movq %r15, %rdx
movl $0x450, %ecx # imm = 0x450
callq 0x8b6518
movl 0x80(%r14), %edi
movl 0x34(%rsp), %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
movl %eax, %edi
movq %r13, %rsi
movq %r15, %rdx
movl $0x452, %ecx # imm = 0x452
callq 0x8b6518
movl $0x84c1, %edi # imm = 0x84C1
callq *0x8(%rbx)
callq *0x800(%rbx)
leaq 0x20a971(%rip), %rsi # 0xa88b6f
movl %eax, %edi
movq %r15, %rdx
movl $0x455, %ecx # imm = 0x455
callq 0x8b6518
movl 0x80(%r14), %edi
movl 0x30(%rsp), %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
movl %eax, %edi
movq %r12, %rsi
movq %r15, %rdx
movl $0x457, %ecx # imm = 0x457
callq 0x8b6518
movq %r14, %rdi
callq 0x87cc0a
movq 0x70(%r14), %r13
movabsq $0x300000008, %rax # imm = 0x300000008
movq %rax, 0x40(%rsp)
movl (%rbp), %edx
movl 0x4(%rbp), %ecx
movq 0x10(%rbp), %r9
testq %r9, %r9
je 0x87e264
movq 0x8(%rbp), %r9
leaq 0x90(%rsp), %rbp
leaq 0x40(%rsp), %r12
movq %rbp, %rdi
movq %r12, %rsi
movl $0x1, %r8d
callq 0x9ad03e
movq %r13, %rdi
movl 0x38(%rsp), %esi
movl 0x3c(%rsp), %edx
movq %rbp, %rcx
callq 0x8b7e74
xorl %edi, %edi
xorl %esi, %esi
callq *0xa8(%rbx)
callq *0x800(%rbx)
leaq 0x20a31e(%rip), %rsi # 0xa885ca
movl %eax, %edi
movq %r15, %rdx
movl $0x45e, %ecx # imm = 0x45E
callq 0x8b6518
movl $0x1, %edi
xorl %esi, %esi
callq *0xa8(%rbx)
callq *0x800(%rbx)
leaq 0x20a8f3(%rip), %rsi # 0xa88bc8
movl %eax, %edi
movq %r15, %rdx
movl $0x460, %ecx # imm = 0x460
callq 0x8b6518
movq %r14, %rdi
callq 0x87cc0a
movq 0x70(%r14), %r13
movabsq $0x300000008, %rax # imm = 0x300000008
movq %rax, (%r12)
movq 0x48(%rsp), %rax
movl (%rax), %edx
movl 0x4(%rax), %ecx
movq 0x10(%rax), %r9
testq %r9, %r9
je 0x87e315
movq 0x8(%rax), %r9
leaq 0x90(%rsp), %r12
leaq 0x40(%rsp), %rsi
movq %r12, %rdi
movl $0x1, %r8d
callq 0x9ad03e
movq %r13, %rdi
movl 0x38(%rsp), %esi
movl 0x3c(%rsp), %edx
movq %r12, %rcx
callq 0x8b7e74
movl $0x84c0, %edi # imm = 0x84C0
callq *0x8(%rbx)
callq *0x800(%rbx)
movl %eax, %edi
leaq 0x202333(%rip), %rsi # 0xa8068d
movq %r15, %rdx
movl $0x466, %ecx # imm = 0x466
callq 0x8b6518
movl 0x80(%r14), %edi
xorl %esi, %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
leaq 0x20a859(%rip), %r12 # 0xa88bdc
movl %eax, %edi
movq %r12, %rsi
movq %r15, %rdx
movl $0x468, %ecx # imm = 0x468
callq 0x8b6518
movl $0x84c1, %edi # imm = 0x84C1
callq *0x8(%rbx)
callq *0x800(%rbx)
movl %eax, %edi
leaq 0x20a7c3(%rip), %rsi # 0xa88b6f
movq %r15, %rdx
movl $0x46b, %ecx # imm = 0x46B
callq 0x8b6518
movl 0x80(%r14), %edi
xorl %esi, %esi
callq *0xb8(%rbx)
callq *0x800(%rbx)
movl %eax, %edi
movq %r12, %rsi
movq %r15, %rdx
movl $0x46d, %ecx # imm = 0x46D
callq 0x8b6518
leaq 0x2c(%rsp), %rsi
movl $0x1, %edi
callq *0x468(%rbx)
callq *0x800(%rbx)
leaq 0x20a1e1(%rip), %rsi # 0xa885de
movl %eax, %edi
movq %r15, %rdx
movl $0x470, %ecx # imm = 0x470
callq 0x8b6518
leaq 0x34(%rsp), %rsi
movl $0x1, %edi
callq *0x480(%rbx)
callq *0x800(%rbx)
leaq 0x20a7ce(%rip), %rsi # 0xa88bf7
movl %eax, %edi
movq %r15, %rdx
movl $0x472, %ecx # imm = 0x472
callq 0x8b6518
leaq 0x30(%rsp), %rsi
movl $0x1, %edi
callq *0x480(%rbx)
callq *0x800(%rbx)
leaq 0x20a7c1(%rip), %rsi # 0xa88c16
movl %eax, %edi
movq %r15, %rdx
movl $0x474, %ecx # imm = 0x474
callq 0x8b6518
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %rbx
leaq 0x20a110(%rip), %rdx # 0xa8859a
leaq 0x20947f(%rip), %rcx # 0xa87910
movq %rax, %rdi
xorl %esi, %esi
movl $0x43c, %r8d # imm = 0x43C
callq 0x99c17e
leaq 0x39cc18(%rip), %rsi # 0xc1b0c0
leaq -0x54c6cf(%rip), %rdx # 0x331de0
movq %rbx, %rdi
callq 0x325940
movq %rax, %r14
movq %rbx, %rdi
callq 0x325d40
movq %r14, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
deqp::gls::MultiTextureSamplerTest::iterate()
|
tcu::TestCase::IterateResult MultiTextureSamplerTest::iterate (void)
{
tcu::TestLog& log = m_testCtx.getLog();
tcu::Surface textureRef(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
tcu::Surface samplerRef(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
tcu::Surface textureResult(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
tcu::Surface samplerResult(VIEWPORT_WIDTH, VIEWPORT_HEIGHT);
int x = m_random.getInt(0, m_renderCtx.getRenderTarget().getWidth() - VIEWPORT_WIDTH);
int y = m_random.getInt(0, m_renderCtx.getRenderTarget().getHeight() - VIEWPORT_HEIGHT);
renderReferences(textureRef, samplerRef, x, y);
renderResults(textureResult, samplerResult, x, y);
bool isOk = pixelThresholdCompare (log, "Sampler render result", "Result from rendering with sampler", samplerRef, samplerResult, tcu::RGBA(0, 0, 0, 0), tcu::COMPARE_LOG_RESULT);
if (!pixelThresholdCompare (log, "Texture render result", "Result from rendering with texture state", textureRef, textureResult, tcu::RGBA(0, 0, 0, 0), tcu::COMPARE_LOG_RESULT))
isOk = false;
if (!isOk)
{
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Fail");
return STOP;
}
m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
return STOP;
}
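// A minimal sketch (names hypothetical, not from the source) of the random
// viewport placement used above. de::Random::getInt(a, b) is inclusive on
// both ends, which is why the upper bound subtracts the viewport size: the
// disassembly's addl $-0x7f computes (width - 128) + 1 possible origins, so
// the 128x128 viewport always fits inside the render target.
static void pickViewportOrigin (de::Random& rnd, const tcu::RenderTarget& target,
                                int viewW, int viewH, int& x, int& y)
{
    x = rnd.getInt(0, target.getWidth()  - viewW);
    y = rnd.getInt(0, target.getHeight() - viewH);
}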
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x70, %rsp
movq %rdi, %rbx
movq 0x8(%rdi), %rax
movq 0x10(%rax), %r14
leaq 0x58(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x40(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x28(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
leaq 0x10(%rsp), %rdi
movl $0x80, %esi
movl $0x80, %edx
callq 0x9a957c
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
leaq 0xd8(%rbx), %r15
movl (%rax), %ebp
movq %r15, %rdi
callq 0x9fa2ea
addl $-0x7f, %ebp
xorl %edx, %edx
divl %ebp
movl %edx, %ebp
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl 0x4(%rax), %r12d
movq %r15, %rdi
callq 0x9fa2ea
addl $-0x7f, %r12d
xorl %edx, %edx
divl %r12d
movl %edx, %r15d
leaq 0x58(%rsp), %rsi
leaq 0x40(%rsp), %rdx
movq %rbx, %rdi
movl %ebp, %ecx
movl %r15d, %r8d
callq 0x87dbaa
leaq 0x28(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movq %rbx, %rdi
movl %ebp, %ecx
movl %r15d, %r8d
callq 0x87df68
leaq 0xc(%rsp), %r9
movl $0x0, (%r9)
movl $0x1, (%rsp)
leaq 0x20a03d(%rip), %rsi # 0xa885fc
leaq 0x20a04c(%rip), %rdx # 0xa88612
leaq 0x40(%rsp), %rcx
leaq 0x10(%rsp), %r8
movq %r14, %rdi
callq 0x9a5367
movl %eax, %ebp
leaq 0xc(%rsp), %r9
movl $0x0, (%r9)
movl $0x1, (%rsp)
leaq 0x20a041(%rip), %rsi # 0xa88635
leaq 0x20a050(%rip), %rdx # 0xa8864b
leaq 0x58(%rsp), %rcx
leaq 0x28(%rsp), %r8
movq %r14, %rdi
callq 0x9a5367
andb %al, %bpl
movq 0x8(%rbx), %rdi
movl %ebp, %eax
xorb $0x1, %al
movzbl %al, %esi
leaq 0x1cd3c7(%rip), %rax # 0xa4b9e9
leaq 0x183ef8(%rip), %rdx # 0xa02521
testb %bpl, %bpl
cmovneq %rax, %rdx
callq 0x9a9c16
leaq 0x10(%rsp), %rdi
callq 0x9a9590
leaq 0x28(%rsp), %rdi
callq 0x9a9590
leaq 0x40(%rsp), %rdi
callq 0x9a9590
leaq 0x58(%rsp), %rdi
callq 0x9a9590
xorl %eax, %eax
addq $0x70, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
jmp 0x87e683
jmp 0x87e683
jmp 0x87e683
movq %rax, %rbx
jmp 0x87e690
movq %rax, %rbx
jmp 0x87e69a
movq %rax, %rbx
jmp 0x87e6a4
jmp 0x87e683
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x9a9590
leaq 0x28(%rsp), %rdi
callq 0x9a9590
leaq 0x40(%rsp), %rdi
callq 0x9a9590
leaq 0x58(%rsp), %rdi
callq 0x9a9590
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsSamplerObjectTest.cpp
|
deqp::gls::ShaderPerformanceMeasurer::render(int)
|
void ShaderPerformanceMeasurer::render (int numDrawCalls)
{
const glw::Functions& gl = m_renderCtx.getFunctions();
GLsizei numIndices = (GLsizei)getNumIndices(m_gridSizeX, m_gridSizeY);
gl.viewport(0, 0, m_viewportWidth, m_viewportHeight);
for (int callNdx = 0; callNdx < numDrawCalls; callNdx++)
gl.drawElements(GL_TRIANGLES, numIndices, GL_UNSIGNED_SHORT, DE_NULL);
}
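// Hypothetical reconstruction of getNumIndices(), inferred from the
// disassembly above (it computes m_gridSizeX * m_gridSizeY, doubles it, then
// multiplies by three): each grid cell is two triangles of three indices.
// A sketch, not necessarily the verbatim helper:
static inline int getNumIndices (int gridSizeX, int gridSizeY)
{
    return gridSizeX * gridSizeY * 2 * 3; // two triangles per cell, three indices each
}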
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %esi, %ebx
movq %rdi, %r15
movq (%rdi), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %r14
movl 0x8(%r15), %r12d
movl 0x10(%r15), %edx
imull 0xc(%r15), %r12d
movl 0x14(%r15), %ecx
xorl %edi, %edi
xorl %esi, %esi
callq *0x1a00(%rax)
testl %ebx, %ebx
jle 0x87f54c
addl %r12d, %r12d
leal (%r12,%r12,2), %ebp
movl $0x4, %edi
movl %ebp, %esi
movl $0x1403, %edx # imm = 0x1403
xorl %ecx, %ecx
callq *0x568(%r14)
decl %ebx
jne 0x87f533
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsShaderPerformanceMeasurer.cpp
|
deqp::gls::InteractionTestUtil::getRandomColor(de::Random&)
|
static Vec4 getRandomColor (de::Random& rnd)
{
static const float components[] = { 0.0f, 0.2f, 0.4f, 0.5f, 0.6f, 0.8f, 1.0f };
float r = rnd.choose<float>(DE_ARRAY_BEGIN(components), DE_ARRAY_END(components));
float g = rnd.choose<float>(DE_ARRAY_BEGIN(components), DE_ARRAY_END(components));
float b = rnd.choose<float>(DE_ARRAY_BEGIN(components), DE_ARRAY_END(components));
float a = rnd.choose<float>(DE_ARRAY_BEGIN(components), DE_ARRAY_END(components));
return Vec4(r, g, b, a);
}
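// Usage sketch (assumed; the seed value is arbitrary): choose() picks
// uniformly from the iterator range, so each component lands on one of the
// seven listed values rather than an arbitrary float.
de::Random rnd   (0x1234);
tcu::Vec4  color = getRandomColor(rnd);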
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rsi, %r14
movq %rdi, %rbx
xorl %ebp, %ebp
leaq 0x8(%rsp), %r13
movl %ebp, (%r13)
leaq 0x2086bc(%rip), %r15 # 0xa89070
leaq 0x2086d1(%rip), %r12 # 0xa8908c
movq %rsi, %rdi
movq %r15, %rsi
movq %r12, %rdx
movq %r13, %rcx
movl $0x1, %r8d
callq 0x592734
movss (%r13), %xmm0
movss %xmm0, 0x14(%rsp)
movl %ebp, (%r13)
leaq 0x8(%rsp), %r13
movq %r14, %rdi
movq %r15, %rsi
movq %r12, %rdx
movq %r13, %rcx
movl $0x1, %r8d
callq 0x592734
movss (%r13), %xmm0
movss %xmm0, 0x10(%rsp)
movl %ebp, (%r13)
leaq 0x8(%rsp), %r13
movq %r14, %rdi
movq %r15, %rsi
movq %r12, %rdx
movq %r13, %rcx
movl $0x1, %r8d
callq 0x592734
movss (%r13), %xmm0
movss %xmm0, 0xc(%rsp)
movl %ebp, (%r13)
leaq 0x8(%rsp), %r13
movq %r14, %rdi
movq %r15, %rsi
movq %r12, %rdx
movq %r13, %rcx
movl $0x1, %r8d
callq 0x592734
movss (%r13), %xmm0
movss 0x14(%rsp), %xmm1
movss %xmm1, (%rbx)
movss 0x10(%rsp), %xmm1
movss %xmm1, 0x4(%rbx)
movss 0xc(%rsp), %xmm1
movss %xmm1, 0x8(%rbx)
movss %xmm0, 0xc(%rbx)
movq %rbx, %rax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsInteractionTestUtil.cpp
|
deqp::gls::FragOpInteractionCase::deinit()
|
void FragOpInteractionCase::deinit (void)
{
delete m_referenceCtx;
m_referenceCtx = DE_NULL;
delete m_glCtx;
m_glCtx = DE_NULL;
delete m_program;
m_program = DE_NULL;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movq 0x440(%rdi), %r14
testq %r14, %r14
je 0x8812d4
leaq 0xd0(%r14), %rdi
callq 0x8a13b4
leaq 0x58(%r14), %r15
leaq 0xa8(%r14), %rdi
callq 0x9b433a
leaq 0x80(%r14), %rdi
callq 0x9b433a
movq %r15, %rdi
callq 0x9b433a
movq 0x38(%r14), %rdi
leaq 0x48(%r14), %rax
cmpq %rax, %rdi
je 0x8812be
movq (%rax), %rsi
incq %rsi
callq 0x3251a0
leaq 0x20(%r14), %rdi
callq 0x33607e
movl $0x6280, %esi # imm = 0x6280
movq %r14, %rdi
callq 0x3251a0
movq $0x0, 0x440(%rbx)
movq 0x438(%rbx), %rdi
testq %rdi, %rdi
je 0x8812f1
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x438(%rbx)
movq 0x430(%rbx), %rdi
testq %rdi, %rdi
je 0x88130e
movq (%rdi), %rax
callq *0x10(%rax)
movq $0x0, 0x430(%rbx)
popq %rbx
popq %r14
popq %r15
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsFragOpInteractionCase.cpp
|
deqp::gls::FragOpInteractionCase::init()
|
void FragOpInteractionCase::init (void)
{
de::Random rnd (m_params.seed ^ 0x232faac);
const int viewportW = de::min<int>(m_renderCtx.getRenderTarget().getWidth(), VIEWPORT_WIDTH);
const int viewportH = de::min<int>(m_renderCtx.getRenderTarget().getHeight(), VIEWPORT_HEIGHT);
const int viewportX = rnd.getInt(0, m_renderCtx.getRenderTarget().getWidth() - viewportW);
const int viewportY = rnd.getInt(0, m_renderCtx.getRenderTarget().getHeight() - viewportH);
rsg::ProgramGenerator generator;
generator.generate(m_params, m_vertexShader, m_fragmentShader);
rsg::computeUnifiedUniforms(m_vertexShader, m_fragmentShader, m_unifiedUniforms);
try
{
DE_ASSERT(!m_program);
m_program = new gls::RandomShaderProgram(m_vertexShader, m_fragmentShader, (int)m_unifiedUniforms.size(), m_unifiedUniforms.empty() ? DE_NULL : &m_unifiedUniforms[0]);
DE_ASSERT(!m_referenceCtx);
m_referenceCtx = new ReferenceContext(m_renderCtx, viewportW, viewportH);
DE_ASSERT(!m_glCtx);
m_glCtx = new sglr::GLContext(m_renderCtx, m_testCtx.getLog(), sglr::GLCONTEXT_LOG_CALLS|sglr::GLCONTEXT_LOG_PROGRAMS, IVec4(viewportX, viewportY, viewportW, viewportH));
m_refProgram = m_referenceCtx->context.createProgram(m_program);
m_glProgram = m_glCtx->createProgram(m_program);
m_viewportSize = tcu::IVec2(viewportW, viewportH);
m_iterNdx = 0;
m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
}
catch (...)
{
// Save some memory by cleaning up stuff.
FragOpInteractionCase::deinit();
throw;
}
}
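// Two details worth noting above: deleting a null pointer is a no-op in C++,
// so FragOpInteractionCase::deinit() (shown earlier) is safe to call even
// when only some members were allocated; and the catch (...) block releases
// that partial state before rethrowing, so a failed init() does not leak the
// program, reference context, or GL context wrappers.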
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdi, %rbx
leaq 0x80(%rdi), %rax
movq %rax, 0x10(%rsp)
movl $0x232faac, %esi # imm = 0x232FAAC
xorl 0x80(%rdi), %esi
leaq 0x28(%rsp), %r12
movq %r12, %rdi
callq 0x9fa2bc
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl (%rax), %ebp
movl $0x40, %r14d
cmpl %r14d, %ebp
cmovgel %r14d, %ebp
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl 0x4(%rax), %eax
cmpl $0x40, %eax
cmovll %eax, %r14d
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl (%rax), %r15d
movq %r12, %rdi
callq 0x9fa2ea
subl %ebp, %r15d
incl %r15d
xorl %edx, %edx
divl %r15d
movl %edx, 0xc(%rsp)
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movl 0x4(%rax), %r15d
movq %r12, %rdi
callq 0x9fa2ea
subl %r14d, %r15d
incl %r15d
xorl %edx, %edx
divl %r15d
movl %edx, 0x8(%rsp)
leaq 0x7(%rsp), %r13
movq %r13, %rdi
callq 0x97387c
leaq 0xe8(%rbx), %r15
leaq 0x280(%rbx), %r12
movq %r13, %rdi
movq 0x10(%rsp), %rsi
movq %r15, %rdx
movq %r12, %rcx
callq 0x973880
leaq 0x418(%rbx), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x979d9c
movl $0x258, %edi # imm = 0x258
callq 0x325210
movq %rax, %r13
movq 0x418(%rbx), %rax
movq 0x420(%rbx), %rcx
xorl %r8d, %r8d
subq %rax, %rcx
cmovneq %rax, %r8
shrq $0x3, %rcx
movq %r13, %rdi
movq %r15, %rsi
movq %r12, %rdx
callq 0x89df60
movq %r13, 0x430(%rbx)
movl $0x6280, %edi # imm = 0x6280
callq 0x325210
movq %rax, %r15
movq 0x70(%rbx), %rsi
movq %rax, %rdi
movl %ebp, %edx
movl %r14d, %ecx
callq 0x883580
movq %r15, 0x440(%rbx)
movl $0x170, %edi # imm = 0x170
callq 0x325210
movq %rax, %r15
movq 0x8(%rbx), %rax
movq 0x70(%rbx), %rsi
movq 0x10(%rax), %rdx
leaq 0x18(%rsp), %r8
movl 0xc(%rsp), %eax
movl %eax, (%r8)
movl 0x8(%rsp), %eax
movl %eax, 0x4(%r8)
movl %ebp, 0x8(%r8)
movl %r14d, 0xc(%r8)
movq %r15, %rdi
movl $0x3, %ecx
callq 0x8b2ba0
movq %r15, 0x438(%rbx)
movl $0xd0, %edi
addq 0x440(%rbx), %rdi
movq 0x430(%rbx), %rsi
callq 0x8ac7ee
movl %eax, 0x44c(%rbx)
movq 0x430(%rbx), %rsi
movq 0x438(%rbx), %rdi
movq (%rdi), %rax
callq *0x3a8(%rax)
movl %eax, 0x448(%rbx)
shlq $0x20, %r14
movl %ebp, %eax
orq %r14, %rax
movq %rax, 0x450(%rbx)
movl $0x0, 0x458(%rbx)
movq 0x8(%rbx), %rdi
leaq 0x1ca4ba(%rip), %rdx # 0xa4b9e9
xorl %esi, %esi
callq 0x9a9c16
leaq 0x7(%rsp), %rdi
callq 0x97387e
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r14
movl $0x170, %esi # imm = 0x170
jmp 0x881561
movq %rax, %r14
movl $0x6280, %esi # imm = 0x6280
movq %r15, %rdi
jmp 0x881571
movq %rax, %r14
movl $0x258, %esi # imm = 0x258
movq %r13, %rdi
callq 0x3251a0
jmp 0x881580
movq %rax, %rbx
jmp 0x88159d
movq %rax, %r14
movq %r14, %rdi
callq 0x3250e0
movq %rbx, %rdi
callq 0x881262
callq 0x325c10
movq %rax, %rbx
callq 0x325b60
leaq 0x7(%rsp), %rdi
callq 0x97387e
movq %rbx, %rdi
callq 0x3259a0
movq %rax, %rdi
callq 0x3314f2
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsFragOpInteractionCase.cpp
|
deqp::gls::StateChangePerformanceCase::deinit()
|
void StateChangePerformanceCase::deinit (void)
{
m_indices.clear();
m_interleavedResults.clear();
m_batchedResults.clear();
{
const glw::Functions& gl = m_renderCtx.getFunctions();
if (!m_indexBuffers.empty())
{
gl.deleteBuffers((GLsizei)m_indexBuffers.size(), &(m_indexBuffers[0]));
m_indexBuffers.clear();
}
if (!m_coordBuffers.empty())
{
gl.deleteBuffers((GLsizei)m_coordBuffers.size(), &(m_coordBuffers[0]));
m_coordBuffers.clear();
}
if (!m_textures.empty())
{
gl.deleteTextures((GLsizei)m_textures.size(), &(m_textures[0]));
m_textures.clear();
}
if (!m_framebuffers.empty())
{
gl.deleteFramebuffers((GLsizei)m_framebuffers.size(), &(m_framebuffers[0]));
m_framebuffers.clear();
}
if (!m_renderbuffers.empty())
{
gl.deleteRenderbuffers((GLsizei)m_renderbuffers.size(), &(m_renderbuffers[0]));
m_renderbuffers.clear();
}
if (!m_samplers.empty())
{
gl.deleteSamplers((GLsizei)m_samplers.size(), &m_samplers[0]);
m_samplers.clear();
}
if (!m_vertexArrays.empty())
{
gl.deleteVertexArrays((GLsizei)m_vertexArrays.size(), &m_vertexArrays[0]);
m_vertexArrays.clear();
}
for (int programNdx = 0; programNdx < (int)m_programs.size(); programNdx++)
{
delete m_programs[programNdx];
m_programs[programNdx] = NULL;
}
m_programs.clear();
}
}
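// The seven nearly identical blocks above follow one pattern: if the object
// list is non-empty, pass its size and first element to the matching
// glDelete* entry point, then clear it. A hypothetical helper capturing the
// pattern (deleteObjects is not in the source; the glw entry points are plain
// function-pointer members of glw::Functions, so a pointer-to-member works):
template <typename FuncPtr>
static void deleteObjects (const glw::Functions& gl, FuncPtr glw::Functions::*entry, std::vector<deUint32>& objects)
{
    if (!objects.empty())
    {
        (gl.*entry)((glw::GLsizei)objects.size(), &objects[0]);
        objects.clear();
    }
}
// e.g. deleteObjects(gl, &glw::Functions::deleteBuffers, m_indexBuffers);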
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movq 0x148(%rdi), %rax
cmpq %rax, 0x150(%rdi)
je 0x883877
movq %rax, 0x150(%rbx)
movq 0x160(%rbx), %rax
cmpq %rax, 0x168(%rbx)
je 0x88388e
movq %rax, 0x168(%rbx)
movq 0x178(%rbx), %rax
cmpq %rax, 0x180(%rbx)
je 0x8838a5
movq %rax, 0x180(%rbx)
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %r14
movq 0x88(%rbx), %rsi
movq 0x90(%rbx), %rdi
cmpq %rdi, %rsi
je 0x8838ea
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x438(%r14)
movq 0x88(%rbx), %rax
cmpq %rax, 0x90(%rbx)
je 0x8838ea
movq %rax, 0x90(%rbx)
movq 0xa0(%rbx), %rsi
movq 0xa8(%rbx), %rdi
cmpq %rdi, %rsi
je 0x883922
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x438(%r14)
movq 0xa0(%rbx), %rax
cmpq %rax, 0xa8(%rbx)
je 0x883922
movq %rax, 0xa8(%rbx)
movq 0xb8(%rbx), %rsi
movq 0xc0(%rbx), %rdi
cmpq %rdi, %rsi
je 0x88395a
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x480(%r14)
movq 0xb8(%rbx), %rax
cmpq %rax, 0xc0(%rbx)
je 0x88395a
movq %rax, 0xc0(%rbx)
movq 0xe8(%rbx), %rsi
movq 0xf0(%rbx), %rdi
cmpq %rdi, %rsi
je 0x883992
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x440(%r14)
movq 0xe8(%rbx), %rax
cmpq %rax, 0xf0(%rbx)
je 0x883992
movq %rax, 0xf0(%rbx)
movq 0x100(%rbx), %rsi
movq 0x108(%rbx), %rdi
cmpq %rdi, %rsi
je 0x8839ca
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x460(%r14)
movq 0x100(%rbx), %rax
cmpq %rax, 0x108(%rbx)
je 0x8839ca
movq %rax, 0x108(%rbx)
movq 0x118(%rbx), %rsi
movq 0x120(%rbx), %rdi
cmpq %rdi, %rsi
je 0x883a02
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x468(%r14)
movq 0x118(%rbx), %rax
cmpq %rax, 0x120(%rbx)
je 0x883a02
movq %rax, 0x120(%rbx)
movq 0x130(%rbx), %rsi
movq 0x138(%rbx), %rdi
cmpq %rdi, %rsi
je 0x883a3a
subq %rsi, %rdi
shrq $0x2, %rdi
callq *0x490(%r14)
movq 0x130(%rbx), %rax
cmpq %rax, 0x138(%rbx)
je 0x883a3a
movq %rax, 0x138(%rbx)
movq 0xd0(%rbx), %rax
movq 0xd8(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
testl %edx, %edx
jle 0x883aa9
xorl %r15d, %r15d
movq (%rax,%r15,8), %r14
testq %r14, %r14
je 0x883a77
movq %r14, %rdi
callq 0x8ba6d2
movl $0xd0, %esi
movq %r14, %rdi
callq 0x3251a0
movq 0xd0(%rbx), %rax
movq $0x0, (%rax,%r15,8)
incq %r15
movq 0xd0(%rbx), %rax
movq 0xd8(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jl 0x883a59
cmpq %rax, %rcx
je 0x883ab5
movq %rax, 0xd8(%rbx)
popq %rbx
popq %r14
popq %r15
retq
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsStateChangePerfTestCases.cpp
|
deqp::gls::StateChangePerformanceCase::callDraw(glw::Functions const&)
|
void StateChangePerformanceCase::callDraw (const glw::Functions& gl)
{
switch (m_drawType)
{
case DRAWTYPE_NOT_INDEXED: gl.drawArrays(GL_TRIANGLES, 0, m_triangleCount * 3); break;
case DRAWTYPE_INDEXED_USER_PTR: gl.drawElements(GL_TRIANGLES, m_triangleCount * 3, GL_UNSIGNED_SHORT, &m_indices[0]); break;
case DRAWTYPE_INDEXED_BUFFER: gl.drawElements(GL_TRIANGLES, m_triangleCount * 3, GL_UNSIGNED_SHORT, NULL); break;
default:
DE_ASSERT(false);
}
}
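// Note the two indexed paths above: DRAWTYPE_INDEXED_USER_PTR passes
// &m_indices[0], a client-memory pointer, while DRAWTYPE_INDEXED_BUFFER
// passes NULL, which GL interprets as a byte offset into the currently bound
// GL_ELEMENT_ARRAY_BUFFER. Both use GL_UNSIGNED_SHORT (0x1403 in the
// disassembly), matching 16-bit index data.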
|
movl 0x78(%rdi), %eax
cmpl $0x2, %eax
je 0x885d2d
cmpl $0x1, %eax
je 0x885d0a
testl %eax, %eax
jne 0x885d4b
movq 0x538(%rsi), %rax
movl 0x84(%rdi), %ecx
leal (%rcx,%rcx,2), %edx
movl $0x4, %edi
xorl %esi, %esi
jmpq *%rax
movq 0x568(%rsi), %rax
movl 0x84(%rdi), %ecx
leal (%rcx,%rcx,2), %esi
movq 0x148(%rdi), %rcx
movl $0x4, %edi
movl $0x1403, %edx # imm = 0x1403
jmpq *%rax
movq 0x568(%rsi), %rax
movl 0x84(%rdi), %ecx
leal (%rcx,%rcx,2), %esi
movl $0x4, %edi
movl $0x1403, %edx # imm = 0x1403
xorl %ecx, %ecx
jmpq *%rax
retq
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsStateChangePerfTestCases.cpp
|
deqp::gls::StateChangeCallPerformanceCase::iterate()
|
tcu::TestCase::IterateResult StateChangeCallPerformanceCase::iterate (void)
{
if (m_results.empty())
logTestCase();
if ((int)m_results.size() < m_iterationCount)
{
executeTest();
GLU_EXPECT_NO_ERROR(m_renderCtx.getFunctions().getError(), "Unexpected error");
return CONTINUE;
}
else
{
logAndSetTestResult();
return STOP;
}
}
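// The IterateResult encoding is visible in the disassembly below: the
// execute-and-continue path returns 1 (CONTINUE) and the finishing path
// returns 0 (STOP). Each CONTINUE round appends one measurement until
// m_results.size() reaches m_iterationCount.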
|
pushq %rbx
movq %rdi, %rbx
movq 0x80(%rdi), %rax
cmpq 0x88(%rdi), %rax
jne 0x8865f4
movq %rbx, %rdi
callq 0x885e58
movq 0x88(%rbx), %rax
subq 0x80(%rbx), %rax
shrq $0x3, %rax
cmpl %eax, 0x78(%rbx)
jle 0x886644
movq %rbx, %rdi
callq 0x885dce
movq 0x70(%rbx), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
callq *0x800(%rax)
leaq 0x203076(%rip), %rsi # 0xa896a0
leaq 0x202be5(%rip), %rdx # 0xa89216
movl %eax, %edi
movl $0x2ce, %ecx # imm = 0x2CE
callq 0x8b6518
movl $0x1, %eax
jmp 0x88664e
movq %rbx, %rdi
callq 0x885fcc
xorl %eax, %eax
popq %rbx
retq
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsStateChangePerfTestCases.cpp
|
deqp::gls::BufferTestUtil::getUsageHintName(unsigned int)
|
const char* getUsageHintName (deUint32 hint)
{
switch (hint)
{
case GL_STREAM_DRAW: return "stream_draw";
case GL_STREAM_READ: return "stream_read";
case GL_STREAM_COPY: return "stream_copy";
case GL_STATIC_DRAW: return "static_draw";
case GL_STATIC_READ: return "static_read";
case GL_STATIC_COPY: return "static_copy";
case GL_DYNAMIC_DRAW: return "dynamic_draw";
case GL_DYNAMIC_READ: return "dynamic_read";
case GL_DYNAMIC_COPY: return "dynamic_copy";
default:
DE_ASSERT(false);
return DE_NULL;
}
}
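// The disassembly implements this switch as a table lookup: the usage hints
// are nearly contiguous GLenums (GL_STREAM_DRAW = 0x88E0 up to
// GL_DYNAMIC_COPY = 0x88EA), so adding 0xFFFF7720 (i.e. subtracting 0x88E0)
// turns the hint into an index into an 11-entry string table; the two gaps
// at 0x88E3 and 0x88E7 presumably hold null entries, matching the default
// case. An equivalent table, for illustration:
static const char* const s_usageNames[] =
{
    "stream_draw",  "stream_read",  "stream_copy",  DE_NULL,
    "static_draw",  "static_read",  "static_copy",  DE_NULL,
    "dynamic_draw", "dynamic_read", "dynamic_copy"
};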
|
addl $0xffff7720, %edi # imm = 0xFFFF7720
cmpl $0xa, %edi
ja 0x887229
movl %edi, %eax
leaq 0x3cf864(%rip), %rcx # 0xc56a88
movq (%rcx,%rax,8), %rax
retq
xorl %eax, %eax
retq
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsBufferTestUtil.cpp
|
deqp::gls::BufferTestUtil::BufferWriter::BufferWriter(glu::RenderContext&, tcu::TestLog&, deqp::gls::BufferTestUtil::WriteType)
|
BufferWriter::BufferWriter (glu::RenderContext& renderCtx, tcu::TestLog& log, WriteType writeType)
: m_writer(DE_NULL)
{
switch (writeType)
{
case WRITE_BUFFER_SUB_DATA: m_writer = new BufferSubDataWriter (renderCtx, log); break;
case WRITE_BUFFER_WRITE_MAP: m_writer = new BufferWriteMapWriter (renderCtx, log); break;
default:
TCU_FAIL("Unsupported writer");
}
}
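// Usage sketch (assumed; renderCtx and log stand for the caller's existing
// objects): the wrapper selects the concrete writer once at construction
// time, so callers stay independent of the write mechanism.
BufferWriter writer (renderCtx, log, WRITE_BUFFER_SUB_DATA); // glBufferSubData-based writer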
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r12
movq %rsi, %r15
movq %rdi, %rbx
movq $0x0, (%rdi)
cmpl $0x1, %ecx
je 0x887661
testl %ecx, %ecx
jne 0x8876ab
movl $0x28, %edi
callq 0x325210
movq %rax, %r14
movq (%r15), %rax
movq %r15, %rdi
callq *0x18(%rax)
movq %r14, %rdi
addq $0x8, %rdi
movq %rax, %rsi
movq %r12, %rdx
callq 0x8e03bc
movq %r15, 0x20(%r14)
movb $0x1, 0x18(%r14)
leaq 0x3cf2a1(%rip), %rax # 0xc56900
jmp 0x887699
movl $0x28, %edi
callq 0x325210
movq %rax, %r14
movq (%r15), %rax
movq %r15, %rdi
callq *0x18(%rax)
movq %r14, %rdi
addq $0x8, %rdi
movq %rax, %rsi
movq %r12, %rdx
callq 0x8e03bc
movq %r15, 0x20(%r14)
movb $0x1, 0x18(%r14)
leaq 0x3cf2bf(%rip), %rax # 0xc56958
movq %rax, (%r14)
movq %r14, (%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0x20210b(%rip), %rsi # 0xa897ca
leaq 0x202117(%rip), %rcx # 0xa897dd
movq %rax, %rdi
xorl %edx, %edx
movl $0x113, %r8d # imm = 0x113
callq 0x99c17e
leaq 0x3939e3(%rip), %rsi # 0xc1b0c0
leaq -0x555904(%rip), %rdx # 0x331de0
movq %r14, %rdi
callq 0x325940
movq %rax, %rbx
movq %r14, %rdi
callq 0x325d40
jmp 0x88770b
jmp 0x8876fb
movq %rax, %rbx
movl $0x28, %esi
movq %r14, %rdi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsBufferTestUtil.cpp
|
deqp::gls::BufferTestUtil::VertexArrayVerifier::~VertexArrayVerifier()
|
VertexArrayVerifier::~VertexArrayVerifier (void)
{
const glw::Functions& gl = m_renderCtx.getFunctions();
if (m_vao) gl.deleteVertexArrays(1, &m_vao);
if (m_positionBuf) gl.deleteBuffers(1, &m_positionBuf);
if (m_indexBuf) gl.deleteBuffers(1, &m_indexBuf);
delete m_program;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x3ce870(%rip), %rax # 0xc56858
movq %rax, (%rdi)
movq 0x20(%rdi), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %r14
cmpl $0x0, 0x40(%rbx)
je 0x88800e
leaq 0x40(%rbx), %rsi
movl $0x1, %edi
callq *0x490(%r14)
cmpl $0x0, 0x44(%rbx)
je 0x888024
leaq 0x44(%rbx), %rsi
movl $0x1, %edi
callq *0x438(%r14)
cmpl $0x0, 0x48(%rbx)
je 0x88803a
leaq 0x48(%rbx), %rsi
movl $0x1, %edi
callq *0x438(%r14)
movq 0x30(%rbx), %r14
testq %r14, %r14
je 0x888058
movq %r14, %rdi
callq 0x8ba6d2
movl $0xd0, %esi
movq %r14, %rdi
callq 0x3251a0
addq $0x8, %rbx
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x8e03c8
movq %rax, %rdi
callq 0x3314f2
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsBufferTestUtil.cpp
|
deqp::gls::BufferTestUtil::IndexArrayVerifier::~IndexArrayVerifier()
|
IndexArrayVerifier::~IndexArrayVerifier (void)
{
const glw::Functions& gl = m_renderCtx.getFunctions();
if (m_vao) gl.deleteVertexArrays(1, &m_vao);
if (m_positionBuf) gl.deleteBuffers(1, &m_positionBuf);
if (m_colorBuf) gl.deleteBuffers(1, &m_colorBuf);
delete m_program;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x3cd344(%rip), %rax # 0xc56898
movq %rax, (%rdi)
movq 0x20(%rdi), %rdi
movq (%rdi), %rax
callq *0x18(%rax)
movq %rax, %r14
cmpl $0x0, 0x40(%rbx)
je 0x88957a
leaq 0x40(%rbx), %rsi
movl $0x1, %edi
callq *0x490(%r14)
cmpl $0x0, 0x44(%rbx)
je 0x889590
leaq 0x44(%rbx), %rsi
movl $0x1, %edi
callq *0x438(%r14)
cmpl $0x0, 0x48(%rbx)
je 0x8895a6
leaq 0x48(%rbx), %rsi
movl $0x1, %edi
callq *0x438(%r14)
movq 0x30(%rbx), %r14
testq %r14, %r14
je 0x8895c4
movq %r14, %rdi
callq 0x8ba6d2
movl $0xd0, %esi
movq %r14, %rdi
callq 0x3251a0
addq $0x8, %rbx
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x8e03c8
movq %rax, %rdi
callq 0x3314f2
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsBufferTestUtil.cpp
|
deqp::gls::(anonymous namespace)::logProgram(tcu::TestLog&, glw::Functions const&, unsigned int)
|
void logProgram (TestLog& log, const glw::Functions& gl, deUint32 program)
{
const bool programLinkOk = getProgramLinkStatus(gl, program);
const string programInfoLog = getProgramInfoLog(gl, program);
tcu::ScopedLogSection linkInfo (log, "Program Link Info", "Program Link Info");
{
tcu::ScopedLogSection infoLogSection(log, "Info Log", "Info Log");
log << TestLog::Message << programInfoLog << TestLog::EndMessage;
}
log << TestLog::Message << "Link result: " << (programLinkOk ? "Ok" : "Fail") << TestLog::EndMessage;
}
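// tcu::ScopedLogSection is an RAII helper: its constructor emits a section
// begin marker and its destructor ends the section, so the nested braces
// above produce properly nested "Program Link Info" / "Info Log" sections in
// the log. Shape of the idiom (illustrative):
{
    tcu::ScopedLogSection section (log, "Name", "Description");
    log << TestLog::Message << "nested content" << TestLog::EndMessage;
} // section closed here by the destructor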
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1d8, %rsp # imm = 0x1D8
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x58(%rsp), %r15
movl %edx, %edi
movl $0x8b82, %esi # imm = 0x8B82
movq %r15, %rdx
callq *0x9d8(%r14)
callq *0x800(%r14)
leaq 0x1fc740(%rip), %rsi # 0xa8a648
leaq 0x1fc237(%rip), %rdx # 0xa8a146
movl %eax, %edi
movl $0x162, %ecx # imm = 0x162
callq 0x8b6518
movl (%r15), %r12d
movl $0x0, (%r15)
leaq 0x48(%rsp), %r15
movq %r15, -0x10(%r15)
movq $0x0, -0x8(%r15)
movb $0x0, (%r15)
leaq 0x58(%rsp), %rdx
movl %ebp, %edi
movl $0x8b84, %esi # imm = 0x8B84
callq *0x9d8(%r14)
callq *0x800(%r14)
leaq 0x1fc6ed(%rip), %rsi # 0xa8a648
leaq 0x1fc1e4(%rip), %rdx # 0xa8a146
movl %eax, %edi
movl $0x153, %ecx # imm = 0x153
callq 0x8b6518
movslq 0x58(%rsp), %rsi
leaq 0x38(%rsp), %rdi
xorl %edx, %edx
callq 0x3259f0
movl 0x40(%rsp), %esi
movq 0x38(%rsp), %rcx
movl %ebp, %edi
xorl %edx, %edx
callq *0x988(%r14)
callq *0x800(%r14)
leaq 0x1fc6b8(%rip), %rsi # 0xa8a659
leaq 0x1fc19e(%rip), %rdx # 0xa8a146
movl %eax, %edi
movl $0x158, %ecx # imm = 0x158
callq 0x8b6518
leaq 0x68(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x1fc664(%rip), %rsi # 0xa8a628
leaq 0x1fc66e(%rip), %rdx # 0xa8a639
leaq 0x58(%rsp), %rdi
callq 0x334070
leaq 0x18(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0x1fc643(%rip), %rsi # 0xa8a628
leaq 0x1fc64d(%rip), %rdx # 0xa8a639
leaq 0x8(%rsp), %rdi
callq 0x334070
leaq 0x30(%rsp), %rdi
leaq 0x58(%rsp), %rdx
leaq 0x8(%rsp), %rcx
movq %rbx, %rsi
callq 0x35a2a6
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x88e024
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x58(%rsp), %rdi
cmpq %r14, %rdi
je 0x88e03b
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x58(%rsp), %rdi
movq %r14, (%rdi)
leaq 0x1fc58e(%rip), %rsi # 0xa8a5d8
leaq 0x1fc58f(%rip), %rdx # 0xa8a5e0
callq 0x334070
leaq 0x8(%rsp), %rdi
movq %r13, (%rdi)
leaq 0x1fc573(%rip), %rsi # 0xa8a5d8
leaq 0x1fc574(%rip), %rdx # 0xa8a5e0
callq 0x334070
leaq 0x28(%rsp), %rdi
leaq 0x58(%rsp), %rdx
leaq 0x8(%rsp), %rcx
movq %rbx, %rsi
callq 0x35a2a6
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x88e09f
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x58(%rsp), %rdi
cmpq %r14, %rdi
je 0x88e0b6
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x60(%rsp), %r14
movq %rbx, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
movq 0x38(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %r14, %rdi
callq 0x325e70
leaq 0x2151ad(%rip), %rsi # 0xaa328d
leaq 0x58(%rsp), %rdi
callq 0x9aba2a
movq 0x3cea37(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0xd0(%rsp), %rdi
callq 0x325a80
movq 0x28(%rsp), %rdi
callq 0x9abf3a
leaq 0x60(%rsp), %r14
movq %rbx, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
leaq 0x1fc512(%rip), %rsi # 0xa8a63a
movl $0xd, %edx
movq %r14, %rdi
callq 0x325e70
xorl %eax, %eax
cmpl $0x1, %r12d
setne %al
leaq 0x1fc267(%rip), %rcx # 0xa8a3ac
leaq 0x1743d5(%rip), %rsi # 0xa02521
cmoveq %rcx, %rsi
leaq 0x2(,%rax,2), %rdx
movq %r14, %rdi
callq 0x325e70
leaq 0x215126(%rip), %rsi # 0xaa328d
leaq 0x58(%rsp), %rdi
callq 0x9aba2a
movq 0x3ce9b0(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0xd0(%rsp), %rdi
callq 0x325a80
movq 0x30(%rsp), %rdi
callq 0x9abf3a
movq 0x38(%rsp), %rdi
cmpq %r15, %rdi
je 0x88e1ae
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
addq $0x1d8, %rsp # imm = 0x1D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x88e2b2
jmp 0x88e25a
jmp 0x88e218
jmp 0x88e2b2
movq %rax, %rbx
jmp 0x88e29a
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x88e1f8
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x88e1f8
movq %rax, %rbx
movq 0x58(%rsp), %rdi
cmpq %r14, %rdi
je 0x88e2a4
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x88e2a4
movq %rax, %rbx
jmp 0x88e2a4
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x88e23f
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x88e23f
movq %rax, %rbx
movq 0x58(%rsp), %rdi
cmpq %r14, %rdi
je 0x88e2bd
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x88e2bd
jmp 0x88e2ba
movq %rax, %rbx
movq 0x3ce8c4(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0xd0(%rsp), %rdi
callq 0x325a80
jmp 0x88e2a4
movq %rax, %rbx
movq 0x3ce8a3(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0xd0(%rsp), %rdi
callq 0x325a80
movq 0x28(%rsp), %rdi
callq 0x9abf3a
movq 0x30(%rsp), %rdi
callq 0x9abf3a
jmp 0x88e2bd
jmp 0x88e2b2
movq %rax, %rdi
callq 0x3314f2
movq %rax, %rbx
movq 0x38(%rsp), %rdi
cmpq %r15, %rdi
je 0x88e2d4
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::(anonymous namespace)::generateTestName[abi:cxx11](deqp::gls::AttributeLocationTestUtil::AttribType const&, int)
|
string generateTestName (const AttribType& type, int arraySize)
{
return type.getName() + (arraySize != Attribute::NOT_ARRAY ? "_array_" + de::toString(arraySize) : "");
}
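// Illustrative results: for a "vec4" type, generateTestName(type, 3) yields
// "vec4_array_3", while arraySize == Attribute::NOT_ARRAY (-1, visible as the
// cmpl $-0x1 check in the disassembly) yields just "vec4".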
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x1c0, %rsp # imm = 0x1C0
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %rbx
cmpl $-0x1, %edx
setne %r12b
je 0x88f581
leaq 0x48(%rsp), %r15
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %ebp, %esi
callq 0x325530
leaq 0x50(%rsp), %rsi
leaq 0x28(%rsp), %rdi
callq 0x325660
movq 0x3cd600(%rip), %rsi # 0xc5cb28
leaq 0x48(%rsp), %rdi
callq 0x325aa0
leaq 0xb8(%rsp), %rdi
callq 0x325a80
leaq 0x186c4b(%rip), %rcx # 0xa16191
leaq 0x28(%rsp), %rdi
movl $0x7, %r8d
xorl %esi, %esi
xorl %edx, %edx
callq 0x325230
leaq 0x18(%rsp), %rdx
movq %rdx, -0x10(%rdx)
movq (%rax), %rsi
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rsi
je 0x88f5a0
movq %rsi, 0x8(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x18(%rsp)
jmp 0x88f5a6
leaq 0x18(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x1c92c4(%rip), %rdx # 0xa58855
leaq 0x8(%rsp), %rdi
movq %rdx, %rsi
callq 0x334070
jmp 0x88f5be
movups (%rcx), %xmm0
movups %xmm0, (%rdx)
movq 0x8(%rax), %rdx
movq %rdx, 0x10(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq (%r14), %rcx
movq 0x8(%r14), %r8
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x325230
leaq 0x10(%rbx), %rdx
movq %rdx, (%rbx)
movq (%rax), %rsi
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rsi
je 0x88f5f5
movq %rsi, (%rbx)
movq (%rcx), %rdx
movq %rdx, 0x10(%rbx)
jmp 0x88f5fb
movups (%rcx), %xmm0
movups %xmm0, (%rdx)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rbx)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x88f62d
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
cmpl $-0x1, %ebp
je 0x88f64d
leaq 0x38(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x88f64d
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rax
addq $0x1c0, %rsp # imm = 0x1C0
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
xorl %r12d, %r12d
jmp 0x88f68e
movq %rax, %rbx
movb $0x1, %r12b
jmp 0x88f68e
movq %rax, %rbx
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x88f68e
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
testb %r12b, %r12b
je 0x88f6c0
leaq 0x28(%rsp), %rdi
callq 0x32fa68
jmp 0x88f6c0
movq %rax, %rbx
movq 0x3cd47f(%rip), %rsi # 0xc5cb28
leaq 0x48(%rsp), %rdi
callq 0x325aa0
leaq 0xb8(%rsp), %rdi
callq 0x325a80
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::BindAliasingAttributeTest::iterate()
|
tcu::TestCase::IterateResult BindAliasingAttributeTest::iterate (void)
{
const vector<Bind> noBindings;
vector<Attribute> attributes;
vector<Bind> bindings;
attributes.push_back(Attribute(m_type, "a_0", Attribute::LOC_UNDEF, Cond("A", true), m_arraySize));
attributes.push_back(Attribute(AttribType("vec4", 1, GL_FLOAT_VEC4), "a_1", Attribute::LOC_UNDEF, Cond("A", false)));
bindings.push_back(Bind("a_0", 1));
bindings.push_back(Bind("a_1", 1 + m_offset));
runTest(m_testCtx, m_renderCtx, attributes, noBindings, bindings, noBindings, false);
return STOP;
}
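// What the test sets up: a_0 and a_1 are bound to overlapping locations
// (1 and 1 + m_offset), which is presumably legal only because the
// Cond("A", true) / Cond("A", false) conditions make the two attributes
// mutually exclusive, so no generated shader ever uses both at once.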
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x188, %rsp # imm = 0x188
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, 0x170(%rsp)
xorl %eax, %eax
movq %rax, 0x180(%rsp)
movq %rax, 0xa0(%rsp)
movaps %xmm0, 0x90(%rsp)
movq %rax, 0x80(%rsp)
movaps %xmm0, 0x70(%rsp)
leaq 0x58(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0x1f9885(%rip), %rsi # 0xa8a25d
leaq 0x1f9881(%rip), %rdx # 0xa8a260
leaq 0x48(%rsp), %rdi
callq 0x334070
leaq 0x38(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x1ce49c(%rip), %rsi # 0xa5ee95
leaq 0x1ce496(%rip), %rdx # 0xa5ee96
leaq 0x28(%rsp), %rdi
callq 0x334070
leaq 0x8(%rsp), %rdi
movb $0x1, -0x8(%rdi)
leaq 0x18(%rsp), %r13
movq %r13, -0x10(%r13)
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
leaq 0x78(%rbx), %rsi
movl 0xa4(%rbx), %r9d
leaq 0xd0(%rsp), %rdi
leaq 0x48(%rsp), %rdx
movq %rsp, %r8
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x90(%rsp), %rdi
leaq 0xd0(%rsp), %rsi
callq 0x89c29a
leaq 0x138(%rsp), %rbp
movq -0x10(%rbp), %rdi
cmpq %rbp, %rdi
je 0x890a89
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x108(%rsp), %r14
movq -0x10(%r14), %rdi
cmpq %r14, %rdi
je 0x890aaa
movq 0x108(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0xe0(%rsp), %r12
movq -0x10(%r12), %rdi
cmpq %r12, %rdi
je 0x890acc
movq 0xe0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x890ae3
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x28(%rsp), %rdi
leaq 0x38(%rsp), %r13
cmpq %r13, %rdi
je 0x890aff
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x48(%rsp), %rdi
cmpq %r15, %rdi
je 0x890b16
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x28(%rsp), %rdi
movq %r13, (%rdi)
leaq 0x181db2(%rip), %rsi # 0xa128d7
leaq 0x181daf(%rip), %rdx # 0xa128db
callq 0x334070
leaq 0x10(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
addq %rsi, %rdx
movq %rsp, %rdi
callq 0x334442
movabsq $0x8b5200000001, %rax # imm = 0x8B5200000001
movq %rax, 0x20(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x1f9711(%rip), %rsi # 0xa8a282
leaq 0x1f970d(%rip), %rdx # 0xa8a285
leaq 0xb0(%rsp), %rdi
callq 0x334070
leaq 0x160(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0x1ce2fd(%rip), %rsi # 0xa5ee95
leaq 0x1ce2f7(%rip), %rdx # 0xa5ee96
leaq 0x150(%rsp), %rdi
callq 0x334070
leaq 0x50(%rsp), %rdi
movb $0x0, -0x8(%rdi)
leaq 0x60(%rsp), %r15
movq %r15, -0x10(%r15)
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
leaq 0xd0(%rsp), %rdi
movq %rsp, %rsi
leaq 0xb0(%rsp), %rdx
leaq 0x48(%rsp), %r8
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x90(%rsp), %rdi
leaq 0xd0(%rsp), %rsi
callq 0x89c29a
movq 0x128(%rsp), %rdi
cmpq %rbp, %rdi
je 0x890c30
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xf8(%rsp), %rdi
cmpq %r14, %rdi
je 0x890c4d
movq 0x108(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xd0(%rsp), %rdi
cmpq %r12, %rdi
je 0x890c6a
movq 0xe0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x50(%rsp), %rdi
cmpq %r15, %rdi
je 0x890c81
movq 0x60(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x150(%rsp), %rdi
cmpq %r13, %rdi
leaq 0x10(%rsp), %r15
je 0x890ca3
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xb0(%rsp), %rdi
leaq 0xc0(%rsp), %rax
cmpq %rax, %rdi
leaq 0x38(%rsp), %r14
je 0x890ccd
movq 0xc0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
cmpq %r15, %rdi
je 0x890ce3
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x28(%rsp), %rdi
cmpq %r14, %rdi
je 0x890cfa
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rsp, %rdi
movq %r15, (%rdi)
leaq 0x1f9556(%rip), %rsi # 0xa8a25d
leaq 0x1f9552(%rip), %rdx # 0xa8a260
callq 0x334070
leaq 0xd0(%rsp), %rdi
movq %r12, (%rdi)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
leaq 0xd0(%rsp), %rsi
movl $0x1, 0x20(%rsi)
leaq 0x70(%rsp), %rdi
callq 0x89c578
movq 0xd0(%rsp), %rdi
cmpq %r12, %rdi
je 0x890d65
movq 0xe0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
cmpq %r15, %rdi
je 0x890d7b
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rsp, %rdi
movq %r15, (%rdi)
leaq 0x1f94fa(%rip), %rsi # 0xa8a282
leaq 0x1f94f6(%rip), %rdx # 0xa8a285
callq 0x334070
movl 0xa0(%rbx), %ebp
leaq 0xd0(%rsp), %rdi
movq %r12, (%rdi)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
incl %ebp
leaq 0xd0(%rsp), %r14
movl %ebp, 0x20(%r14)
leaq 0x70(%rsp), %rdi
movq %r14, %rsi
callq 0x89c578
movq 0xd0(%rsp), %rdi
cmpq %r12, %rdi
je 0x890dee
movq 0xe0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
cmpq %r15, %rdi
je 0x890e04
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x8(%rbx), %rdi
movq 0x70(%rbx), %rsi
xorps %xmm0, %xmm0
movaps %xmm0, 0xd0(%rsp)
movq $0x0, 0xe0(%rsp)
subq $0x8, %rsp
xorl %eax, %eax
leaq 0x98(%rsp), %rdx
leaq 0x78(%rsp), %r8
leaq 0x178(%rsp), %rcx
movq %rcx, %r9
pushq %r14
pushq %rax
pushq %rax
callq 0x88a8ae
addq $0x20, %rsp
leaq 0xd0(%rsp), %rdi
callq 0x89b1bc
leaq 0x70(%rsp), %rdi
callq 0x89b1ec
leaq 0x90(%rsp), %rdi
callq 0x89b1bc
leaq 0x170(%rsp), %rdi
callq 0x89b1ec
xorl %eax, %eax
addq $0x188, %rsp # imm = 0x188
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
leaq 0xd0(%rsp), %rdi
callq 0x89b1bc
jmp 0x89101a
jmp 0x890eb1
jmp 0x890ed3
jmp 0x891017
movq %rax, %rbx
movq 0xd0(%rsp), %rdi
cmpq %r12, %rdi
je 0x890ed6
movq 0xe0(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890ed6
movq %rax, %rbx
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x89101a
movq 0x10(%rsp), %rsi
jmp 0x89100d
jmp 0x891017
movq %rax, %rbx
leaq 0xd0(%rsp), %rdi
callq 0x89b16e
jmp 0x890f0c
movq %rax, %rbx
movq 0x50(%rsp), %rdi
cmpq %r15, %rdi
je 0x890f28
movq 0x60(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890f28
movq %rax, %rbx
movq 0x150(%rsp), %rdi
cmpq %r13, %rdi
je 0x890f4a
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890f4a
movq %rax, %rbx
movq 0xb0(%rsp), %rdi
leaq 0xc0(%rsp), %rax
cmpq %rax, %rdi
je 0x890f74
movq 0xc0(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890f74
movq %rax, %rbx
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x890f94
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890f94
movq %rax, %rbx
movq 0x28(%rsp), %rdi
leaq 0x38(%rsp), %rax
cmpq %rax, %rdi
je 0x89101a
movq 0x38(%rsp), %rsi
jmp 0x89100d
jmp 0x891017
movq %rax, %rbx
leaq 0xd0(%rsp), %rdi
callq 0x89b16e
jmp 0x890fc1
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r13, %rdi
je 0x890fdd
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890fdd
movq %rax, %rbx
movq 0x28(%rsp), %rdi
leaq 0x38(%rsp), %rax
cmpq %rax, %rdi
je 0x890ffe
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x890ffe
movq %rax, %rbx
movq 0x48(%rsp), %rdi
cmpq %r15, %rdi
je 0x89101a
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x89101a
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x89b1ec
leaq 0x90(%rsp), %rdi
callq 0x89b1bc
leaq 0x170(%rsp), %rdi
callq 0x89b1ec
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::BindMaxAliasingAttributeTest::iterate()
|
tcu::TestCase::IterateResult BindMaxAliasingAttributeTest::iterate (void)
{
const vector<Bind> noBindings;
const deInt32 maxAttributes = getMaxAttributeLocations(m_renderCtx);
const int arrayElementCount = (m_arraySize != Attribute::NOT_ARRAY ? m_arraySize : 1);
vector<Attribute> attributes;
vector<Bind> bindings;
int ndx = 0;
m_testCtx.getLog() << TestLog::Message << "GL_MAX_VERTEX_ATTRIBS: " << maxAttributes << TestLog::EndMessage;
for (int loc = maxAttributes - arrayElementCount * m_type.getLocationSize(); loc >= 0; loc -= m_type.getLocationSize() * arrayElementCount)
{
attributes.push_back(Attribute(m_type, "a_" + de::toString(ndx), Attribute::LOC_UNDEF, Cond("A", true)));
bindings.push_back(Bind("a_" + de::toString(ndx), loc));
attributes.push_back(Attribute(m_type, "a_" + de::toString(ndx + maxAttributes), Attribute::LOC_UNDEF, Cond("A", false)));
bindings.push_back(Bind("a_" + de::toString(ndx + maxAttributes), loc));
ndx++;
}
runTest(m_testCtx, m_renderCtx, attributes, noBindings, bindings, noBindings, false);
return STOP;
}
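// The loop above walks locations downward from maxAttributes, placing at each
// step one active attribute (Cond "A" true) and one aliasing inactive one
// (Cond "A" false, its name offset by maxAttributes) at the same location, so
// the whole attribute range is covered with aliasing pairs.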
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2e8, %rsp # imm = 0x2E8
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, 0x140(%rsp)
movq $0x0, 0x150(%rsp)
movq 0x70(%rdi), %rdi
callq 0x890121
movq %rax, 0x70(%rsp)
movl 0xa0(%rbx), %eax
cmpl $-0x1, %eax
movl $0x1, %ecx
cmovnel %eax, %ecx
movl %ecx, 0x44(%rsp)
xorl %eax, %eax
movq %rax, 0x130(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x120(%rsp)
movq %rax, 0x110(%rsp)
movaps %xmm0, 0x100(%rsp)
movq 0x8(%rbx), %rax
movq 0x10(%rax), %rax
leaq 0x168(%rsp), %r14
movq %rax, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
leaq 0x1f8f23(%rip), %rsi # 0xa8a261
movl $0x17, %edx
movq %r14, %rdi
callq 0x325e70
movq %r14, %rdi
movq 0x70(%rsp), %rsi
callq 0x325530
leaq 0x211f2e(%rip), %rsi # 0xaa328d
leaq 0x160(%rsp), %rdi
callq 0x9aba2a
movq 0x3cb7b5(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x1d8(%rsp), %rdi
callq 0x325a80
movq %rbx, 0x68(%rsp)
movl 0x98(%rbx), %eax
imull 0x44(%rsp), %eax
movq 0x70(%rsp), %rcx
movl %ecx, %ebx
subl %eax, %ebx
js 0x891a5d
movq 0x68(%rsp), %rax
addq $0x78, %rax
movq %rax, 0x78(%rsp)
xorl %ebp, %ebp
leaq 0x160(%rsp), %r15
leaq 0x20(%rsp), %r13
leaq 0x80(%rsp), %r12
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %ebp, %esi
callq 0x325530
movq %r13, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3cb730(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r13, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f4961(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x891449
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x89144f
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x58(%rsp), %rax
movq %rax, 0x48(%rsp)
leaq 0x48(%rsp), %rdi
leaq 0x1cda18(%rip), %rsi # 0xa5ee95
leaq 0x1cda12(%rip), %rdx # 0xa5ee96
callq 0x334070
movb $0x1, 0x160(%rsp)
leaq 0x178(%rsp), %rax
movq %rax, 0x168(%rsp)
movq 0x48(%rsp), %rsi
movq 0x50(%rsp), %rdx
addq %rsi, %rdx
leaq 0x168(%rsp), %rdi
callq 0x334442
movq %r12, %rdi
movq 0x78(%rsp), %rsi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %r15, %r8
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x120(%rsp), %rdi
movq %r12, %rsi
callq 0x89c29a
movq 0xd8(%rsp), %rdi
leaq 0xe8(%rsp), %rax
cmpq %rax, %rdi
je 0x89150e
movq 0xe8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x891533
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x891558
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x89157d
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x48(%rsp), %rdi
leaq 0x58(%rsp), %rax
cmpq %rax, %rdi
je 0x891599
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x8915b4
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x8915d0
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %ebp, %esi
callq 0x325530
movq %r13, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3cb52c(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r13, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f475d(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x89164d
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x891653
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x90(%rsp), %rax
movq %rax, 0x80(%rsp)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
movq %r12, %rdi
callq 0x334442
movl %ebx, 0xa0(%rsp)
leaq 0x100(%rsp), %rdi
movq %r12, %rsi
callq 0x89c578
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x8916cb
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x8916e6
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x891702
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r15, %rdi
callq 0x325e00
movq 0x70(%rsp), %rax
leal (%rax,%rbp), %r14d
movq %r15, %rdi
movl %r14d, %esi
callq 0x325530
movq %r13, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3cb3f0(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r13, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f4621(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x891789
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x89178f
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x58(%rsp), %rax
movq %rax, 0x48(%rsp)
leaq 0x48(%rsp), %rdi
leaq 0x1cd6d8(%rip), %rsi # 0xa5ee95
leaq 0x1cd6d2(%rip), %rdx # 0xa5ee96
callq 0x334070
movb $0x0, 0x160(%rsp)
leaq 0x178(%rsp), %rax
movq %rax, 0x168(%rsp)
movq 0x48(%rsp), %rsi
movq 0x50(%rsp), %rdx
addq %rsi, %rdx
leaq 0x168(%rsp), %rdi
callq 0x334442
movq %r12, %rdi
movq 0x78(%rsp), %rsi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %r15, %r8
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x120(%rsp), %rdi
movq %r12, %rsi
callq 0x89c29a
movq 0xd8(%rsp), %rdi
leaq 0xe8(%rsp), %rax
cmpq %rax, %rdi
je 0x89184e
movq 0xe8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x891873
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x891898
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x8918bd
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x48(%rsp), %rdi
leaq 0x58(%rsp), %rax
cmpq %rax, %rdi
je 0x8918d9
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x8918f4
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x891910
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %r14d, %esi
callq 0x325530
movq %r13, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3cb1eb(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r13, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f441c(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x89198e
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x891994
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x90(%rsp), %rax
movq %rax, 0x80(%rsp)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
movq %r12, %rdi
callq 0x334442
movl %ebx, 0xa0(%rsp)
leaq 0x100(%rsp), %rdi
movq %r12, %rsi
callq 0x89c578
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x891a0c
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x891a27
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x891a43
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
incl %ebp
movq 0x68(%rsp), %rax
movl 0x98(%rax), %eax
imull 0x44(%rsp), %eax
subl %eax, %ebx
jns 0x8913cc
movq 0x68(%rsp), %rax
movq 0x8(%rax), %rdi
movq 0x70(%rax), %rsi
xorps %xmm0, %xmm0
movaps %xmm0, 0x160(%rsp)
movq $0x0, 0x170(%rsp)
subq $0x8, %rsp
xorl %eax, %eax
leaq 0x128(%rsp), %rdx
leaq 0x108(%rsp), %r8
leaq 0x148(%rsp), %rcx
movq %rcx, %r9
leaq 0x168(%rsp), %r10
pushq %r10
pushq %rax
pushq %rax
callq 0x88a8ae
addq $0x20, %rsp
leaq 0x160(%rsp), %rdi
callq 0x89b1bc
leaq 0x100(%rsp), %rdi
callq 0x89b1ec
leaq 0x120(%rsp), %rdi
callq 0x89b1bc
leaq 0x140(%rsp), %rdi
callq 0x89b1ec
xorl %eax, %eax
addq $0x2e8, %rsp # imm = 0x2E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r15
leaq 0x160(%rsp), %rdi
callq 0x89b1bc
jmp 0x891c4f
jmp 0x891c20
movq %rax, %r15
jmp 0x891c69
movq %rax, %r15
movq 0x3caffd(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x1d8(%rsp), %rdi
jmp 0x891c4a
jmp 0x891bb5
jmp 0x891bba
jmp 0x891b5e
jmp 0x891b63
jmp 0x891b68
jmp 0x891c20
jmp 0x891bb5
jmp 0x891bba
jmp 0x891bbf
jmp 0x891c20
jmp 0x891bb5
jmp 0x891bba
movq %rax, %r15
jmp 0x891b9d
movq %rax, %r15
jmp 0x891b78
movq %rax, %r15
leaq 0x80(%rsp), %rdi
callq 0x89b16e
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x891b9d
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x48(%rsp), %rdi
leaq 0x58(%rsp), %rax
cmpq %rax, %rdi
je 0x891be7
movq 0x58(%rsp), %rsi
jmp 0x891bdf
jmp 0x891c20
movq %rax, %r15
jmp 0x891c02
movq %rax, %r15
jmp 0x891be7
movq %rax, %r15
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x891be7
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x891c02
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x891c4f
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x891c4f
movq %rax, %r15
jmp 0x891c4f
jmp 0x891c2b
jmp 0x891c2b
jmp 0x891c2b
movq %rax, %r15
movq 0x3caef3(%rip), %rsi # 0xc5cb28
leaq 0x160(%rsp), %rdi
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
leaq 0x100(%rsp), %rdi
callq 0x89b1ec
leaq 0x120(%rsp), %rdi
callq 0x89b1bc
leaq 0x140(%rsp), %rdi
callq 0x89b1ec
movq %r15, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::BindHoleAttributeTest::iterate()
|
deInt32 getMaxAttributeLocations (glu::RenderContext& renderCtx)
{
const glw::Functions& gl = renderCtx.getFunctions();
deInt32 maxAttribs;
gl.getIntegerv(GL_MAX_VERTEX_ATTRIBS, &maxAttribs);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv()");
return maxAttribs;
}
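// The disassembly below begins by calling this helper (the early
// callq 0x890121, the same address invoked by the previous test's iterate()).
// GL requires GL_MAX_VERTEX_ATTRIBS to be at least 8 on GLES2 and at least 16
// on GLES3 / desktop GL, so maxAttribs gives these tests a guaranteed range
// of bindable locations to work with.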
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2c8, %rsp # imm = 0x2C8
xorps %xmm0, %xmm0
movaps %xmm0, 0x130(%rsp)
movq $0x0, 0x140(%rsp)
movq %rdi, 0x28(%rsp)
movq 0x70(%rdi), %rdi
callq 0x890121
movl %eax, 0x24(%rsp)
leaq 0x160(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0x180ae0(%rip), %rsi # 0xa128d7
leaq 0x180add(%rip), %rdx # 0xa128db
leaq 0x150(%rsp), %rdi
callq 0x334070
leaq 0x118(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rdx
addq %rsi, %rdx
leaq 0x108(%rsp), %rdi
callq 0x334442
movabsq $0x8b5200000001, %rax # imm = 0x8B5200000001
movq %rax, 0x128(%rsp)
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x891e66
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x28(%rsp), %rax
movl 0xa0(%rax), %eax
cmpl $-0x1, %eax
movl $0x1, %r14d
cmovnel %eax, %r14d
xorl %eax, %eax
movq %rax, 0x60(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x50(%rsp)
movq %rax, 0x100(%rsp)
movaps %xmm0, 0xf0(%rsp)
leaq 0x10(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x1f83b0(%rip), %rsi # 0xa8a25d
leaq 0x1f83ac(%rip), %rdx # 0xa8a260
movq %rsp, %rdi
callq 0x334070
leaq 0x88(%rsp), %r12
movb $0x1, -0x18(%r12)
movq %r12, -0x10(%r12)
movabsq $0x737961776c615f5f, %rax # imm = 0x737961776C615F5F
movq %rax, (%r12)
movw $0x5f5f, 0x8(%r12) # imm = 0x5F5F
movq $0xa, -0x8(%r12)
movb $0x0, 0xa(%r12)
leaq 0x150(%rsp), %rdi
leaq 0x108(%rsp), %rsi
movq %rsp, %rdx
leaq 0x70(%rsp), %r8
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x50(%rsp), %rdi
leaq 0x150(%rsp), %rsi
callq 0x89c29a
leaq 0x1b8(%rsp), %rbx
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x891f4f
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x188(%rsp), %r13
movq -0x10(%r13), %rdi
cmpq %r13, %rdi
je 0x891f70
movq 0x188(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x891f8d
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x78(%rsp), %rdi
cmpq %r12, %rdi
je 0x891fa7
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x891fc2
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x80(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x1f8288(%rip), %rsi # 0xa8a25d
leaq 0x1f8284(%rip), %rdx # 0xa8a260
leaq 0x70(%rsp), %rdi
callq 0x334070
leaq 0x150(%rsp), %rdi
movq %r15, (%rdi)
movq 0x70(%rsp), %rsi
movq 0x78(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
leaq 0x150(%rsp), %rsi
movl $0x0, 0x20(%rsi)
leaq 0xf0(%rsp), %rdi
callq 0x89c578
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x89203c
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x70(%rsp), %rdi
leaq 0x80(%rsp), %rax
cmpq %rax, %rdi
je 0x89205e
movq 0x80(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rsp, %rdi
leaq 0x10(%rsp), %rax
movq %rax, (%rdi)
leaq 0x1f8212(%rip), %rsi # 0xa8a282
leaq 0x1f820e(%rip), %rdx # 0xa8a285
callq 0x334070
movq 0x28(%rsp), %rax
leaq 0x78(%rax), %rsi
leaq 0x70(%rsp), %r8
movb $0x1, (%r8)
movq %r12, 0x8(%r8)
movabsq $0x737961776c615f5f, %rcx # imm = 0x737961776C615F5F
movq %rcx, 0x18(%r8)
movw $0x5f5f, 0x20(%r8) # imm = 0x5F5F
movq $0xa, 0x10(%r8)
movb $0x0, 0x22(%r8)
movl 0xa0(%rax), %r9d
leaq 0x150(%rsp), %rdi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x50(%rsp), %rdi
leaq 0x150(%rsp), %rsi
callq 0x89c29a
movq 0x1a8(%rsp), %rdi
cmpq %rbx, %rdi
je 0x8920ff
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x178(%rsp), %rdi
cmpq %r13, %rdi
je 0x89211c
movq 0x188(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x892139
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x78(%rsp), %rdi
cmpq %r12, %rdi
je 0x892153
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x89216e
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x28(%rsp), %rax
imull 0x98(%rax), %r14d
incl %r14d
cmpl 0x24(%rsp), %r14d
jge 0x8924a7
leaq 0x168(%rsp), %r13
movl $0x2, %ebx
leaq 0x150(%rsp), %r15
leaq 0x30(%rsp), %r12
leaq 0x70(%rsp), %rbp
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %ebx, %esi
callq 0x325530
movq %r12, %rdi
leaq 0x158(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3ca954(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1c0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r12, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f3b85(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x892225
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x89222b
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
xorl %ecx, %ecx
movb %cl, 0x10(%rax)
movb $0x1, 0x150(%rsp)
movq %r13, 0x158(%rsp)
movabsq $0x737961776c615f5f, %rax # imm = 0x737961776C615F5F
movq %rax, (%r13)
movw $0x5f5f, 0x8(%r13) # imm = 0x5F5F
movq $0xa, 0x160(%rsp)
movb %cl, 0x172(%rsp)
movq %rbp, %rdi
leaq 0x108(%rsp), %rsi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %r15, %r8
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x50(%rsp), %rdi
movq %rbp, %rsi
callq 0x89c29a
movq 0xc8(%rsp), %rdi
leaq 0xd8(%rsp), %rax
cmpq %rax, %rdi
je 0x8922cf
movq 0xd8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x98(%rsp), %rdi
leaq 0xa8(%rsp), %rax
cmpq %rax, %rdi
je 0x8922f4
movq 0xa8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x70(%rsp), %rdi
leaq 0x80(%rsp), %rax
cmpq %rax, %rdi
je 0x892316
movq 0x80(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x158(%rsp), %rdi
cmpq %r13, %rdi
je 0x892333
movq 0x168(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x89234e
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x89236a
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r15, %rdi
callq 0x325e00
movq %r15, %rdi
movl %ebx, %esi
callq 0x325530
movq %r12, %rdi
leaq 0x158(%rsp), %rsi
callq 0x325660
movq %r15, %rdi
movq 0x3ca792(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1c0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %r12, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f39c3(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x8923e7
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x8923ed
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x80(%rsp), %rax
movq %rax, 0x70(%rsp)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
movq %rbp, %rdi
callq 0x334442
movl %r14d, 0x90(%rsp)
leaq 0xf0(%rsp), %rdi
movq %rbp, %rsi
callq 0x89c578
movq 0x70(%rsp), %rdi
leaq 0x80(%rsp), %rax
cmpq %rax, %rdi
je 0x892460
movq 0x80(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x89247b
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x892497
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
incl %ebx
incl %r14d
cmpl %r14d, 0x24(%rsp)
jne 0x8921a8
movq 0x28(%rsp), %rax
movq 0x8(%rax), %rdi
movq 0x70(%rax), %rsi
xorps %xmm0, %xmm0
movaps %xmm0, 0x150(%rsp)
movq $0x0, 0x160(%rsp)
subq $0x8, %rsp
xorl %eax, %eax
leaq 0x58(%rsp), %rdx
leaq 0xf8(%rsp), %r8
leaq 0x138(%rsp), %rcx
movq %rcx, %r9
leaq 0x158(%rsp), %r10
pushq %r10
pushq %rax
pushq %rax
callq 0x88a8ae
addq $0x20, %rsp
leaq 0x150(%rsp), %rdi
callq 0x89b1bc
leaq 0xf0(%rsp), %rdi
callq 0x89b1ec
leaq 0x50(%rsp), %rdi
callq 0x89b1bc
movq 0x108(%rsp), %rdi
leaq 0x118(%rsp), %rax
cmpq %rax, %rdi
je 0x892547
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x130(%rsp), %rdi
callq 0x89b1ec
xorl %eax, %eax
addq $0x2c8, %rsp # imm = 0x2C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x89b1bc
jmp 0x89270e
jmp 0x8925d3
jmp 0x8925e5
jmp 0x892678
movq %rax, %rbx
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x8925ab
movq 0x160(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x8925ab
movq %rax, %rbx
movq 0x70(%rsp), %rdi
leaq 0x80(%rsp), %rax
cmpq %rax, %rdi
je 0x89270e
movq 0x80(%rsp), %rsi
jmp 0x8926de
jmp 0x892678
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x89b16e
jmp 0x8925e8
movq %rax, %rbx
movq 0x78(%rsp), %rdi
cmpq %r12, %rdi
je 0x892602
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x89270e
movq 0x10(%rsp), %rsi
jmp 0x8926de
jmp 0x892678
movq %rax, %rbx
movq 0x150(%rsp), %rdi
cmpq %r15, %rdi
je 0x89274a
movq 0x160(%rsp), %rsi
jmp 0x892742
jmp 0x892643
movq %rax, %rbx
jmp 0x89274a
jmp 0x892678
jmp 0x892680
movq %rax, %rbx
jmp 0x892661
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x89b16e
movq 0x158(%rsp), %rdi
cmpq %r13, %rdi
je 0x8926af
movq 0x168(%rsp), %rsi
jmp 0x8926a7
movq %rax, %rbx
jmp 0x89270e
movq %rax, %rbx
jmp 0x8926ca
movq %rax, %rbx
jmp 0x8926af
movq %rax, %rbx
movq 0x70(%rsp), %rdi
leaq 0x80(%rsp), %rax
cmpq %rax, %rdi
je 0x8926af
movq 0x80(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x8926ca
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x89270e
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x89270e
jmp 0x8926ea
movq %rax, %rbx
movq 0x3ca434(%rip), %rsi # 0xc5cb28
leaq 0x150(%rsp), %rdi
callq 0x325aa0
leaq 0x1c0(%rsp), %rdi
callq 0x325a80
leaq 0xf0(%rsp), %rdi
callq 0x89b1ec
leaq 0x50(%rsp), %rdi
callq 0x89b1bc
movq 0x108(%rsp), %rdi
leaq 0x118(%rsp), %rax
cmpq %rax, %rdi
je 0x89274a
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x130(%rsp), %rdi
callq 0x89b1ec
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::BindInactiveAliasingAttributeTest::iterate()
|
tcu::TestCase::IterateResult BindInactiveAliasingAttributeTest::iterate (void)
{
const vector<Bind> noBindings;
const deInt32 maxAttributes = getMaxAttributeLocations(m_renderCtx);
const int arrayElementCount = (m_arraySize != Attribute::NOT_ARRAY ? m_arraySize : 1);
vector<Attribute> attributes;
vector<Bind> bindings;
int ndx = 0;
m_testCtx.getLog() << TestLog::Message << "GL_MAX_VERTEX_ATTRIBS: " << maxAttributes << TestLog::EndMessage;
for (int loc = maxAttributes - arrayElementCount * m_type.getLocationSize(); loc >= 0; loc -= m_type.getLocationSize() * arrayElementCount)
{
attributes.push_back(Attribute(m_type, "a_" + de::toString(ndx), Attribute::LOC_UNDEF, Cond("A")));
bindings.push_back(Bind("a_" + de::toString(ndx), loc));
attributes.push_back(Attribute(m_type, "a_" + de::toString(ndx + maxAttributes), Attribute::LOC_UNDEF, Cond::COND_NEVER));
bindings.push_back(Bind("a_" + de::toString(ndx + maxAttributes), loc));
ndx++;
}
runTest(m_testCtx, m_renderCtx, attributes, noBindings, bindings, noBindings, false);
return STOP;
}
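
To make the generated bindings concrete, the standalone sketch below enumerates the (name, location) pairs the loop above produces for one hypothetical configuration; the maxAttributes, location-size, and array-count values are invented for illustration, not queried from a real GL context.

#include <cstdio>

// Standalone sketch of the binding pattern built by the loop above: each
// chosen location receives two aliasing attributes, "a_<ndx>" (active under
// condition "A") and "a_<ndx + maxAttributes>" (never active). All inputs
// here are hypothetical values, not results of GL queries.
int main (void)
{
	const int maxAttributes     = 16; // stand-in for GL_MAX_VERTEX_ATTRIBS
	const int locationSize      = 1;  // e.g. a vec4 occupies one location
	const int arrayElementCount = 1;  // non-array attribute

	int ndx = 0;
	for (int loc = maxAttributes - arrayElementCount*locationSize; loc >= 0;
		 loc -= locationSize*arrayElementCount)
	{
		std::printf("a_%-2d -> location %d (active)\n",   ndx,                 loc);
		std::printf("a_%-2d -> location %d (inactive)\n", ndx + maxAttributes, loc);
		ndx++;
	}
	return 0;
}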
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2e8, %rsp # imm = 0x2E8
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, 0x140(%rsp)
movq $0x0, 0x150(%rsp)
movq 0x70(%rdi), %rdi
callq 0x890121
movq %rax, 0x50(%rsp)
movl 0xa0(%rbx), %eax
cmpl $-0x1, %eax
movl $0x1, %ecx
cmovnel %eax, %ecx
movl %ecx, 0x44(%rsp)
xorl %eax, %eax
movq %rax, 0x130(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x120(%rsp)
movq %rax, 0x110(%rsp)
movaps %xmm0, 0x100(%rsp)
movq 0x8(%rbx), %rax
movq 0x10(%rax), %rax
leaq 0x168(%rsp), %r14
movq %rax, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
leaq 0x1f7809(%rip), %rsi # 0xa8a261
movl $0x17, %edx
movq %r14, %rdi
callq 0x325e70
movq %r14, %rdi
movq 0x50(%rsp), %rsi
callq 0x325530
leaq 0x210814(%rip), %rsi # 0xaa328d
leaq 0x160(%rsp), %rdi
callq 0x9aba2a
movq 0x3ca09b(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x1d8(%rsp), %rdi
callq 0x325a80
movq %rbx, 0x48(%rsp)
movl 0x98(%rbx), %eax
imull 0x44(%rsp), %eax
movq 0x50(%rsp), %rcx
movl %ecx, %r14d
subl %eax, %r14d
js 0x89314e
movq 0x48(%rsp), %rax
addq $0x78, %rax
movq %rax, 0x58(%rsp)
leaq 0x160(%rsp), %r13
leaq 0x20(%rsp), %rbp
leaq 0x80(%rsp), %rbx
xorl %r12d, %r12d
movq %r13, %rdi
callq 0x325e00
movq %r13, %rdi
movl %r12d, %esi
callq 0x325530
movq %rbp, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r13, %rdi
movq 0x3ca012(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %rbp, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f3243(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x892b67
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x892b6d
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x70(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x60(%rsp), %rdi
leaq 0x1cc2fa(%rip), %rsi # 0xa5ee95
leaq 0x1cc2f4(%rip), %rdx # 0xa5ee96
callq 0x334070
movb $0x1, 0x160(%rsp)
leaq 0x178(%rsp), %rax
movq %rax, 0x168(%rsp)
movq 0x60(%rsp), %rsi
movq 0x68(%rsp), %rdx
addq %rsi, %rdx
leaq 0x168(%rsp), %rdi
callq 0x334442
movq %rbx, %rdi
movq 0x58(%rsp), %rsi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %r13, %r8
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x120(%rsp), %rdi
movq %rbx, %rsi
callq 0x89c29a
movq 0xd8(%rsp), %rdi
leaq 0xe8(%rsp), %rax
cmpq %rax, %rdi
je 0x892c2c
movq 0xe8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x892c51
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x892c76
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x892c9b
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x60(%rsp), %rdi
leaq 0x70(%rsp), %rax
cmpq %rax, %rdi
je 0x892cb7
movq 0x70(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x892cd2
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x892cee
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r13, %rdi
callq 0x325e00
movq %r13, %rdi
movl %r12d, %esi
callq 0x325530
movq %rbp, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r13, %rdi
movq 0x3c9e0d(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %rbp, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f303e(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x892d6c
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x892d72
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x90(%rsp), %rax
movq %rax, 0x80(%rsp)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
movq %rbx, %rdi
callq 0x334442
movl %r14d, 0xa0(%rsp)
leaq 0x100(%rsp), %rdi
movq %rbx, %rsi
callq 0x89c578
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x892deb
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x892e06
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x892e22
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r13, %rdi
callq 0x325e00
movq 0x50(%rsp), %rax
leal (%rax,%r12), %r15d
movq %r13, %rdi
movl %r15d, %esi
callq 0x325530
movq %rbp, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r13, %rdi
movq 0x3c9cd0(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %rbp, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f2f01(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x892ea9
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x892eaf
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
xorl %ecx, %ecx
movb %cl, 0x10(%rax)
movb %cl, 0x160(%rsp)
leaq 0x178(%rsp), %rax
movq %rax, 0x168(%rsp)
movabsq $0x737961776c615f5f, %rdx # imm = 0x737961776C615F5F
movq %rdx, (%rax)
movw $0x5f5f, 0x8(%rax) # imm = 0x5F5F
movq $0xa, 0x170(%rsp)
movb %cl, 0x182(%rsp)
movq %rbx, %rdi
movq 0x58(%rsp), %rsi
movq %rsp, %rdx
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq %r13, %r8
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
callq 0x88a7a2
leaq 0x120(%rsp), %rdi
movq %rbx, %rsi
callq 0x89c29a
movq 0xd8(%rsp), %rdi
leaq 0xe8(%rsp), %rax
cmpq %rax, %rdi
je 0x892f58
movq 0xe8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x892f7d
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x892fa2
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x892fc7
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x892fe2
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x892ffe
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r13, %rdi
callq 0x325e00
movq %r13, %rdi
movl %r15d, %esi
callq 0x325530
movq %rbp, %rdi
leaq 0x168(%rsp), %rsi
callq 0x325660
movq %r13, %rdi
movq 0x3c9afd(%rip), %rsi # 0xc5cb28
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
movl $0x2, %r8d
movq %rbp, %rdi
xorl %esi, %esi
xorl %edx, %edx
leaq 0x1f2d2e(%rip), %rcx # 0xa85d7f
callq 0x325230
leaq 0x10(%rsp), %rsi
movq %rsi, (%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x89307c
movq %rdx, (%rsp)
movq (%rcx), %rdx
movq %rdx, 0x10(%rsp)
jmp 0x893082
movups (%rcx), %xmm0
movups %xmm0, (%rsi)
movq 0x8(%rax), %rdx
movq %rdx, 0x8(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x90(%rsp), %rax
movq %rax, 0x80(%rsp)
movq (%rsp), %rsi
movq 0x8(%rsp), %rdx
addq %rsi, %rdx
movq %rbx, %rdi
callq 0x334442
movl %r14d, 0xa0(%rsp)
leaq 0x100(%rsp), %rdi
movq %rbx, %rsi
callq 0x89c578
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x8930fb
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x893116
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x893132
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
incl %r12d
movq 0x48(%rsp), %rax
movl 0x98(%rax), %eax
imull 0x44(%rsp), %eax
subl %eax, %r14d
jns 0x892ae9
movq 0x48(%rsp), %rax
movq 0x8(%rax), %rdi
movq 0x70(%rax), %rsi
xorps %xmm0, %xmm0
movaps %xmm0, 0x160(%rsp)
movq $0x0, 0x170(%rsp)
subq $0x8, %rsp
xorl %eax, %eax
leaq 0x128(%rsp), %rdx
leaq 0x108(%rsp), %r8
leaq 0x148(%rsp), %rcx
movq %rcx, %r9
leaq 0x168(%rsp), %r10
pushq %r10
pushq %rax
pushq %rax
callq 0x88a8ae
addq $0x20, %rsp
leaq 0x160(%rsp), %rdi
callq 0x89b1bc
leaq 0x100(%rsp), %rdi
callq 0x89b1ec
leaq 0x120(%rsp), %rdi
callq 0x89b1bc
leaq 0x140(%rsp), %rdi
callq 0x89b1ec
xorl %eax, %eax
addq $0x2e8, %rsp # imm = 0x2E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
leaq 0x160(%rsp), %rdi
callq 0x89b1bc
jmp 0x893376
jmp 0x893347
movq %rax, %rbx
jmp 0x893390
movq %rax, %rbx
movq 0x3c990c(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x1d8(%rsp), %rdi
jmp 0x893371
jmp 0x8932dc
movq %rax, %rbx
jmp 0x89324b
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0x89b16e
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x89330e
movq 0x178(%rsp), %rsi
jmp 0x893306
jmp 0x893347
jmp 0x8932dc
jmp 0x8932e1
jmp 0x8932e6
jmp 0x893347
jmp 0x8932dc
jmp 0x8932e1
movq %rax, %rbx
jmp 0x8932c4
movq %rax, %rbx
jmp 0x89329f
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0x89b16e
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x8932c4
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x60(%rsp), %rdi
leaq 0x70(%rsp), %rax
cmpq %rax, %rdi
je 0x89330e
movq 0x70(%rsp), %rsi
jmp 0x893306
jmp 0x893347
movq %rax, %rbx
jmp 0x893329
movq %rax, %rbx
jmp 0x89330e
movq %rax, %rbx
movq 0x80(%rsp), %rdi
leaq 0x90(%rsp), %rax
cmpq %rax, %rdi
je 0x89330e
movq 0x90(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq (%rsp), %rdi
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x893329
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x893376
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x893376
movq %rax, %rbx
jmp 0x893376
jmp 0x893352
jmp 0x893352
jmp 0x893352
movq %rax, %rbx
movq 0x3c97cc(%rip), %rsi # 0xc5cb28
leaq 0x160(%rsp), %rdi
callq 0x325aa0
leaq 0x1d0(%rsp), %rdi
callq 0x325a80
leaq 0x100(%rsp), %rdi
callq 0x89b1ec
leaq 0x120(%rsp), %rdi
callq 0x89b1bc
leaq 0x140(%rsp), %rdi
callq 0x89b1ec
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
deqp::gls::MixedAttributeTest::MixedAttributeTest(tcu::TestContext&, glu::RenderContext&, deqp::gls::AttributeLocationTestUtil::AttribType const&, int)
|
MixedAttributeTest::MixedAttributeTest (tcu::TestContext& testCtx,
glu::RenderContext& renderCtx,
const AttribType& type,
int arraySize)
: TestCase (testCtx, generateTestName(type, arraySize).c_str(), generateTestName(type, arraySize).c_str())
, m_renderCtx (renderCtx)
, m_type (type)
, m_arraySize (arraySize)
{
}
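
A detail worth noting in this initializer list: each generateTestName(...) temporary lives until the end of the full-expression containing the mem-initializer, so handing its .c_str() to the TestCase base constructor is safe as long as the base copies the string during construction. A minimal, self-contained illustration of that lifetime rule, with names invented for the example:

#include <cstdio>
#include <string>

static std::string makeName (void) { return "mixed_test"; }

struct Base
{
	// The const char* arguments are only read during construction, matching
	// how TestCase consumes the name/description pointers above.
	Base (const char* name, const char* desc) { std::printf("%s / %s\n", name, desc); }
};

struct Derived : Base
{
	// Both temporaries from makeName() live until the full-expression that
	// contains the mem-initializer finishes, so the pointers stay valid for
	// the whole Base constructor call.
	Derived (void) : Base(makeName().c_str(), makeName().c_str()) {}
};

int main (void) { Derived d; return 0; }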
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebp
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
leaq 0x28(%rsp), %r13
movq %r13, %rdi
movq %rcx, %rsi
movl %r8d, %edx
callq 0x88f4d7
movq (%r13), %r13
leaq 0x8(%rsp), %rdi
movq %r14, %rsi
movl %ebp, %edx
callq 0x88f4d7
movq 0x8(%rsp), %rcx
movq %rbx, %rdi
movq %r12, %rsi
movq %r13, %rdx
callq 0x9a9abc
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x895235
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x38(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x895250
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x3c1b39(%rip), %rax # 0xc56d90
movq %rax, (%rbx)
movq %r15, 0x70(%rbx)
leaq 0x78(%rbx), %rdi
leaq 0x88(%rbx), %rax
movq %rax, 0x78(%rbx)
movq (%r14), %rsi
movq 0x8(%r14), %rdx
addq %rsi, %rdx
callq 0x334442
movq 0x20(%r14), %rax
movq %rax, 0x98(%rbx)
movl %ebp, 0xa0(%rbx)
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r14
movq %rbx, %rdi
callq 0x9a98c4
jmp 0x8952e7
movq %rax, %r14
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x8952cc
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x8952cc
movq %rax, %r14
leaq 0x38(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x8952e7
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %r14, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/modules/glshared/glsAttributeLocationTests.cpp
|
rsg::AssignOp::~AssignOp()
|
AssignOp::~AssignOp (void)
{
delete m_lvalueExpr;
delete m_rvalueExpr;
}
|
pushq %rbx
movq %rdi, %rbx
leaq 0x2d7de7(%rip), %rax # 0xc597d0
movq %rax, (%rdi)
movq 0xa0(%rdi), %rdi
testq %rdi, %rdi
je 0x9819fe
movq (%rdi), %rax
callq *0x8(%rax)
movq 0xa8(%rbx), %rdi
testq %rdi, %rdi
je 0x981a10
movq (%rdi), %rax
callq *0x8(%rax)
movq 0x88(%rbx), %rdi
testq %rdi, %rdi
je 0x981a2b
movq 0x98(%rbx), %rsi
subq %rdi, %rsi
callq 0x3251a0
addq $0x8, %rbx
movq %rbx, %rdi
popq %rbx
jmp 0x97f67a
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::AssignOp::createNextChild(rsg::GeneratorState&)
|
Expression* AssignOp::createNextChild (GeneratorState& state)
{
if (m_lvalueExpr == DE_NULL)
{
// Construct lvalue
// \todo [2011-03-14 pyry] Proper l-value generation:
// - pure L-value part is generated first
	//						 - variable value range is made unbounded
// - R-value is generated
// - R-values in L-value are generated
m_lvalueExpr = Expression::createRandomLValue(state, m_valueRange.asAccess());
return m_lvalueExpr;
}
else if (m_rvalueExpr == DE_NULL)
{
// Construct value expr
m_rvalueExpr = Expression::createRandom(state, m_valueRange.asAccess());
return m_rvalueExpr;
}
else
return DE_NULL;
}
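
The contract here is incremental: the first call produces the l-value child, the second the r-value, and any further call returns null. A caller can therefore drive generation with a simple loop; the sketch below uses invented Node/makeChild names to show the pattern and is not the actual rsg interface:

#include <cstdio>
#include <memory>

// Invented stand-in for an expression node that hands out children one at a
// time, mirroring the createNextChild() contract above: the first call fills
// the l-value slot, the second the r-value slot, any later call yields null.
struct Node
{
	std::unique_ptr<Node> lvalue;
	std::unique_ptr<Node> rvalue;

	Node* makeChild (void)
	{
		if (!lvalue) { lvalue.reset(new Node()); return lvalue.get(); }
		if (!rvalue) { rvalue.reset(new Node()); return rvalue.get(); }
		return nullptr; // both children exist -- done, like returning DE_NULL above
	}
};

int main (void)
{
	Node root;
	int numChildren = 0;
	// Generic driver loop: keep requesting children until the node is complete.
	while (root.makeChild() != nullptr)
		numChildren++; // a real generator would recurse into the new child here
	std::printf("created %d children\n", numChildren); // prints: created 2 children
	return 0;
}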
|
pushq %r14
pushq %rbx
subq $0x38, %rsp
movq %rsi, %r14
movq %rdi, %rbx
cmpq $0x0, 0xa0(%rdi)
je 0x981bac
cmpq $0x0, 0xa8(%rbx)
je 0x981c1d
xorl %eax, %eax
jmp 0x981c8c
movq 0x58(%rbx), %rax
movq 0x70(%rbx), %rcx
xorl %edx, %edx
cmpq 0x60(%rbx), %rax
cmoveq %rdx, %rax
cmpq 0x78(%rbx), %rcx
cmoveq %rdx, %rcx
leaq 0x8(%rbx), %rdx
movq %rdx, 0x20(%rsp)
movq %rax, 0x28(%rsp)
movq %rcx, 0x30(%rsp)
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d7aa2(%rip), %rsi # 0xc59690
movq %r14, %rdi
movl $0x1, %edx
callq 0x97fce9
movq 0x30(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
callq *0x8(%rax)
movq %rax, 0xa0(%rbx)
jmp 0x981c8c
movq 0x58(%rbx), %rax
movq 0x70(%rbx), %rcx
xorl %edx, %edx
cmpq 0x60(%rbx), %rax
cmoveq %rdx, %rax
cmpq 0x78(%rbx), %rcx
cmoveq %rdx, %rcx
leaq 0x8(%rbx), %rdx
movq %rdx, 0x20(%rsp)
movq %rax, 0x28(%rsp)
movq %rcx, 0x30(%rsp)
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d7851(%rip), %rsi # 0xc594b0
movq %r14, %rdi
movl $0x1e, %edx
callq 0x97fce9
movq 0x30(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
callq *0x8(%rax)
movq %rax, 0xa8(%rbx)
addq $0x38, %rsp
popq %rbx
popq %r14
retq
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::AssignOp::evaluate(rsg::ExecutionContext&)
|
void AssignOp::evaluate (ExecutionContext& evalCtx)
{
// Evaluate l-value
m_lvalueExpr->evaluate(evalCtx);
// Evaluate value
m_rvalueExpr->evaluate(evalCtx);
m_value.setStorage(m_valueRange.getType());
m_value.getValue(m_valueRange.getType()) = m_rvalueExpr->getValue().value();
// Assign
assignMasked(m_lvalueExpr->getLValue(), m_value.getValue(m_valueRange.getType()), evalCtx.getExecutionMask());
}
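
The closing assignMasked call is what keeps this correct under divergent control flow: only the shader invocations that are currently active receive the new value. Conceptually, per lane (with invented types standing in for the rsg ones, not the real assignMasked signature):

#include <array>
#include <cstdio>

// Conceptual per-lane masked assignment: lanes where the execution mask is
// false keep their previous value. This mirrors the intent of assignMasked()
// above; the types and names here are illustrative only.
template <std::size_t N>
void assignMaskedSketch (std::array<float, N>& dst,
						 const std::array<float, N>& src,
						 const std::array<bool, N>& execMask)
{
	for (std::size_t lane = 0; lane < N; ++lane)
		if (execMask[lane])
			dst[lane] = src[lane];
}

int main (void)
{
	std::array<float, 4> dst  = {0, 0, 0, 0};
	std::array<float, 4> src  = {1, 2, 3, 4};
	std::array<bool,  4> mask = {true, false, true, false};
	assignMaskedSketch(dst, src, mask);
	for (float v : dst)
		std::printf("%g ", v); // prints: 1 0 3 0
	std::printf("\n");
	return 0;
}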
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rsi, %rbx
movq %rdi, %r14
movq 0xa0(%rdi), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movq 0xa8(%r14), %rdi
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x20(%rax)
leaq 0x88(%r14), %r12
leaq 0x8(%r14), %r15
movq %r15, %rdi
callq 0x97dc5e
shll $0x6, %eax
movslq %eax, %rsi
movq %r12, %rdi
callq 0x97355e
movq 0xa8(%r14), %rdi
movq (%rdi), %rax
callq *0x28(%rax)
movq %rdx, %r12
movq 0x88(%r14), %r13
movq %r15, %rdi
callq 0x97dc5e
testl %eax, %eax
je 0x981db1
shll $0x6, %eax
je 0x981db1
movslq %eax, %rdx
shlq $0x2, %rdx
movq %r13, %rdi
movq %r12, %rsi
callq 0x325220
movq 0xa0(%r14), %rdi
movq (%rdi), %rax
callq *0x30(%rax)
movq %rax, %r12
movq %rdx, %r13
movq 0x88(%r14), %r14
movq %rbx, %rdi
callq 0x97300c
movq %rdx, %r9
movq %r12, %rdi
movq %r13, %rsi
movq %r15, %rdx
movq %r14, %rcx
movq %rax, %r8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x973031
nop
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::(anonymous namespace)::allocateNewVariable(rsg::GeneratorState&, rsg::ConstValueRangeAccess)
|
Variable* allocateNewVariable (GeneratorState& state, ConstValueRangeAccess valueRange)
{
Variable* variable = state.getVariableManager().allocate(valueRange.getType());
// Update value range
state.getVariableManager().setValue(variable, valueRange);
// Random storage \todo [pyry] Check that scalar count in uniform/input classes is not exceeded
static const Variable::Storage storages[] =
{
Variable::STORAGE_CONST,
Variable::STORAGE_UNIFORM,
Variable::STORAGE_LOCAL,
Variable::STORAGE_SHADER_IN
};
float weights[DE_LENGTH_OF_ARRAY(storages)];
// Dynamic vs. constant weight.
float dynWeight = computeDynamicRangeWeight(valueRange);
int numScalars = valueRange.getType().getScalarSize();
bool uniformOk = state.getVariableManager().getNumAllocatedUniformScalars() + numScalars <= state.getShaderParameters().maxUniformScalars;
bool shaderInOk = isShaderInOutSupportedType(valueRange.getType()) &&
(state.getVariableManager().getNumAllocatedShaderInVariables() + NUM_RESERVED_SHADER_INPUTS < state.getShaderParameters().maxInputVariables);
weights[0] = de::max(1.0f-dynWeight, 0.1f);
weights[1] = uniformOk ? dynWeight*0.5f : 0.0f;
weights[2] = dynWeight;
weights[3] = shaderInOk ? dynWeight*2.0f : 0.0f;
state.getVariableManager().setStorage(variable, state.getRandom().chooseWeighted<Variable::Storage>(&storages[0], &storages[DE_LENGTH_OF_ARRAY(storages)], &weights[0]));
return variable;
}
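
As a worked example: with dynWeight = 0.6 and both uniform and shader-in storage permitted, the weights come out as {0.4, 0.3, 0.6, 1.2}, so STORAGE_SHADER_IN is the single most likely pick. The sketch below shows the weighted-selection idea with std::mt19937 standing in for the framework RNG; it is not the de::Random implementation:

#include <cstdio>
#include <numeric>
#include <random>

// Minimal weighted selection, illustrating what chooseWeighted() above does
// conceptually: draw a point in [0, sum(weights)) and walk the weights until
// it falls inside one bucket.
static int chooseWeightedIndex (std::mt19937& rng, const float* weights, int count)
{
	const float total = std::accumulate(weights, weights + count, 0.0f);
	std::uniform_real_distribution<float> dist(0.0f, total);
	float r = dist(rng);
	for (int i = 0; i < count; ++i)
	{
		if (r < weights[i])
			return i;
		r -= weights[i];
	}
	return count - 1; // guard against the floating-point edge at the top
}

int main (void)
{
	const char* const storages[] = { "CONST", "UNIFORM", "LOCAL", "SHADER_IN" };
	// Weights for a hypothetical dynWeight = 0.6 with uniform and shader-in
	// allowed: { max(1-0.6, 0.1), 0.6*0.5, 0.6, 0.6*2 }
	const float weights[] = { 0.4f, 0.3f, 0.6f, 1.2f };
	std::mt19937 rng(42);
	std::printf("chose %s\n", storages[chooseWeightedIndex(rng, weights, 4)]);
	return 0;
}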
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdi, %r14
movq 0x28(%rdi), %rdi
movq 0x60(%rsp), %r12
movq %r12, %rsi
callq 0x97bfce
movq %rax, %rbx
movq 0x28(%r14), %rdi
movq 0x70(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x60(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rsi
callq 0x97c2fa
movq 0x70(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x60(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x97a858
movss %xmm0, 0x1c(%rsp)
movq %r12, %rdi
callq 0x97dc5e
movq 0x10(%r14), %rcx
movq 0x28(%r14), %r15
movl 0x54(%r15), %edx
movl 0x14(%rcx), %esi
cmpl $0x1, (%r12)
jne 0x9828b5
movl 0x50(%r15), %edi
incl %edi
cmpl 0x18(%rcx), %edi
setl %cl
jmp 0x9828b7
xorl %ecx, %ecx
addl %eax, %edx
movss 0x793df(%rip), %xmm0 # 0x9fbca0
movss 0x1c(%rsp), %xmm2
subss %xmm2, %xmm0
maxss 0x80031(%rip), %xmm0 # 0xa02904
movss %xmm0, 0x20(%rsp)
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
cmpl %esi, %edx
jg 0x9828ef
movss 0x793b1(%rip), %xmm1 # 0x9fbc9c
mulss %xmm2, %xmm1
movss %xmm1, 0x24(%rsp)
movss %xmm2, 0x28(%rsp)
testb %cl, %cl
je 0x982906
addss %xmm2, %xmm2
movaps %xmm2, %xmm0
leaq 0x20(%rsp), %rcx
movss %xmm0, 0xc(%rcx)
movq 0x8(%r14), %rdi
leaq 0x11d885(%rip), %rsi # 0xaa01a0
leaq 0x11d88e(%rip), %rdx # 0xaa01b0
callq 0x98cec6
movq %r15, %rdi
movq %rbx, %rsi
movl %eax, %edx
callq 0x97c0ee
movq %rbx, %rax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::VariableRead::getWeight(rsg::GeneratorState const&, rsg::ConstValueRangeAccess)
|
float VariableRead::getWeight (const GeneratorState& state, ConstValueRangeAccess valueRange)
{
if (valueRange.getType().isVoid())
{
if (state.getVariableManager().hasEntry(IsReadableEntry(state.getExpressionFlags())) ||
state.getVariableManager().getNumAllocatedScalars() < state.getShaderParameters().maxCombinedVariableScalars)
return unusedValueWeight;
else
return 0.0f;
}
if (!canAllocateVariable(state, valueRange.getType()) &&
!state.getVariableManager().hasEntry(IsReadableIntersectingEntry(valueRange, state.getExpressionFlags())))
return 0.0f;
else
return 1.0f;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x48, %rsp
movq %rdi, %rbx
movq 0x70(%rsp), %rsi
cmpl $0x0, (%rsi)
je 0x982a27
movq %rbx, %rdi
callq 0x9819a9
testb %al, %al
jne 0x982a1d
leaq 0x70(%rsp), %rax
movq 0x28(%rbx), %r14
movq 0x48(%rbx), %rcx
movl -0x4(%rcx), %ecx
movq 0x10(%rax), %rdx
movq %rdx, 0x40(%rsp)
movups (%rax), %xmm0
movups %xmm0, 0x30(%rsp)
movl %ecx, 0x8(%rsp)
movq 0x38(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x20(%rsp)
movups 0x2c(%rsp), %xmm0
movups %xmm0, 0xc(%rsp)
movq 0x30(%r14), %r15
cmpq 0x38(%r14), %r15
setne %bpl
je 0x982a15
movq (%r15), %rsi
leaq 0x8(%rsp), %rdi
callq 0x986a56
testb %al, %al
jne 0x982a15
addq $0x8, %r15
leaq 0x8(%rsp), %rbx
cmpq 0x38(%r14), %r15
setne %bpl
je 0x982a15
movq (%r15), %rsi
movq %rbx, %rdi
callq 0x986a56
addq $0x8, %r15
testb %al, %al
je 0x9829f8
xorps %xmm0, %xmm0
testb %bpl, %bpl
je 0x982a90
movss 0x7927b(%rip), %xmm0 # 0x9fbca0
jmp 0x982a90
movq 0x28(%rbx), %rax
movq 0x30(%rax), %rdx
movq 0x38(%rax), %rsi
cmpq %rsi, %rdx
setne %cl
je 0x982a75
movq 0x48(%rbx), %rdi
movl -0x4(%rdi), %edi
andl $0x1, %edi
je 0x982a75
movq (%rdx), %r8
movq (%r8), %r8
cmpl $0x4, 0x50(%r8)
je 0x982a75
addq $0x8, %rdx
cmpq %rsi, %rdx
setne %cl
je 0x982a75
testl %edi, %edi
je 0x982a75
movq (%rdx), %r8
movq (%r8), %r8
addq $0x8, %rdx
cmpl $0x4, 0x50(%r8)
jne 0x982a58
movss 0x9a11f(%rip), %xmm0 # 0xa1cb9c
testb %cl, %cl
jne 0x982a90
movl 0x48(%rax), %eax
movq 0x10(%rbx), %rcx
cmpl 0x10(%rcx), %eax
jl 0x982a90
xorps %xmm0, %xmm0
addq $0x48, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::VariableWrite::VariableWrite(rsg::GeneratorState&, rsg::ConstValueRangeAccess)
|
VariableWrite::VariableWrite (GeneratorState& state, ConstValueRangeAccess valueRange)
{
DE_ASSERT(!valueRange.getType().isVoid());
// Find variable with range that is superset of given range
IsWritableSupersetEntry::Iterator first = state.getVariableManager().getBegin(IsWritableSupersetEntry(valueRange));
IsWritableSupersetEntry::Iterator end = state.getVariableManager().getEnd(IsWritableSupersetEntry(valueRange));
const float createOnAssignWeight = 0.1f; // Will essentially create an unused variable
bool createVar = canAllocateVariable(state, valueRange.getType()) && (first == end || getWeightedBool(state.getRandom(), createOnAssignWeight));
if (createVar)
{
m_variable = state.getVariableManager().allocate(valueRange.getType());
// \note Storage will be LOCAL
}
else
{
// Choose random
DE_ASSERT(first != end);
const ValueEntry* entry = state.getRandom().choose<const ValueEntry*>(first, end);
m_variable = entry->getVariable();
}
DE_ASSERT(m_variable);
// Reset value range.
const ValueEntry* parentEntry = state.getVariableManager().getParentValue(m_variable);
if (parentEntry)
{
// Use parent value range.
state.getVariableManager().setValue(m_variable, parentEntry->getValueRange());
}
else
{
// Use infinite range.
ValueRange infRange(m_variable->getType());
setInfiniteRange(infRange);
state.getVariableManager().setValue(m_variable, infRange.asAccess());
}
}
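
Numerically, createOnAssignWeight = 0.1 means that when a suitable writable entry exists, a fresh variable is still created on roughly one in ten assignments (and always when no entry exists and allocation is possible). A one-function equivalent of that weighted coin flip, with std::mt19937 as a stand-in RNG rather than de::Random:

#include <random>

// Weighted boolean: true with the given probability. Stands in for
// getWeightedBool(state.getRandom(), 0.1f) above; not the de::Random API.
static bool weightedBoolSketch (std::mt19937& rng, float trueProbability)
{
	std::uniform_real_distribution<float> dist(0.0f, 1.0f);
	return dist(rng) < trueProbability;
}

int main (void)
{
	std::mt19937 rng(1234);
	int hits = 0;
	for (int i = 0; i < 10000; ++i)
		hits += weightedBoolSketch(rng, 0.1f) ? 1 : 0;
	// Expect roughly 1000 hits out of 10000 draws.
	return (hits > 0) ? 0 : 1;
}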
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rdi, %r12
leaq 0x150(%rsp), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rdi)
movq $0x0, 0x18(%rdi)
leaq 0x2d7132(%rip), %rax # 0xc59c00
movq %rax, (%rdi)
movq %rsi, 0x18(%rsp)
movq 0x28(%rsi), %r14
movq 0x10(%rbx), %rax
movq %rax, 0xa0(%rsp)
movaps (%rbx), %xmm0
movaps %xmm0, 0x90(%rsp)
movq 0x30(%r14), %r13
cmpq 0x38(%r14), %r13
je 0x982b1d
leaq 0x90(%rsp), %r15
movq (%r13), %rsi
movq %r15, %rdi
callq 0x986acc
testb %al, %al
jne 0x982b1d
addq $0x8, %r13
cmpq 0x38(%r14), %r13
jne 0x982b03
addq $0x8, %r12
movq 0x38(%r14), %rbp
movq 0xa0(%rsp), %rax
movq %rax, 0x70(%rsp)
movaps 0x90(%rsp), %xmm0
movaps %xmm0, 0x60(%rsp)
movq 0x18(%rsp), %rcx
movq 0x28(%rcx), %rax
movq 0x38(%rax), %r14
movq (%rbx), %r15
movq %rcx, %rbx
movq %rcx, %rdi
movq %r15, %rsi
callq 0x9819a9
testb %al, %al
je 0x982b90
cmpq %r14, %r13
je 0x982b7c
movq 0x8(%rbx), %rdi
callq 0x9fa367
movss 0x7fd8d(%rip), %xmm1 # 0xa02904
ucomiss %xmm0, %xmm1
jbe 0x982b90
movq 0x28(%rbx), %rdi
movq %r15, %rsi
callq 0x97bfce
movq %rax, %rsi
jmp 0x982c1c
movq %r12, 0x20(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0x28(%rsp)
xorl %ebx, %ebx
leaq 0x90(%rsp), %r12
xorl %r15d, %r15d
testl %r15d, %r15d
jle 0x982bc6
movq 0x28(%rsp), %rdi
callq 0x9fa2ea
leal 0x1(%r15), %ecx
xorl %edx, %edx
divl %ecx
testl %edx, %edx
jg 0x982bca
movq (%r13), %rbx
movq 0x70(%rsp), %rax
movq %rax, 0xa0(%rsp)
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x90(%rsp)
addq $0x8, %r13
movq %r13, %rcx
movq %rcx, %r13
cmpq %rbp, %rcx
je 0x982c07
movq (%r13), %rsi
movq %r12, %rdi
callq 0x986acc
leaq 0x8(%r13), %rcx
testb %al, %al
je 0x982beb
incl %r15d
cmpq %r14, %r13
jne 0x982bab
movq (%rbx), %rsi
movq 0x20(%rsp), %r12
movq 0x18(%rsp), %rbx
movq %rsi, (%r12)
movq 0x28(%rbx), %rdi
callq 0x97c282
testq %rax, %rax
je 0x982c84
movq 0x28(%rbx), %rdi
movq %rax, %rcx
addq $0x8, %rcx
movq 0x58(%rax), %rdx
movq 0x70(%rax), %r8
xorl %esi, %esi
cmpq 0x60(%rax), %rdx
cmoveq %rsi, %rdx
cmpq 0x78(%rax), %r8
cmoveq %rsi, %r8
movq (%r12), %rsi
movq %rcx, 0x78(%rsp)
movq %rdx, 0x80(%rsp)
movq %r8, 0x88(%rsp)
movq %r8, 0x10(%rsp)
movups 0x78(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x97c2fa
jmp 0x982d3f
movq (%r12), %rsi
leaq 0x90(%rsp), %r15
movq %r15, %rdi
callq 0x97f2ca
movq 0x50(%r15), %rax
movq 0x68(%r15), %rcx
xorl %edx, %edx
cmpq 0x58(%r15), %rax
cmoveq %rdx, %rax
cmpq 0x70(%r15), %rcx
cmoveq %rdx, %rcx
movq %r15, 0x48(%rsp)
movq %rax, 0x50(%rsp)
movq %rcx, 0x58(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x48(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x982d6b
movq 0x28(%rbx), %rdi
movq 0xe0(%rsp), %rax
movq 0xf8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xe8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0x100(%rsp), %rcx
cmoveq %rdx, %rcx
movq (%r12), %rsi
movq %r15, 0x30(%rsp)
movq %rax, 0x38(%rsp)
movq %rcx, 0x40(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x30(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x97c2fa
leaq 0x90(%rsp), %rdi
callq 0x97f67a
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x982d53
movq %rax, %rbx
leaq 0x90(%rsp), %rdi
callq 0x97f67a
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::ParenOp::tokenize(rsg::GeneratorState&, rsg::TokenStream&) const
|
void ParenOp::tokenize (GeneratorState& state, TokenStream& str) const
{
str << Token::LEFT_PAREN;
m_child->tokenize(state, str);
str << Token::RIGHT_PAREN;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movl $0x10, 0x8(%rsp)
movq 0x8(%rdx), %rax
movq 0x18(%rdx), %rsi
subq (%rdx), %rax
sarq $0x4, %rax
cmpq %rsi, %rax
jne 0x983132
addq $0x40, %rsi
movq %rbx, %rdi
callq 0x974778
movq 0x18(%rbx), %rdi
shlq $0x4, %rdi
addq (%rbx), %rdi
leaq 0x8(%rsp), %rsi
callq 0x979a4e
incq 0x18(%rbx)
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x979a34
movq 0x88(%r15), %rdi
movq (%rdi), %rax
movq %r14, %rsi
movq %rbx, %rdx
callq *0x18(%rax)
movl $0x11, (%r12)
movq 0x8(%rbx), %rax
movq 0x18(%rbx), %rsi
subq (%rbx), %rax
sarq $0x4, %rax
cmpq %rsi, %rax
jne 0x983193
addq $0x40, %rsi
movq %rbx, %rdi
callq 0x974778
movq 0x18(%rbx), %rdi
shlq $0x4, %rdi
addq (%rbx), %rdi
leaq 0x8(%rsp), %rsi
callq 0x979a4e
incq 0x18(%rbx)
leaq 0x8(%rsp), %rdi
callq 0x979a34
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
jmp 0x9831c4
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x979a34
movq %rbx, %rdi
callq 0x3259a0
nop
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::SwizzleOp::createNextChild(rsg::GeneratorState&)
|
Expression* SwizzleOp::createNextChild (GeneratorState& state)
{
if (m_child)
return DE_NULL;
// Compute input value range.
VariableType inVarType = VariableType(m_outValueRange.getType().getBaseType(), m_numInputElements);
ValueRange inValueRange = ValueRange(inVarType);
// Initialize all inputs to -inf..inf
setInfiniteRange(inValueRange);
// Compute intersections.
int numOutputElements = m_outValueRange.getType().getNumElements();
for (int outElemNdx = 0; outElemNdx < numOutputElements; outElemNdx++)
{
int inElemNdx = m_swizzle[outElemNdx];
ValueRange::computeIntersection(inValueRange.asAccess().component(inElemNdx), inValueRange.asAccess().component(inElemNdx), m_outValueRange.asAccess().component(outElemNdx));
}
// Create child.
state.pushPrecedence(swizzlePrecedence);
m_child = Expression::createRandom(state, inValueRange.asAccess());
state.popPrecedence();
return m_child;
}
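
A concrete trace of the intersection loop: for a hypothetical .yx swizzle with output ranges x in [0,1] and y in [2,3], input component 0 ends up constrained to [2,3] and component 1 to [0,1], with every component starting from -inf..inf. The sketch below replays that computation with a bare Range struct instead of the rsg ValueRange machinery:

#include <algorithm>
#include <cstdio>
#include <limits>

struct Range { float lo, hi; };

// Intersect dst with src, mirroring ValueRange::computeIntersection for a
// single float component. Illustrative only.
static void intersect (Range& dst, const Range& src)
{
	dst.lo = std::max(dst.lo, src.lo);
	dst.hi = std::min(dst.hi, src.hi);
}

int main (void)
{
	const float inf = std::numeric_limits<float>::infinity();
	// Hypothetical ".yx" swizzle on a 2-component input: output element 0
	// reads input component 1, output element 1 reads input component 0.
	const int swizzle[2]  = { 1, 0 };
	Range     outRange[2] = { {0.0f, 1.0f}, {2.0f, 3.0f} };
	Range     inRange[2]  = { {-inf, inf}, {-inf, inf} };

	for (int outElem = 0; outElem < 2; ++outElem)
		intersect(inRange[swizzle[outElem]], outRange[outElem]);

	// in[0] = [2,3] (feeds output element 1), in[1] = [0,1] (feeds element 0).
	for (int i = 0; i < 2; ++i)
		std::printf("in[%d] = [%g, %g]\n", i, inRange[i].lo, inRange[i].hi);
	return 0;
}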
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
cmpq $0x0, 0x90(%rdi)
je 0x9834a7
xorl %r15d, %r15d
jmp 0x9837d7
movq %rsi, %rbx
movq %rdi, %rbp
movl 0x8(%rdi), %eax
movl 0x88(%rdi), %ecx
leaq 0x140(%rsp), %rdx
movl %eax, -0x18(%rdx)
movl $0x0, -0x14(%rdx)
movq %rdx, -0x10(%rdx)
movq $0x0, -0x8(%rdx)
movb $0x0, (%rdx)
movl %ecx, 0x10(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rdx)
movups %xmm0, 0x28(%rdx)
leaq 0x90(%rsp), %r14
leaq 0x128(%rsp), %rsi
movq %r14, %rdi
callq 0x97f2ca
movq %rbx, 0x40(%rsp)
movq 0xe0(%rsp), %rax
movq 0xf8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xe8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0x100(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r14, 0x78(%rsp)
movq %rax, 0x80(%rsp)
movq %rcx, 0x88(%rsp)
movq 0x88(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x78(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x982d6b
movslq 0x30(%rbp), %rax
movq %rax, 0x30(%rsp)
testq %rax, %rax
jle 0x9836ed
leaq 0x8(%rbp), %rax
movq %rax, 0x48(%rsp)
leaq 0x8c(%rbp), %rax
shlq $0x2, 0x30(%rsp)
leaq 0x90(%rsp), %r12
movq $0x0, 0x38(%rsp)
movq %rbp, 0x50(%rsp)
movq %rax, 0x58(%rsp)
movzbl (%rax), %r13d
movq 0xe0(%rsp), %rbx
movq 0xe8(%rsp), %r15
movq 0xf8(%rsp), %r14
movq 0x100(%rsp), %rbp
movq %r12, %rdi
callq 0x97dd26
cmpq %rbp, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
cmpq %r15, %rbx
cmoveq %rcx, %rbx
leal (,%r13,4), %ecx
addq %rcx, %rbx
addq %rcx, %r14
movq %rax, 0x60(%rsp)
movq %rbx, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq 0xe0(%rsp), %rbx
movq 0xe8(%rsp), %r15
movq 0xf8(%rsp), %r14
movq 0x100(%rsp), %rbp
movq %r12, %rdi
callq 0x97dd26
cmpq %rbp, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
cmpq %r15, %rbx
cmoveq %rcx, %rbx
shll $0x2, %r13d
addq %r13, %rbx
addq %r13, %r14
movq %rax, 0x18(%rsp)
movq %rbx, 0x20(%rsp)
movq %r14, 0x28(%rsp)
movq 0x50(%rsp), %rbp
movq 0x58(%rbp), %rbx
movq 0x60(%rbp), %r15
movq 0x70(%rbp), %r14
movq 0x78(%rbp), %r13
movq 0x48(%rsp), %rdi
callq 0x97dd26
movq 0x38(%rsp), %r12
cmpq %r13, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
cmpq %r15, %rbx
cmoveq %rcx, %rbx
addq %r12, %rbx
addq %r12, %r14
movq %rax, 0x110(%rsp)
movq %rbx, 0x118(%rsp)
movq %r14, 0x120(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x60(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x18(%rsp), %rdi
leaq 0x110(%rsp), %rsi
callq 0x97f6fe
movq 0x58(%rsp), %rax
incq %rax
addq $0x4, %r12
movq %r12, 0x38(%rsp)
cmpq %r12, 0x30(%rsp)
leaq 0x90(%rsp), %r12
jne 0x98359a
movl $0x2, 0x18(%rsp)
movq 0x40(%rsp), %rbx
movq 0x60(%rbx), %rsi
cmpq 0x68(%rbx), %rsi
je 0x98371c
movl $0x2, (%rsi)
addq $0x4, %rsi
movq %rsi, 0x60(%rbx)
leaq 0x90(%rsp), %rsi
jmp 0x983732
leaq 0x58(%rbx), %rdi
leaq 0x18(%rsp), %rdx
callq 0x334698
leaq 0x90(%rsp), %rsi
movq 0xe0(%rsp), %rax
movq 0xf8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xe8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0x100(%rsp), %rcx
cmoveq %rdx, %rcx
movq %rsi, 0x18(%rsp)
movq %rax, 0x20(%rsp)
movq %rcx, 0x28(%rsp)
movq 0x28(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x18(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d5d2b(%rip), %rsi # 0xc594b0
movq %rbx, %rdi
movl $0x1e, %edx
callq 0x97fce9
movq 0x8(%rax), %rax
movq 0x28(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x18(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq *%rax
movq %rax, %r15
movq %rax, 0x90(%rbp)
addq $-0x4, 0x60(%rbx)
leaq 0x90(%rsp), %rdi
callq 0x97f67a
leaq 0x128(%rsp), %rdi
callq 0x81aa2c
movq %r15, %rax
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x9837fb
jmp 0x9837fb
movq %rax, %rbx
jmp 0x98380b
jmp 0x9837fb
jmp 0x9837fb
jmp 0x9837fb
movq %rax, %rbx
leaq 0x90(%rsp), %rdi
callq 0x97f67a
leaq 0x128(%rsp), %rdi
callq 0x81aa2c
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
rsg::TexLookup::createNextChild(rsg::GeneratorState&)
|
Expression* TexLookup::createNextChild (GeneratorState& state)
{
bool hasLodBias = m_type == TYPE_TEXTURE2D_LOD ||
m_type == TYPE_TEXTURE2D_PROJ_LOD ||
m_type == TYPE_TEXTURECUBE_LOD;
if (hasLodBias && !m_lodBiasExpr)
{
ValueRange lodRange(VariableType(VariableType::TYPE_FLOAT, 1));
setInfiniteRange(lodRange); // Any value is valid.
m_lodBiasExpr = Expression::createRandom(state, lodRange.asAccess());
return m_lodBiasExpr;
}
if (!m_coordExpr)
{
if (m_type == TYPE_TEXTURECUBE || m_type == TYPE_TEXTURECUBE_LOD)
{
// Make sure major axis selection can be done.
int majorAxisNdx = state.getRandom().getInt(0, 2);
ValueRange coordRange(VariableType(VariableType::TYPE_FLOAT, 3));
for (int ndx = 0; ndx < 3; ndx++)
{
if (ndx == majorAxisNdx)
{
bool neg = state.getRandom().getBool();
coordRange.getMin().component(ndx) = neg ? -4.0f : 2.25f;
coordRange.getMax().component(ndx) = neg ? -2.25f : 4.0f;
}
else
{
coordRange.getMin().component(ndx) = -2.0f;
coordRange.getMax().component(ndx) = 2.0f;
}
}
m_coordExpr = Expression::createRandom(state, coordRange.asAccess());
}
else
{
bool isProj = m_type == TYPE_TEXTURE2D_PROJ || m_type == TYPE_TEXTURE2D_PROJ_LOD;
int coordScalarSize = isProj ? 3 : 2;
ValueRange coordRange(VariableType(VariableType::TYPE_FLOAT, coordScalarSize));
setInfiniteRange(coordRange); // Initialize base range with -inf..inf
if (isProj)
{
// w coordinate must be something sane, and not 0.
bool neg = state.getRandom().getBool();
coordRange.getMin().component(2) = neg ? -4.0f : 0.25f;
coordRange.getMax().component(2) = neg ? -0.25f : 4.0f;
}
m_coordExpr = Expression::createRandom(state, coordRange.asAccess());
}
DE_ASSERT(m_coordExpr);
return m_coordExpr;
}
return DE_NULL; // Done.
}
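
The cube-map ranges are chosen so that the invariant "the major axis always has the strictly largest magnitude" holds: minor components stay within [-2, 2] while the major component's magnitude lies in [2.25, 4], so face selection can never be ambiguous. A tiny self-contained check of that invariant:

#include <cassert>
#include <cmath>

// Sanity check of the cube-map range construction above: any value drawn
// from the major-axis range has strictly larger magnitude than any value
// from a minor-axis range.
int main (void)
{
	const float minorLo = -2.0f, minorHi = 2.0f;		// non-major components
	const float majorLoMag = 2.25f, majorHiMag = 4.0f;	// |major| range (sign chosen randomly)

	const float maxMinorMag = std::fmax(std::fabs(minorLo), std::fabs(minorHi)); // 2.0
	assert(majorLoMag > maxMinorMag && majorHiMag > maxMinorMag);
	return 0;
}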
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %rsi, %rbx
movq %rdi, %r13
movl 0x8(%rdi), %eax
cmpl $0x5, %eax
ja 0x98408a
movl $0x2a, %ecx
btl %eax, %ecx
jae 0x98408a
cmpq $0x0, 0x20(%r13)
je 0x98409d
cmpq $0x0, 0x18(%r13)
je 0x9841d9
xorl %r14d, %r14d
jmp 0x9845bb
leaq 0x38(%rsp), %rax
movq $0x1, -0x18(%rax)
movq %rax, -0x10(%rax)
movq $0x0, -0x8(%rax)
movb $0x0, (%rax)
movl $0x1, 0x10(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rax)
movups %xmm0, 0x28(%rax)
leaq 0x70(%rsp), %r15
leaq 0x20(%rsp), %rsi
movq %r15, %rdi
callq 0x97f2ca
leaq 0x20(%rsp), %rdi
callq 0x81aa2c
movq 0xc0(%rsp), %rax
movq 0xd8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0xe0(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r15, 0x110(%rsp)
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq 0x120(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x110(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x982d6b
movq 0xc0(%rsp), %rax
movq 0xd8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0xe0(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r15, 0x20(%rsp)
movq %rax, 0x28(%rsp)
movq %rcx, 0x30(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d5316(%rip), %rsi # 0xc594b0
movq %rbx, %rdi
movl $0x1e, %edx
callq 0x97fce9
movq 0x8(%rax), %rax
movq 0x30(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq *%rax
movq %rax, %r14
movq %rax, 0x20(%r13)
leaq 0x70(%rsp), %rdi
callq 0x97f67a
jmp 0x9845bb
movb $0x1, %bpl
leal -0x2(%rax), %ecx
cmpl $0x2, %ecx
jb 0x9843fe
addl $-0x4, %eax
cmpl $0x1, %eax
ja 0x9843fc
movq %r13, 0xf0(%rsp)
movq %rbx, 0x18(%rsp)
movq 0x8(%rbx), %rdi
callq 0x9fa2ea
movl %eax, %r15d
leaq 0x38(%rsp), %rax
movq $0x1, -0x18(%rax)
movq %rax, -0x10(%rax)
movq $0x0, -0x8(%rax)
movb $0x0, (%rax)
movl $0x3, 0x10(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rax)
movups %xmm0, 0x28(%rax)
leaq 0x70(%rsp), %rdi
leaq 0x20(%rsp), %rsi
callq 0x97f2ca
movl %r15d, %eax
movl $0xaaaaaaab, %ecx # imm = 0xAAAAAAAB
imulq %rax, %rcx
shrq $0x21, %rcx
leal (%rcx,%rcx,2), %eax
subl %eax, %r15d
leaq 0x20(%rsp), %rdi
callq 0x81aa2c
xorl %r13d, %r13d
leaq 0x70(%rsp), %r12
cmpq %r13, %r15
jne 0x984303
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rdi
callq 0x9fa3c4
movl %eax, %ebp
movq 0xc0(%rsp), %rbx
movq 0xc8(%rsp), %r14
movq %r12, %rdi
callq 0x97dd26
cmpq %r14, %rbx
movl $0x0, %eax
cmovneq %rbx, %rax
xorl %ecx, %ecx
cmpl $0x1, %ebp
sete %cl
leaq 0x11bda6(%rip), %rdx # 0xaa0064
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, (%rax,%r13,4)
movq 0xd8(%rsp), %rbx
movq 0xe0(%rsp), %r14
movq %r12, %rdi
callq 0x97dd26
xorl %ecx, %ecx
cmpl $0x1, %ebp
sete %cl
cmpq %r14, %rbx
movl $0x0, %eax
cmovneq %rbx, %rax
leaq 0x11bd70(%rip), %rdx # 0xaa006c
movss (%rdx,%rcx,4), %xmm0
jmp 0x98435b
movq 0xc0(%rsp), %rbx
movq 0xc8(%rsp), %r14
movq %r12, %rdi
callq 0x97dd26
cmpq %r14, %rbx
movl $0x0, %eax
cmovneq %rbx, %rax
movl $0xc0000000, (%rax,%r13,4) # imm = 0xC0000000
movq 0xd8(%rsp), %rbx
movq 0xe0(%rsp), %r14
movq %r12, %rdi
callq 0x97dd26
cmpq %r14, %rbx
movl $0x0, %eax
cmovneq %rbx, %rax
movss 0x7cf49(%rip), %xmm0 # 0xa012a4
movss %xmm0, (%rax,%r13,4)
incq %r13
cmpq $0x3, %r13
jne 0x984272
movq 0xc0(%rsp), %rax
movq 0xd8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0xe0(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r12, 0x20(%rsp)
movq %rax, 0x28(%rsp)
movq %rcx, 0x30(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d50ef(%rip), %rsi # 0xc594b0
movq 0x18(%rsp), %rbx
movq %rbx, %rdi
movl $0x1e, %edx
callq 0x97fce9
movq 0x8(%rax), %rax
movq 0x30(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq *%rax
movq 0xf0(%rsp), %r13
jmp 0x9845a9
xorl %ebp, %ebp
movzbl %bpl, %eax
orl $0x2, %eax
leaq 0x38(%rsp), %rcx
movq $0x1, -0x18(%rcx)
movq %rcx, -0x10(%rcx)
movq $0x0, -0x8(%rcx)
movb $0x0, (%rcx)
movl %eax, 0x10(%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rcx)
movups %xmm0, 0x28(%rcx)
leaq 0x70(%rsp), %r15
leaq 0x20(%rsp), %rsi
movq %r15, %rdi
callq 0x97f2ca
leaq 0x20(%rsp), %rdi
callq 0x81aa2c
movq 0xc0(%rsp), %rax
movq 0xd8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0xe0(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r15, 0xf8(%rsp)
movq %rax, 0x100(%rsp)
movq %rcx, 0x108(%rsp)
movq 0x108(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, (%rsp)
callq 0x982d6b
testb %bpl, %bpl
je 0x98452d
movq 0x8(%rbx), %rdi
callq 0x9fa3c4
movl %eax, %ebp
movq %rbx, 0x18(%rsp)
leaq 0x70(%rsp), %rdi
movq 0x50(%rdi), %rbx
movq 0x58(%rdi), %r14
callq 0x97dd26
xorl %eax, %eax
cmpq %r14, %rbx
cmovneq %rbx, %rax
xorl %ecx, %ecx
cmpl $0x1, %ebp
sete %cl
leaq 0x11bb8a(%rip), %rdx # 0xaa0074
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, 0x8(%rax)
leaq 0x70(%rsp), %rdi
movq 0x68(%rdi), %r12
movq 0x70(%rdi), %rbx
callq 0x97dd26
xorl %eax, %eax
cmpl $0x1, %ebp
sete %al
xorl %ecx, %ecx
cmpq %rbx, %r12
cmovneq %r12, %rcx
leaq 0x11bb5e(%rip), %rdx # 0xaa007c
movss (%rdx,%rax,4), %xmm0
movss %xmm0, 0x8(%rcx)
movq 0x18(%rsp), %rbx
movq 0xc0(%rsp), %rax
movq 0xd8(%rsp), %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rax
cmoveq %rdx, %rax
cmpq 0xe0(%rsp), %rcx
cmoveq %rdx, %rcx
movq %r15, 0x20(%rsp)
movq %rax, 0x28(%rsp)
movq %rcx, 0x30(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x2d4f30(%rip), %rsi # 0xc594b0
movq %rbx, %rdi
movl $0x1e, %edx
callq 0x97fce9
movq 0x8(%rax), %rax
movq 0x30(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
callq *%rax
movq %rax, 0x18(%r13)
leaq 0x70(%rsp), %rdi
callq 0x97f67a
movq 0x18(%r13), %r14
movq %r14, %rax
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845e0
jmp 0x9845fb
jmp 0x9845e0
jmp 0x9845fb
jmp 0x9845fb
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x81aa2c
jmp 0x984608
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845fb
jmp 0x9845fb
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x97f67a
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/randomshaders/rsgExpression.cpp
|
tcu::intThresholdCompare(tcu::TestLog&, char const*, char const*, tcu::ConstPixelBufferAccess const&, tcu::ConstPixelBufferAccess const&, tcu::Vector<unsigned int, 4> const&, tcu::CompareLogMode)
|
bool intThresholdCompare (TestLog& log, const char* imageSetName, const char* imageSetDesc, const ConstPixelBufferAccess& reference, const ConstPixelBufferAccess& result, const UVec4& threshold, CompareLogMode logMode)
{
int width = reference.getWidth();
int height = reference.getHeight();
int depth = reference.getDepth();
TextureLevel errorMaskStorage (TextureFormat(TextureFormat::RGB, TextureFormat::UNORM_INT8), width, height, depth);
PixelBufferAccess errorMask = errorMaskStorage.getAccess();
UVec4 maxDiff (0, 0, 0, 0);
Vec4 pixelBias (0.0f, 0.0f, 0.0f, 0.0f);
Vec4 pixelScale (1.0f, 1.0f, 1.0f, 1.0f);
TCU_CHECK_INTERNAL(result.getWidth() == width && result.getHeight() == height && result.getDepth() == depth);
for (int z = 0; z < depth; z++)
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
IVec4 refPix = reference.getPixelInt(x, y, z);
IVec4 cmpPix = result.getPixelInt(x, y, z);
UVec4 diff = abs(refPix - cmpPix).cast<deUint32>();
bool isOk = boolAll(lessThanEqual(diff, threshold));
maxDiff = max(maxDiff, diff);
errorMask.setPixel(isOk ? IVec4(0, 0xff, 0, 0xff) : IVec4(0xff, 0, 0, 0xff), x, y, z);
}
}
}
bool compareOk = boolAll(lessThanEqual(maxDiff, threshold));
if (!compareOk || logMode == COMPARE_LOG_EVERYTHING)
{
// All formats except normalized unsigned fixed point ones need remapping in order to fit into unorm channels in logged images.
if (tcu::getTextureChannelClass(reference.getFormat().type) != tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT ||
tcu::getTextureChannelClass(result.getFormat().type) != tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT)
{
computeScaleAndBias(reference, result, pixelScale, pixelBias);
log << TestLog::Message << "Result and reference images are normalized with formula p * " << pixelScale << " + " << pixelBias << TestLog::EndMessage;
}
if (!compareOk)
log << TestLog::Message << "Image comparison failed: max difference = " << maxDiff << ", threshold = " << threshold << TestLog::EndMessage;
log << TestLog::ImageSet(imageSetName, imageSetDesc)
<< TestLog::Image("Result", "Result", result, pixelScale, pixelBias)
<< TestLog::Image("Reference", "Reference", reference, pixelScale, pixelBias)
<< TestLog::Image("ErrorMask", "Error mask", errorMask)
<< TestLog::EndImageSet;
}
else if (logMode == COMPARE_LOG_RESULT)
{
if (result.getFormat() != TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8))
computePixelScaleBias(result, pixelScale, pixelBias);
log << TestLog::ImageSet(imageSetName, imageSetDesc)
<< TestLog::Image("Result", "Result", result, pixelScale, pixelBias)
<< TestLog::EndImageSet;
}
return compareOk;
}
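
The pass/fail core of this routine is a per-channel absolute difference checked against the threshold, with maxDiff accumulated across the whole image for the log message. A minimal single-pixel version, using plain arrays in place of tcu::IVec4/UVec4:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Per-pixel core of the threshold compare above: a pixel passes iff every
// channel's absolute difference is <= the corresponding threshold. The
// 4-element arrays stand in for tcu's vector types.
static bool pixelWithinThreshold (const int32_t ref[4], const int32_t cmp[4],
								  const uint32_t threshold[4], uint32_t maxDiff[4])
{
	bool ok = true;
	for (int c = 0; c < 4; ++c)
	{
		const uint32_t diff = (uint32_t)std::abs(ref[c] - cmp[c]);
		ok = ok && (diff <= threshold[c]);
		if (diff > maxDiff[c])
			maxDiff[c] = diff; // running max over the whole image
	}
	return ok;
}

int main (void)
{
	const int32_t  ref[4]       = { 200, 100, 50, 255 };
	const int32_t  cmp[4]       = { 198, 104, 50, 255 };
	const uint32_t threshold[4] = { 4, 4, 4, 4 };
	uint32_t       maxDiff[4]   = { 0, 0, 0, 0 };
	std::printf("ok = %d, maxDiff = {%u,%u,%u,%u}\n",
				(int)pixelWithinThreshold(ref, cmp, threshold, maxDiff),
				(unsigned)maxDiff[0], (unsigned)maxDiff[1],
				(unsigned)maxDiff[2], (unsigned)maxDiff[3]);
	return 0;
}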
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x4b8, %rsp # imm = 0x4B8
movq %r9, %r13
movq %r8, %r12
movq %rdx, %rbp
movq %rsi, 0xe0(%rsp)
movq %rdi, %r15
movl 0x8(%rcx), %ebx
movl 0xc(%rcx), %r9d
movq %rcx, 0x28(%rsp)
movl 0x10(%rcx), %r8d
movabsq $0x300000007, %rax # imm = 0x300000007
leaq 0x2e8(%rsp), %rsi
movq %rax, (%rsi)
leaq 0x490(%rsp), %r14
movq %r14, %rdi
movl %ebx, %edx
movl %r9d, 0x20(%rsp)
movl %r9d, %ecx
movl %r8d, 0x1c(%rsp)
callq 0x9b4270
leaq 0x468(%rsp), %rdi
movq %r14, %rsi
callq 0x41071a
movq %rbp, 0xd8(%rsp)
movq %r15, 0xe8(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xf0(%rsp)
movaps %xmm0, 0x90(%rsp)
movaps 0x5e35a(%rip), %xmm0 # 0xa00c70
movups %xmm0, 0xa8(%rsp)
cmpl %ebx, 0x8(%r12)
jne 0x9a332a
movl 0x20(%rsp), %eax
cmpl %eax, 0xc(%r12)
jne 0x9a332a
movl 0x1c(%rsp), %eax
cmpl %eax, 0x10(%r12)
jne 0x9a332a
movq %r12, 0x10(%rsp)
cmpl $0x0, 0x1c(%rsp)
jle 0x9a2ae6
xorl %r14d, %r14d
movabsq $0xff00000000, %rbp # imm = 0xFF00000000
cmpl $0x0, 0x20(%rsp)
jle 0x9a2ad8
xorl %r15d, %r15d
testl %ebx, %ebx
jle 0x9a2aca
xorl %r12d, %r12d
leaq 0x2e8(%rsp), %rdi
movq 0x28(%rsp), %rsi
movl %r12d, %edx
movl %r15d, %ecx
movl %r14d, %r8d
callq 0x9ad9f2
leaq 0x1c0(%rsp), %rdi
movq 0x10(%rsp), %rsi
movl %r12d, %edx
movl %r15d, %ecx
movl %r14d, %r8d
callq 0x9ad9f2
xorps %xmm0, %xmm0
movaps %xmm0, 0x30(%rsp)
xorl %eax, %eax
movl 0x2e8(%rsp,%rax,4), %ecx
subl 0x1c0(%rsp,%rax,4), %ecx
movl %ecx, 0x30(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a29bd
xorps %xmm0, %xmm0
movaps %xmm0, 0x50(%rsp)
xorl %eax, %eax
movl 0x30(%rsp,%rax,4), %ecx
movl %ecx, %edx
negl %edx
cmovsl %ecx, %edx
movl %edx, 0x50(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a29e2
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0x250(%rsp)
movl $0x0, 0x50(%rsp)
xorl %eax, %eax
movl 0x250(%rsp,%rax,4), %ecx
cmpl (%r13,%rax,4), %ecx
setbe 0x50(%rsp,%rax)
incq %rax
cmpq $0x4, %rax
jne 0x9a2a11
cmpb $0x0, 0x50(%rsp)
je 0x9a2a51
xorl %ecx, %ecx
movq %rcx, %rax
cmpq $0x3, %rcx
je 0x9a2a48
leaq 0x1(%rax), %rcx
cmpb $0x0, 0x51(%rsp,%rax)
jne 0x9a2a34
cmpq $0x3, %rax
setae %al
jmp 0x9a2a53
xorl %eax, %eax
xorps %xmm0, %xmm0
movaps %xmm0, 0x50(%rsp)
xorl %ecx, %ecx
movl 0xf0(%rsp,%rcx,4), %edx
movl 0x250(%rsp,%rcx,4), %esi
cmpl %esi, %edx
cmoval %edx, %esi
movl %esi, 0x50(%rsp,%rcx,4)
incq %rcx
cmpq $0x4, %rcx
jne 0x9a2a5d
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0xf0(%rsp)
testb %al, %al
je 0x9a2a95
movq %rbp, 0x50(%rsp)
jmp 0x9a2a9e
movq $0xff, 0x50(%rsp)
movq %rbp, 0x58(%rsp)
leaq 0x468(%rsp), %rdi
leaq 0x50(%rsp), %rsi
movl %r12d, %edx
movl %r15d, %ecx
movl %r14d, %r8d
callq 0x9af5c4
incl %r12d
cmpl %ebx, %r12d
jne 0x9a297d
incl %r15d
cmpl 0x20(%rsp), %r15d
jne 0x9a2972
incl %r14d
cmpl 0x1c(%rsp), %r14d
jne 0x9a2964
movl 0x4f0(%rsp), %eax
movl $0x0, 0x2e8(%rsp)
xorl %ecx, %ecx
movl 0xf0(%rsp,%rcx,4), %edx
cmpl (%r13,%rcx,4), %edx
setbe 0x2e8(%rsp,%rcx)
incq %rcx
cmpq $0x4, %rcx
jne 0x9a2afa
cmpb $0x0, 0x2e8(%rsp)
movq 0xe8(%rsp), %rbp
je 0x9a2b4b
xorl %edx, %edx
movq %rdx, %rcx
cmpq $0x3, %rdx
je 0x9a2b42
leaq 0x1(%rcx), %rdx
cmpb $0x0, 0x2e9(%rsp,%rcx)
jne 0x9a2b2b
cmpq $0x3, %rcx
setae %bl
jmp 0x9a2b4d
xorl %ebx, %ebx
testl %eax, %eax
setne %cl
testb %bl, %cl
je 0x9a2d86
cmpl $0x1, %eax
jne 0x9a3309
movq 0x10(%rsp), %rax
cmpl $0x8, (%rax)
jne 0x9a2b78
movq 0x10(%rsp), %rax
cmpl $0x3, 0x4(%rax)
je 0x9a2b92
leaq 0xa8(%rsp), %rsi
leaq 0x90(%rsp), %rdx
movq 0x10(%rsp), %rdi
callq 0x9bd0da
leaq 0x250(%rsp), %rdi
leaq 0x100(%rsp), %rdx
movq 0xe0(%rsp), %rsi
callq 0x333da2
leaq 0x50(%rsp), %rdi
leaq 0x120(%rsp), %rdx
movq 0xd8(%rsp), %rsi
callq 0x333da2
leaq 0x1c0(%rsp), %rdi
leaq 0x250(%rsp), %rsi
leaq 0x50(%rsp), %rdx
callq 0x34f564
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdx
movq %rbp, %rdi
callq 0x9ab85c
leaq 0x40(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x79ee4(%rip), %rsi # 0xa1caef
leaq 0x79ee3(%rip), %rdx # 0xa1caf5
leaq 0x30(%rsp), %rdi
callq 0x334070
leaq 0xc8(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0x79ec0(%rip), %rsi # 0xa1caef
leaq 0x79ebf(%rip), %rdx # 0xa1caf5
leaq 0xb8(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x2e8(%rsp), %rdi
leaq 0x30(%rsp), %rsi
leaq 0xb8(%rsp), %rdx
leaq 0xa8(%rsp), %r8
leaq 0x90(%rsp), %r9
movq 0x10(%rsp), %rcx
callq 0x9aa796
leaq 0x2e8(%rsp), %rdi
movq %rbp, %rsi
callq 0x9aa92e
movq %rbp, %rdi
callq 0x9ab8b2
leaq 0x318(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a2cb2
movq 0x318(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x2f8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a2cd3
movq 0x2f8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xb8(%rsp), %rdi
cmpq %r15, %rdi
je 0x9a2cf0
movq 0xc8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
cmpq %r14, %rdi
je 0x9a2d07
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1f0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a2d28
movq 0x1f0(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1d0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a2d49
movq 0x1d0(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x60(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a2d64
movq 0x60(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x260(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3309
movq 0x260(%rsp), %rsi
jmp 0x9a3301
movq 0x28(%rsp), %rax
movl 0x4(%rax), %edi
callq 0x9ba40c
cmpl $0x1, %eax
jne 0x9a2dae
movq 0x10(%rsp), %rax
movl 0x4(%rax), %edi
callq 0x9ba40c
cmpl $0x1, %eax
je 0x9a2e59
leaq 0xa8(%rsp), %rdx
leaq 0x90(%rsp), %rcx
movq 0x28(%rsp), %rdi
movq 0x10(%rsp), %rsi
callq 0x99ecdb
leaq 0x2f0(%rsp), %r14
movq %rbp, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
leaq 0xffd1f(%rip), %rsi # 0xaa2b07
movl $0x3c, %edx
movq %r14, %rdi
callq 0x325e70
leaq 0xa8(%rsp), %rsi
movq %r14, %rdi
callq 0x35a850
leaq 0xaf66c(%rip), %rsi # 0xa52478
movl $0x3, %edx
movq %r14, %rdi
callq 0x325e70
leaq 0x90(%rsp), %rsi
movq %r14, %rdi
callq 0x35a850
leaq 0x10045d(%rip), %rsi # 0xaa328d
leaq 0x2e8(%rsp), %rdi
callq 0x9aba2a
movq 0x2b9ce4(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x360(%rsp), %rdi
callq 0x325a80
testb %bl, %bl
jne 0x9a2ee8
leaq 0x2f0(%rsp), %r14
movq %rbp, -0x8(%r14)
movq %r14, %rdi
callq 0x325e00
leaq 0x74697(%rip), %rsi # 0xa17513
movl $0x2a, %edx
movq %r14, %rdi
callq 0x325e70
leaq 0xf0(%rsp), %rsi
movq %r14, %rdi
callq 0x83120c
leaq 0xffd71(%rip), %rsi # 0xaa2c11
movl $0xe, %edx
movq %r14, %rdi
callq 0x325e70
movq %r14, %rdi
movq %r13, %rsi
callq 0x83120c
leaq 0x1003ce(%rip), %rsi # 0xaa328d
leaq 0x2e8(%rsp), %rdi
callq 0x9aba2a
movq 0x2b9c55(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x360(%rsp), %rdi
callq 0x325a80
leaq 0x30(%rsp), %rdi
leaq 0x27(%rsp), %rdx
movq 0xe0(%rsp), %rsi
callq 0x333da2
leaq 0xb8(%rsp), %rdi
leaq 0x26(%rsp), %rdx
movq 0xd8(%rsp), %rsi
callq 0x333da2
leaq 0x50(%rsp), %rdi
leaq 0x30(%rsp), %rsi
leaq 0xb8(%rsp), %rdx
callq 0x34f564
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %rbp, %rdi
callq 0x9ab85c
leaq 0x110(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x79b9a(%rip), %rsi # 0xa1caef
leaq 0x79b99(%rip), %rdx # 0xa1caf5
leaq 0x100(%rsp), %rdi
callq 0x334070
leaq 0x130(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0x79b73(%rip), %rsi # 0xa1caef
leaq 0x79b72(%rip), %rdx # 0xa1caf5
leaq 0x120(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x2e8(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x120(%rsp), %rdx
leaq 0xa8(%rsp), %r8
leaq 0x90(%rsp), %r9
movq 0x10(%rsp), %rcx
callq 0x9aa796
movl %ebx, 0x10(%rsp)
leaq 0x2e8(%rsp), %rdi
movq %rbp, %rsi
callq 0x9aa92e
leaq 0x1b0(%rsp), %rbx
movq %rbx, -0x10(%rbx)
leaq 0x59ddf(%rip), %rsi # 0x9fcdcf
leaq 0x59de1(%rip), %rdx # 0x9fcdd8
leaq 0x1a0(%rsp), %rdi
callq 0x334070
leaq 0x190(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x59db8(%rip), %rsi # 0x9fcdcf
leaq 0x59dba(%rip), %rdx # 0x9fcdd8
leaq 0x180(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x1c0(%rsp), %rdi
leaq 0x1a0(%rsp), %rsi
leaq 0x180(%rsp), %rdx
leaq 0xa8(%rsp), %r8
leaq 0x90(%rsp), %r9
movq 0x28(%rsp), %rcx
callq 0x9aa796
leaq 0x1c0(%rsp), %rdi
movq %rbp, %rsi
callq 0x9aa92e
movq %r14, %rbp
leaq 0x170(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x943d4(%rip), %rsi # 0xa3745f
leaq 0x943d6(%rip), %rdx # 0xa37468
leaq 0x160(%rsp), %rdi
callq 0x334070
leaq 0x150(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x71c77(%rip), %rsi # 0xa14d29
leaq 0x71c7a(%rip), %rdx # 0xa14d33
leaq 0x140(%rsp), %rdi
callq 0x334070
leaq 0x250(%rsp), %rdi
leaq 0x160(%rsp), %rsi
leaq 0x140(%rsp), %rdx
leaq 0x468(%rsp), %rcx
movl $0x2, %r8d
callq 0x9aa588
leaq 0x250(%rsp), %rdi
movq 0xe8(%rsp), %r15
movq %r15, %rsi
callq 0x9aa92e
movq %r15, %rdi
callq 0x9ab8b2
leaq 0x280(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3132
movq 0x280(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x260(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3153
movq 0x260(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x140(%rsp), %rdi
cmpq %r14, %rdi
je 0x9a3170
movq 0x150(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x160(%rsp), %rdi
cmpq %r12, %rdi
leaq 0x110(%rsp), %r12
je 0x9a3195
movq 0x170(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1f0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a31b6
movq 0x1f0(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1d0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a31d7
movq 0x1d0(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x180(%rsp), %rdi
cmpq %rbp, %rdi
je 0x9a31f4
movq 0x190(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x1a0(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a3211
movq 0x1b0(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x318(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
movl 0x10(%rsp), %ebx
je 0x9a3236
movq 0x318(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x2f8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3257
movq 0x2f8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x120(%rsp), %rdi
cmpq %r13, %rdi
je 0x9a3274
movq 0x130(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x100(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a3291
movq 0x110(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x80(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a32b2
movq 0x80(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x60(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a32cd
movq 0x60(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0xc8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a32ee
movq 0xc8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x40(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3309
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x490(%rsp), %rdi
callq 0x9b433a
movl %ebx, %eax
addq $0x4b8, %rsp # imm = 0x4B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %r14
leaq 0xff700(%rip), %rdx # 0xaa2a3e
leaq 0xff752(%rip), %rcx # 0xaa2a97
movq %rax, %rdi
xorl %esi, %esi
movl $0x2f2, %r8d # imm = 0x2F2
callq 0x99c1d4
leaq 0x277d24(%rip), %rsi # 0xc1b080
leaq -0x671583(%rip), %rdx # 0x331de0
movq %r14, %rdi
callq 0x325940
jmp 0x9a3607
movq %rax, %rbx
jmp 0x9a3420
movq %rax, %rbx
jmp 0x9a343d
jmp 0x9a3382
movq %rax, %rbx
jmp 0x9a3454
movq %rax, %rbx
jmp 0x9a3461
movq %rax, %rbx
jmp 0x9a347c
movq %rax, %rbx
jmp 0x9a349d
jmp 0x9a3607
movq %rax, %r15
jmp 0x9a34b5
movq %rax, %r15
jmp 0x9a34d2
jmp 0x9a33bc
movq %r14, %rbp
movq %rax, %r15
jmp 0x9a34ef
movq %r14, %rbp
movq %rax, %r15
jmp 0x9a34fc
movq %rax, %r15
jmp 0x9a3519
jmp 0x9a33d9
movq %rax, %r15
jmp 0x9a3536
movq %rax, %r15
jmp 0x9a3543
movq %rax, %r15
jmp 0x9a3560
jmp 0x9a33f3
movq %rax, %r15
jmp 0x9a3585
movq %rax, %r15
jmp 0x9a358f
movq %rax, %r15
jmp 0x9a35b0
jmp 0x9a3607
movq %rax, %rbx
leaq 0x2e8(%rsp), %rdi
callq 0x34f5c8
movq 0xb8(%rsp), %rdi
cmpq %r15, %rdi
je 0x9a343d
movq 0xc8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x30(%rsp), %rdi
cmpq %r14, %rdi
je 0x9a3454
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1c0(%rsp), %rdi
callq 0x34f5fe
leaq 0x60(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a347c
movq 0x60(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x260(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a349d
movq 0x260(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %r15
jmp 0x9a360a
movq %rax, %r15
leaq 0x250(%rsp), %rdi
callq 0x34f5c8
movq 0x140(%rsp), %rdi
cmpq %r14, %rdi
je 0x9a34d2
movq 0x150(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x160(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a34ef
movq 0x170(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1c0(%rsp), %rdi
callq 0x34f5c8
movq 0x180(%rsp), %rdi
cmpq %rbp, %rdi
je 0x9a3519
movq 0x190(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x1a0(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a3536
movq 0x1b0(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x2e8(%rsp), %rdi
callq 0x34f5c8
movq 0x120(%rsp), %rdi
cmpq %r13, %rdi
je 0x9a3560
movq 0x130(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x100(%rsp), %rdi
leaq 0x110(%rsp), %rax
cmpq %rax, %rdi
je 0x9a3585
movq 0x110(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x50(%rsp), %rdi
callq 0x34f5fe
leaq 0xc8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a35b0
movq 0xc8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x40(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a360a
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x9a360a
jmp 0x9a3607
movq %rax, %r15
movq %r14, %rdi
callq 0x325d40
jmp 0x9a360a
jmp 0x9a3607
jmp 0x9a35e2
jmp 0x9a3607
movq %rax, %r15
movq 0x2b953c(%rip), %rsi # 0xc5cb28
movq %r14, %rdi
callq 0x325aa0
leaq 0x360(%rsp), %rdi
callq 0x325a80
jmp 0x9a360a
jmp 0x9a3607
jmp 0x9a3607
movq %rax, %r15
leaq 0x490(%rsp), %rdi
callq 0x9b433a
movq %r15, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuImageCompare.cpp
|
tcu::intThresholdPositionDeviationCompare(tcu::TestLog&, char const*, char const*, tcu::ConstPixelBufferAccess const&, tcu::ConstPixelBufferAccess const&, tcu::Vector<unsigned int, 4> const&, tcu::Vector<int, 3> const&, bool, tcu::CompareLogMode)
|
bool intThresholdPositionDeviationCompare (TestLog& log, const char* imageSetName, const char* imageSetDesc, const ConstPixelBufferAccess& reference, const ConstPixelBufferAccess& result, const UVec4& threshold, const tcu::IVec3& maxPositionDeviation, bool acceptOutOfBoundsAsAnyValue, CompareLogMode logMode)
{
const int width = reference.getWidth();
const int height = reference.getHeight();
const int depth = reference.getDepth();
TextureLevel errorMaskStorage (TextureFormat(TextureFormat::RGB, TextureFormat::UNORM_INT8), width, height, depth);
PixelBufferAccess errorMask = errorMaskStorage.getAccess();
const int numFailingPixels = findNumPositionDeviationFailingPixels(errorMask, reference, result, threshold, maxPositionDeviation, acceptOutOfBoundsAsAnyValue);
const bool compareOk = numFailingPixels == 0;
Vec4 pixelBias (0.0f, 0.0f, 0.0f, 0.0f);
Vec4 pixelScale (1.0f, 1.0f, 1.0f, 1.0f);
if (!compareOk || logMode == COMPARE_LOG_EVERYTHING)
{
// All formats except normalized unsigned fixed point ones need remapping in order to fit into unorm channels in logged images.
if (tcu::getTextureChannelClass(reference.getFormat().type) != tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT ||
tcu::getTextureChannelClass(result.getFormat().type) != tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT)
{
computeScaleAndBias(reference, result, pixelScale, pixelBias);
log << TestLog::Message << "Result and reference images are normalized with formula p * " << pixelScale << " + " << pixelBias << TestLog::EndMessage;
}
if (!compareOk)
log << TestLog::Message
<< "Image comparison failed:\n"
<< "\tallowed position deviation = " << maxPositionDeviation << "\n"
<< "\tcolor threshold = " << threshold
<< TestLog::EndMessage;
log << TestLog::ImageSet(imageSetName, imageSetDesc)
<< TestLog::Image("Result", "Result", result, pixelScale, pixelBias)
<< TestLog::Image("Reference", "Reference", reference, pixelScale, pixelBias)
<< TestLog::Image("ErrorMask", "Error mask", errorMask)
<< TestLog::EndImageSet;
}
else if (logMode == COMPARE_LOG_RESULT)
{
if (result.getFormat() != TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8))
computePixelScaleBias(result, pixelScale, pixelBias);
log << TestLog::ImageSet(imageSetName, imageSetDesc)
<< TestLog::Image("Result", "Result", result, pixelScale, pixelBias)
<< TestLog::EndImageSet;
}
return compareOk;
}
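// Usage sketch (added for illustration, not part of the original source; the
// "log", "reference" and "result" arguments are assumed to come from the caller):
// accept up to an 8-step per-channel color difference and a +-1 pixel position
// deviation in x/y, reject out-of-bounds taps, and log only the result image.
static bool fuzzyCompareExample (TestLog& log, const ConstPixelBufferAccess& reference, const ConstPixelBufferAccess& result)
{
    const tcu::UVec4 threshold            (8u, 8u, 8u, 8u);
    const tcu::IVec3 maxPositionDeviation (1, 1, 0);
    return intThresholdPositionDeviationCompare(log, "CompareResult", "Image comparison result",
                                                reference, result, threshold, maxPositionDeviation,
                                                false, COMPARE_LOG_RESULT);
}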
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x478, %rsp # imm = 0x478
movq %r9, %rbx
movq %r8, %r15
movq %rcx, %r12
movq %rdx, 0x78(%rsp)
movq %rsi, 0x80(%rsp)
movq %rdi, %r14
movl 0x8(%rcx), %edx
movl 0xc(%rcx), %ecx
movl 0x10(%r12), %r8d
movabsq $0x300000007, %rax # imm = 0x300000007
leaq 0x2f8(%rsp), %rsi
movq %rax, (%rsi)
leaq 0x2d0(%rsp), %r13
movq %r13, %rdi
callq 0x9b4270
leaq 0x2a8(%rsp), %rdi
movq %r13, %rsi
callq 0x41071a
movq 0x4b0(%rsp), %r13
movzbl 0x4b8(%rsp), %r9d
leaq 0x2a8(%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
movq %rbx, %rcx
movq %r13, %r8
callq 0x9a412f
movl %eax, %ebp
movl 0x4c0(%rsp), %eax
testl %ebp, %ebp
sete %cl
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x5d5a0(%rip), %xmm0 # 0xa00c70
movups %xmm0, 0x28(%rsp)
testl %eax, %eax
setne %dl
testb %cl, %dl
je 0x9a38f1
cmpl $0x1, %eax
jne 0x9a3e6c
cmpl $0x8, (%r15)
jne 0x9a36f8
cmpl $0x3, 0x4(%r15)
je 0x9a370a
leaq 0x28(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movq %r15, %rdi
callq 0x9bd0da
leaq 0x218(%rsp), %rdi
leaq 0xc8(%rsp), %rdx
movq 0x80(%rsp), %rsi
callq 0x333da2
leaq 0x148(%rsp), %rdi
leaq 0xa8(%rsp), %rdx
movq 0x78(%rsp), %rsi
callq 0x333da2
leaq 0x188(%rsp), %rdi
leaq 0x218(%rsp), %rsi
leaq 0x148(%rsp), %rdx
callq 0x34f564
movq 0x188(%rsp), %rsi
movq 0x1a8(%rsp), %rdx
movq %r14, %rdi
callq 0x9ab85c
leaq 0x68(%rsp), %rbx
movq %rbx, -0x10(%rbx)
leaq 0x79369(%rip), %rsi # 0xa1caef
leaq 0x79368(%rip), %rdx # 0xa1caf5
leaq 0x58(%rsp), %rdi
callq 0x334070
leaq 0x48(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x79347(%rip), %rsi # 0xa1caef
leaq 0x79346(%rip), %rdx # 0xa1caf5
leaq 0x38(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x2f8(%rsp), %rdi
leaq 0x58(%rsp), %rsi
leaq 0x38(%rsp), %rdx
leaq 0x28(%rsp), %r8
leaq 0x10(%rsp), %r9
movq %r15, %rcx
callq 0x9aa796
leaq 0x2f8(%rsp), %rdi
movq %r14, %rsi
callq 0x9aa92e
movq %r14, %rdi
callq 0x9ab8b2
leaq 0x328(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a381d
movq 0x328(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x308(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a383e
movq 0x308(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x38(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a3855
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x58(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a386c
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1b8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a388d
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x198(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a38ae
movq 0x198(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x158(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a38cf
movq 0x158(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x228(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3e6c
movq 0x228(%rsp), %rsi
jmp 0x9a3e64
movl 0x4(%r12), %edi
callq 0x9ba40c
cmpl $0x1, %eax
movl %ebp, 0xc(%rsp)
jne 0x9a3916
movl 0x4(%r15), %edi
callq 0x9ba40c
cmpl $0x1, %eax
je 0x9a39b5
leaq 0x28(%rsp), %rdx
leaq 0x10(%rsp), %rcx
movq %r12, %rdi
movq %r15, %rsi
callq 0x99ecdb
leaq 0x300(%rsp), %rbp
movq %r14, -0x8(%rbp)
movq %rbp, %rdi
callq 0x325e00
leaq 0xff1c1(%rip), %rsi # 0xaa2b07
movl $0x3c, %edx
movq %rbp, %rdi
callq 0x325e70
leaq 0x28(%rsp), %rsi
movq %rbp, %rdi
callq 0x35a850
leaq 0xaeb11(%rip), %rsi # 0xa52478
movl $0x3, %edx
movq %rbp, %rdi
callq 0x325e70
leaq 0x10(%rsp), %rsi
movq %rbp, %rdi
callq 0x35a850
leaq 0xff905(%rip), %rsi # 0xaa328d
leaq 0x2f8(%rsp), %rdi
callq 0x9aba2a
movq 0x2b918c(%rip), %rsi # 0xc5cb28
movq %rbp, %rdi
callq 0x325aa0
leaq 0x370(%rsp), %rdi
callq 0x325a80
movl 0xc(%rsp), %ebp
testl %ebp, %ebp
je 0x9a3a67
leaq 0x300(%rsp), %rbp
movq %r14, -0x8(%rbp)
movq %rbp, %rdi
callq 0x325e00
leaq 0xff1a8(%rip), %rsi # 0xaa2b80
movl $0x19, %edx
movq %rbp, %rdi
callq 0x325e70
leaq 0xff1ae(%rip), %rsi # 0xaa2b9a
movl $0x1e, %edx
movq %rbp, %rdi
callq 0x325e70
movq %rbp, %rdi
movq %r13, %rsi
callq 0x83114a
leaq 0xb052a(%rip), %rsi # 0xa53f35
movl $0x1, %edx
movq %rbp, %rdi
callq 0x325e70
leaq 0xff19a(%rip), %rsi # 0xaa2bb9
movl $0x13, %edx
movq %rbp, %rdi
callq 0x325e70
movq %rbp, %rdi
movq %rbx, %rsi
callq 0x83120c
leaq 0xff84f(%rip), %rsi # 0xaa328d
leaq 0x2f8(%rsp), %rdi
callq 0x9aba2a
movq 0x2b90d6(%rip), %rsi # 0xc5cb28
movq %rbp, %rdi
callq 0x325aa0
leaq 0x370(%rsp), %rdi
callq 0x325a80
leaq 0x58(%rsp), %rdi
leaq 0xb(%rsp), %rdx
movq 0x80(%rsp), %rsi
callq 0x333da2
leaq 0x38(%rsp), %rdi
leaq 0xa(%rsp), %rdx
movq 0x78(%rsp), %rsi
callq 0x333da2
leaq 0x148(%rsp), %rdi
leaq 0x58(%rsp), %rsi
leaq 0x38(%rsp), %rdx
callq 0x34f564
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdx
movq %r14, %rdi
callq 0x9ab85c
leaq 0xd8(%rsp), %rbp
movq %rbp, -0x10(%rbp)
leaq 0x7901b(%rip), %rsi # 0xa1caef
leaq 0x7901a(%rip), %rdx # 0xa1caf5
leaq 0xc8(%rsp), %rdi
callq 0x334070
leaq 0xb8(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0x78ff4(%rip), %rsi # 0xa1caef
leaq 0x78ff3(%rip), %rdx # 0xa1caf5
leaq 0xa8(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x2f8(%rsp), %rdi
leaq 0xc8(%rsp), %rsi
leaq 0xa8(%rsp), %rdx
leaq 0x28(%rsp), %r8
leaq 0x10(%rsp), %r9
movq %r15, %rcx
callq 0x9aa796
leaq 0x2f8(%rsp), %rdi
movq %r14, %rsi
callq 0x9aa92e
leaq 0x98(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0x5926c(%rip), %rsi # 0x9fcdcf
leaq 0x5926e(%rip), %rdx # 0x9fcdd8
leaq 0x88(%rsp), %rdi
callq 0x334070
leaq 0x138(%rsp), %rbx
movq %rbx, -0x10(%rbx)
leaq 0x59245(%rip), %rsi # 0x9fcdcf
leaq 0x59247(%rip), %rdx # 0x9fcdd8
leaq 0x128(%rsp), %rdi
callq 0x334070
movl $0x2, (%rsp)
leaq 0x188(%rsp), %rdi
leaq 0x88(%rsp), %rsi
leaq 0x128(%rsp), %rdx
leaq 0x28(%rsp), %r8
leaq 0x10(%rsp), %r9
movq %r12, %rcx
callq 0x9aa796
leaq 0x188(%rsp), %rdi
movq %r14, %rsi
callq 0x9aa92e
movq %r13, %r15
leaq 0x118(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x93869(%rip), %rsi # 0xa3745f
leaq 0x9386b(%rip), %rdx # 0xa37468
leaq 0x108(%rsp), %rdi
callq 0x334070
leaq 0xf8(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0x7110c(%rip), %rsi # 0xa14d29
leaq 0x7110f(%rip), %rdx # 0xa14d33
leaq 0xe8(%rsp), %rdi
callq 0x334070
leaq 0x218(%rsp), %rdi
leaq 0x108(%rsp), %rsi
leaq 0xe8(%rsp), %rdx
leaq 0x2a8(%rsp), %rcx
movl $0x2, %r8d
callq 0x9aa588
leaq 0x218(%rsp), %rdi
movq %r14, %rsi
callq 0x9aa92e
movq %r14, %rdi
callq 0x9ab8b2
leaq 0x248(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3c95
movq 0x248(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x228(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3cb6
movq 0x228(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xe8(%rsp), %rdi
cmpq %r13, %rdi
je 0x9a3cd3
movq 0xf8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x108(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a3cf0
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x1b8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3d11
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x198(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3d32
movq 0x198(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x128(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a3d4f
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x88(%rsp), %rdi
leaq 0x98(%rsp), %rax
cmpq %rax, %rdi
je 0x9a3d74
movq 0x98(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x328(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3d95
movq 0x328(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x308(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3db6
movq 0x308(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xa8(%rsp), %rdi
cmpq %r15, %rdi
je 0x9a3dd3
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xc8(%rsp), %rdi
cmpq %rbp, %rdi
movl 0xc(%rsp), %ebp
je 0x9a3df4
movq 0xd8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x178(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3e15
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x158(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3e36
movq 0x158(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x48(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3e51
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x68(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3e6c
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
testl %ebp, %ebp
sete %bl
leaq 0x2d0(%rsp), %rdi
callq 0x9b433a
movl %ebx, %eax
addq $0x478, %rsp # imm = 0x478
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r14
jmp 0x9a3f4d
movq %rax, %r14
jmp 0x9a3f64
jmp 0x9a3ea4
movq %rax, %r14
jmp 0x9a3f7b
movq %rax, %r14
jmp 0x9a3f88
movq %rax, %r14
jmp 0x9a3fa9
jmp 0x9a40f4
jmp 0x9a40f4
jmp 0x9a40f4
movq %rax, %r14
jmp 0x9a3fdb
movq %rax, %r14
jmp 0x9a3ff8
jmp 0x9a3ee0
movq %r13, %r15
movq %rax, %r14
jmp 0x9a4015
movq %r13, %r15
movq %rax, %r14
jmp 0x9a4022
movq %r13, %r15
movq %rax, %r14
jmp 0x9a403f
jmp 0x9a3f00
movq %r13, %r15
movq %rax, %r14
jmp 0x9a4064
movq %r13, %r15
movq %rax, %r14
jmp 0x9a4071
movq %rax, %r14
jmp 0x9a408e
jmp 0x9a3f20
movq %rax, %r14
jmp 0x9a40ab
movq %rax, %r14
jmp 0x9a40b8
movq %rax, %r14
jmp 0x9a40d3
jmp 0x9a40f4
movq %rax, %r14
leaq 0x2f8(%rsp), %rdi
callq 0x34f5c8
movq 0x38(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a3f64
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x58(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a3f7b
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x188(%rsp), %rdi
callq 0x34f5fe
leaq 0x158(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a3fa9
movq 0x158(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x228(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a411a
movq 0x228(%rsp), %rsi
jmp 0x9a40e6
movq %rax, %r14
leaq 0x218(%rsp), %rdi
callq 0x34f5c8
movq 0xe8(%rsp), %rdi
cmpq %r13, %rdi
je 0x9a3ff8
movq 0xf8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x108(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a4015
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x188(%rsp), %rdi
callq 0x34f5c8
movq 0x128(%rsp), %rdi
cmpq %rbx, %rdi
je 0x9a403f
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0x88(%rsp), %rdi
leaq 0x98(%rsp), %rax
cmpq %rax, %rdi
je 0x9a4064
movq 0x98(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x2f8(%rsp), %rdi
callq 0x34f5c8
movq 0xa8(%rsp), %rdi
cmpq %r15, %rdi
je 0x9a408e
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq 0xc8(%rsp), %rdi
cmpq %rbp, %rdi
je 0x9a40ab
movq 0xd8(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x148(%rsp), %rdi
callq 0x34f5fe
leaq 0x48(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a40d3
movq 0x48(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x68(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a411a
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x9a411a
jmp 0x9a40f4
jmp 0x9a40f4
movq %rax, %r14
jmp 0x9a411a
jmp 0x9a40fb
movq %rax, %r14
movq 0x2b8a23(%rip), %rsi # 0xc5cb28
movq %rbp, %rdi
callq 0x325aa0
leaq 0x370(%rsp), %rdi
callq 0x325a80
leaq 0x2d0(%rsp), %rdi
callq 0x9b433a
movq %r14, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuImageCompare.cpp
|
tcu::(anonymous namespace)::findNumPositionDeviationFailingPixels(tcu::PixelBufferAccess const&, tcu::ConstPixelBufferAccess const&, tcu::ConstPixelBufferAccess const&, tcu::Vector<unsigned int, 4> const&, tcu::Vector<int, 3> const&, bool)
|
static int findNumPositionDeviationFailingPixels (const PixelBufferAccess& errorMask, const ConstPixelBufferAccess& reference, const ConstPixelBufferAccess& result, const UVec4& threshold, const tcu::IVec3& maxPositionDeviation, bool acceptOutOfBoundsAsAnyValue)
{
const tcu::IVec4 okColor (0, 255, 0, 255);
const tcu::IVec4 errorColor (255, 0, 0, 255);
const int width = reference.getWidth();
const int height = reference.getHeight();
const int depth = reference.getDepth();
int numFailingPixels = 0;
// Accept pixels "sampling" over the image bounds, since such "taps" could return anything
const int beginX = (acceptOutOfBoundsAsAnyValue) ? (maxPositionDeviation.x()) : (0);
const int beginY = (acceptOutOfBoundsAsAnyValue) ? (maxPositionDeviation.y()) : (0);
const int beginZ = (acceptOutOfBoundsAsAnyValue) ? (maxPositionDeviation.z()) : (0);
const int endX = (acceptOutOfBoundsAsAnyValue) ? (width - maxPositionDeviation.x()) : (width);
const int endY = (acceptOutOfBoundsAsAnyValue) ? (height - maxPositionDeviation.y()) : (height);
const int endZ = (acceptOutOfBoundsAsAnyValue) ? (depth - maxPositionDeviation.z()) : (depth);
TCU_CHECK_INTERNAL(result.getWidth() == width && result.getHeight() == height && result.getDepth() == depth);
DE_ASSERT(endX > 0 && endY > 0 && endZ > 0); // most likely a bug
tcu::clear(errorMask, okColor);
for (int z = beginZ; z < endZ; z++)
{
for (int y = beginY; y < endY; y++)
{
for (int x = beginX; x < endX; x++)
{
const IVec4 refPix = reference.getPixelInt(x, y, z);
const IVec4 cmpPix = result.getPixelInt(x, y, z);
// Exact match
{
const UVec4 diff = abs(refPix - cmpPix).cast<deUint32>();
const bool isOk = boolAll(lessThanEqual(diff, threshold));
if (isOk)
continue;
}
// Find matching pixels for both result and reference pixel
{
bool pixelFoundForReference = false;
// Find deviated result pixel for reference
for (int sz = de::max(0, z - maxPositionDeviation.z()); sz <= de::min(depth - 1, z + maxPositionDeviation.z()) && !pixelFoundForReference; ++sz)
for (int sy = de::max(0, y - maxPositionDeviation.y()); sy <= de::min(height - 1, y + maxPositionDeviation.y()) && !pixelFoundForReference; ++sy)
for (int sx = de::max(0, x - maxPositionDeviation.x()); sx <= de::min(width - 1, x + maxPositionDeviation.x()) && !pixelFoundForReference; ++sx)
{
const IVec4 deviatedCmpPix = result.getPixelInt(sx, sy, sz);
const UVec4 diff = abs(refPix - deviatedCmpPix).cast<deUint32>();
const bool isOk = boolAll(lessThanEqual(diff, threshold));
pixelFoundForReference = isOk;
}
if (!pixelFoundForReference)
{
errorMask.setPixel(errorColor, x, y, z);
++numFailingPixels;
continue;
}
}
{
bool pixelFoundForResult = false;
// Find deviated reference pixel for result
for (int sz = de::max(0, z - maxPositionDeviation.z()); sz <= de::min(depth - 1, z + maxPositionDeviation.z()) && !pixelFoundForResult; ++sz)
for (int sy = de::max(0, y - maxPositionDeviation.y()); sy <= de::min(height - 1, y + maxPositionDeviation.y()) && !pixelFoundForResult; ++sy)
for (int sx = de::max(0, x - maxPositionDeviation.x()); sx <= de::min(width - 1, x + maxPositionDeviation.x()) && !pixelFoundForResult; ++sx)
{
const IVec4 deviatedRefPix = reference.getPixelInt(sx, sy, sz);
const UVec4 diff = abs(cmpPix - deviatedRefPix).cast<deUint32>();
const bool isOk = boolAll(lessThanEqual(diff, threshold));
pixelFoundForResult = isOk;
}
if (!pixelFoundForResult)
{
errorMask.setPixel(errorColor, x, y, z);
++numFailingPixels;
continue;
}
}
}
}
}
return numFailingPixels;
}
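// Scalar sketch (added for illustration) of the per-tap acceptance test used in
// both search loops above, written with plain arrays instead of tcu vectors: a
// tap matches when every channel's absolute difference is within the threshold.
static bool tapWithinThreshold (const int refPix[4], const int cmpPix[4], const unsigned int threshold[4])
{
    for (int c = 0; c < 4; c++)
    {
        const int          d    = refPix[c] - cmpPix[c];
        const unsigned int diff = (unsigned int)(d < 0 ? -d : d);
        if (diff > threshold[c])
            return false; // one failing channel rejects the whole tap
    }
    return true;
}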
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movq %r8, 0x8(%rsp)
movq %rcx, %r14
movq %rdi, 0x68(%rsp)
movaps 0x9de4c(%rip), %xmm0 # 0xa41fa0
movups %xmm0, 0xc8(%rsp)
movaps 0x9de2d(%rip), %xmm0 # 0xa41f90
movups %xmm0, 0xb8(%rsp)
movq %rsi, 0x60(%rsp)
movl 0x8(%rsi), %ebx
movl %r9d, 0x3c(%rsp)
movl %r9d, 0x38(%rsp)
movl %r9d, %eax
testl %r9d, %r9d
je 0x9a419a
movq 0x8(%rsp), %rax
movl (%rax), %ecx
movl %ecx, 0x3c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x38(%rsp)
movl 0x8(%rax), %eax
cmpl %ebx, 0x8(%rdx)
jne 0x9a4757
movq 0x60(%rsp), %rcx
movl 0xc(%rcx), %ecx
cmpl %ecx, 0xc(%rdx)
jne 0x9a4757
movl %ecx, %ebp
movq 0x60(%rsp), %rcx
movl 0x10(%rcx), %r13d
cmpl %r13d, 0x10(%rdx)
jne 0x9a4757
movq %rdx, 0x90(%rsp)
movl %r13d, %ecx
subl %eax, %ecx
movl %ecx, 0x40(%rsp)
movl $0x0, 0x14(%rsp)
movl $0x0, %r12d
testb %r9b, %r9b
je 0x9a41f6
movq 0x8(%rsp), %rax
movl 0x8(%rax), %r12d
leaq 0xc8(%rsp), %rsi
movq 0x68(%rsp), %rdi
callq 0x9bb6a6
movl %r12d, 0x18(%rsp)
cmpl 0x40(%rsp), %r12d
jge 0x9a4741
movl %ebx, %eax
subl 0x3c(%rsp), %eax
movl %eax, 0x48(%rsp)
movl %ebp, %eax
subl 0x38(%rsp), %eax
movl %eax, 0x44(%rsp)
decl %r13d
decl %ebp
movl %ebp, 0x1c(%rsp)
decl %ebx
movl $0x0, 0x14(%rsp)
movl %r13d, 0x4c(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0x4(%rsp)
cmpl 0x44(%rsp), %eax
movl 0x18(%rsp), %ebp
jge 0x9a4731
movl 0x3c(%rsp), %eax
movl %eax, %r8d
cmpl 0x48(%rsp), %eax
jge 0x9a471d
leaq 0xa8(%rsp), %rdi
movq 0x60(%rsp), %rsi
movl %r8d, %edx
movl 0x4(%rsp), %r12d
movl %r12d, %ecx
movl %r8d, %r15d
movl %ebp, %r8d
callq 0x9ad9f2
leaq 0x98(%rsp), %rdi
movq 0x90(%rsp), %rsi
movl %r15d, 0x10(%rsp)
movl %r15d, %edx
movl %r12d, %ecx
movl %ebp, %r8d
callq 0x9ad9f2
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
xorl %eax, %eax
movl 0xa8(%rsp,%rax,4), %ecx
subl 0x98(%rsp,%rax,4), %ecx
movl %ecx, 0x20(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a42bb
xorps %xmm0, %xmm0
movaps %xmm0, 0x50(%rsp)
xorl %eax, %eax
movl 0x20(%rsp,%rax,4), %ecx
movl %ecx, %edx
negl %edx
cmovsl %ecx, %edx
movl %edx, 0x50(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a42e0
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0x80(%rsp)
movl $0x0, 0x50(%rsp)
xorl %eax, %eax
movl 0x10(%rsp), %r8d
movl 0x80(%rsp,%rax,4), %ecx
cmpl (%r14,%rax,4), %ecx
setbe 0x50(%rsp,%rax)
incq %rax
cmpq $0x4, %rax
jne 0x9a4314
cmpb $0x0, 0x50(%rsp)
je 0x9a4354
xorl %ecx, %ecx
movq %rcx, %rax
cmpq $0x3, %rcx
je 0x9a434a
leaq 0x1(%rax), %rcx
cmpb $0x0, 0x51(%rsp,%rax)
jne 0x9a4336
cmpq $0x2, %rax
ja 0x9a470f
movq 0x8(%rsp), %rax
movl 0x8(%rax), %eax
movl %ebp, %r12d
subl %eax, %r12d
xorl %r15d, %r15d
testl %r12d, %r12d
cmovlel %r15d, %r12d
addl %ebp, %eax
cmpl %eax, %r13d
cmovll %r13d, %eax
cmpl %eax, %r12d
jg 0x9a4508
xorl %r15d, %r15d
movq 0x8(%rsp), %rax
movl 0x4(%rax), %eax
movl 0x4(%rsp), %edx
movl %edx, %r13d
subl %eax, %r13d
testl %r13d, %r13d
movl $0x0, %ecx
cmovlel %ecx, %r13d
addl %edx, %eax
movl 0x1c(%rsp), %ecx
cmpl %eax, %ecx
cmovll %ecx, %eax
cmpl %eax, %r13d
jg 0x9a44dc
testb $0x1, %r15b
jne 0x9a44dc
movq 0x8(%rsp), %rax
movl (%rax), %eax
movl %r8d, %ebp
subl %eax, %ebp
testl %ebp, %ebp
movl $0x0, %ecx
cmovlel %ecx, %ebp
addl %r8d, %eax
cmpl %eax, %ebx
cmovll %ebx, %eax
cmpl %eax, %ebp
jg 0x9a44c8
testb $0x1, %r15b
jne 0x9a44c8
leaq 0x80(%rsp), %rdi
movq 0x90(%rsp), %rsi
movl %ebp, %edx
movl %r13d, %ecx
movl %r12d, %r8d
callq 0x9ad9f2
xorps %xmm0, %xmm0
movaps %xmm0, 0x70(%rsp)
xorl %eax, %eax
movl 0xa8(%rsp,%rax,4), %ecx
subl 0x80(%rsp,%rax,4), %ecx
movl %ecx, 0x70(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a4414
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
xorl %eax, %eax
movl 0x70(%rsp,%rax,4), %ecx
movl %ecx, %edx
negl %edx
cmovsl %ecx, %edx
movl %edx, 0x20(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a4439
movaps 0x20(%rsp), %xmm0
movaps %xmm0, 0x50(%rsp)
movl $0x0, 0x20(%rsp)
xorl %eax, %eax
movl 0x10(%rsp), %r8d
movl 0x50(%rsp,%rax,4), %ecx
cmpl (%r14,%rax,4), %ecx
setbe 0x20(%rsp,%rax)
incq %rax
cmpq $0x4, %rax
jne 0x9a446a
cmpb $0x0, 0x20(%rsp)
je 0x9a44a7
xorl %ecx, %ecx
movq %rcx, %rax
cmpq $0x3, %rcx
je 0x9a449d
leaq 0x1(%rax), %rcx
cmpb $0x0, 0x21(%rsp,%rax)
jne 0x9a4489
cmpq $0x3, %rax
setae %r15b
jmp 0x9a44aa
xorl %r15d, %r15d
incl %ebp
movq 0x8(%rsp), %rax
movl (%rax), %eax
addl %r8d, %eax
cmpl %eax, %ebx
cmovll %ebx, %eax
cmpl %eax, %ebp
jg 0x9a44c8
testb %r15b, %r15b
je 0x9a43ed
incl %r13d
movq 0x8(%rsp), %rax
movl 0x4(%rax), %eax
addl 0x4(%rsp), %eax
jmp 0x9a43a1
incl %r12d
movq 0x8(%rsp), %rax
movl 0x8(%rax), %eax
movl 0x18(%rsp), %ebp
addl %ebp, %eax
movl 0x4c(%rsp), %r13d
cmpl %eax, %r13d
cmovll %r13d, %eax
cmpl %eax, %r12d
jg 0x9a4508
testb $0x1, %r15b
je 0x9a4381
testb $0x1, %r15b
jne 0x9a4533
movq 0x68(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r8d, %edx
movl 0x4(%rsp), %ecx
movl %ebp, %r8d
callq 0x9af5c4
movl 0x10(%rsp), %r8d
incl 0x14(%rsp)
testb $0x1, %r15b
je 0x9a470f
movq 0x8(%rsp), %rax
movl 0x8(%rax), %eax
movl %ebp, %r12d
subl %eax, %r12d
testl %r12d, %r12d
movl $0x0, %ecx
cmovlel %ecx, %r12d
addl %ebp, %eax
cmpl %eax, %r13d
cmovll %r13d, %eax
cmpl %eax, %r12d
jg 0x9a46ea
xorl %eax, %eax
movq 0x8(%rsp), %rcx
movl 0x4(%rcx), %ecx
movl 0x4(%rsp), %esi
movl %esi, %r13d
subl %ecx, %r13d
testl %r13d, %r13d
movl $0x0, %edx
cmovlel %edx, %r13d
addl %esi, %ecx
movl 0x1c(%rsp), %edx
cmpl %ecx, %edx
cmovll %edx, %ecx
cmpl %ecx, %r13d
jg 0x9a46bc
testb $0x1, %al
jne 0x9a46bc
movq 0x8(%rsp), %rcx
movl (%rcx), %ecx
movl %r8d, %ebp
subl %ecx, %ebp
testl %ebp, %ebp
movl $0x0, %edx
cmovlel %edx, %ebp
addl %r8d, %ecx
cmpl %ecx, %ebx
cmovll %ebx, %ecx
cmpl %ecx, %ebp
jg 0x9a46a8
testb $0x1, %al
jne 0x9a46a8
leaq 0x80(%rsp), %rdi
movq 0x60(%rsp), %rsi
movl %ebp, %edx
movl %r13d, %ecx
movl %r12d, %r8d
callq 0x9ad9f2
xorps %xmm0, %xmm0
movaps %xmm0, 0x70(%rsp)
xorl %eax, %eax
movl 0x98(%rsp,%rax,4), %ecx
subl 0x80(%rsp,%rax,4), %ecx
movl %ecx, 0x70(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a45f7
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
xorl %eax, %eax
movl 0x70(%rsp,%rax,4), %ecx
movl %ecx, %edx
negl %edx
cmovsl %ecx, %edx
movl %edx, 0x20(%rsp,%rax,4)
incq %rax
cmpq $0x4, %rax
jne 0x9a461c
movaps 0x20(%rsp), %xmm0
movaps %xmm0, 0x50(%rsp)
movl $0x0, 0x20(%rsp)
xorl %eax, %eax
movl 0x10(%rsp), %r8d
movl 0x50(%rsp,%rax,4), %ecx
cmpl (%r14,%rax,4), %ecx
setbe 0x20(%rsp,%rax)
incq %rax
cmpq $0x4, %rax
jne 0x9a464d
cmpb $0x0, 0x20(%rsp)
je 0x9a4689
xorl %ecx, %ecx
movq %rcx, %rax
cmpq $0x3, %rcx
je 0x9a4680
leaq 0x1(%rax), %rcx
cmpb $0x0, 0x21(%rsp,%rax)
jne 0x9a466c
cmpq $0x3, %rax
setae %al
jmp 0x9a468b
xorl %eax, %eax
incl %ebp
movq 0x8(%rsp), %rcx
movl (%rcx), %ecx
addl %r8d, %ecx
cmpl %ecx, %ebx
cmovll %ebx, %ecx
cmpl %ecx, %ebp
jg 0x9a46a8
testb %al, %al
je 0x9a45d3
incl %r13d
movq 0x8(%rsp), %rcx
movl 0x4(%rcx), %ecx
addl 0x4(%rsp), %ecx
jmp 0x9a458b
incl %r12d
movq 0x8(%rsp), %rcx
movl 0x8(%rcx), %ecx
movl 0x18(%rsp), %ebp
addl %ebp, %ecx
movl 0x4c(%rsp), %r13d
cmpl %ecx, %r13d
cmovll %r13d, %ecx
cmpl %ecx, %r12d
jg 0x9a46e6
testb $0x1, %al
je 0x9a456b
testb $0x1, %al
jne 0x9a470f
movq 0x68(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r8d, %edx
movl 0x4(%rsp), %ecx
movl %ebp, %r8d
callq 0x9af5c4
movl 0x10(%rsp), %r8d
incl 0x14(%rsp)
incl %r8d
cmpl 0x48(%rsp), %r8d
jl 0x9a426b
movl 0x4(%rsp), %eax
incl %eax
movl %eax, 0x4(%rsp)
cmpl 0x44(%rsp), %eax
jl 0x9a425a
incl %ebp
movl %ebp, 0x18(%rsp)
cmpl 0x40(%rsp), %ebp
jl 0x9a4244
movl 0x14(%rsp), %eax
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %rbx
leaq 0xfe2d3(%rip), %rdx # 0xaa2a3e
leaq 0xfe325(%rip), %rcx # 0xaa2a97
movq %rax, %rdi
xorl %esi, %esi
movl $0x6c, %r8d
callq 0x99c1d4
leaq 0x2768f7(%rip), %rsi # 0xc1b080
leaq -0x6729b0(%rip), %rdx # 0x331de0
movq %rbx, %rdi
callq 0x325940
movq %rax, %r14
movq %rbx, %rdi
callq 0x325d40
movq %r14, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuImageCompare.cpp
|
tcu::getPositionOfIEEEFloatWithoutDenormals(float)
|
static deInt32 getPositionOfIEEEFloatWithoutDenormals (float x)
{
DE_ASSERT(!deIsNaN(x)); // not sane
if (x == 0.0f)
return 0;
else if (x < 0.0f)
return -getPositionOfIEEEFloatWithoutDenormals(-x);
else
{
DE_ASSERT(x > 0.0f);
const tcu::Float32 f(x);
if (f.isDenorm())
{
// Denorms are flushed to zero
return 0;
}
else
{
// sign is 0, and it's a normal number. Natural position is its bit
// pattern but since we've collapsed the denorms, we must remove
// the gap here too to keep the float enumeration continuous.
//
// Denormals occupy one exponent pattern. Removing one from the
// exponent should do the trick. Add one since the removed range
// contained one representable value, 0.
return (deInt32)(f.bits() - (1u << 23u) + 1u);
}
}
}
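// Worked example (added for illustration): the smallest positive normal float,
// 2^-126 = FLT_MIN, has bit pattern 0x00800000 (exponent field 1, mantissa 0),
// so its position is 0x00800000 - (1u << 23) + 1 = 1, the first value after 0
// once all denormals have been collapsed onto 0. Expressed as self-checks:
static void positionOfIEEEFloatExamples (void)
{
    DE_ASSERT(getPositionOfIEEEFloatWithoutDenormals( 0.0f)            ==  0);
    DE_ASSERT(getPositionOfIEEEFloatWithoutDenormals( 1.17549435e-38f) ==  1); // FLT_MIN
    DE_ASSERT(getPositionOfIEEEFloatWithoutDenormals(-1.17549435e-38f) == -1); // negatives mirror
}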
|
xorl %eax, %eax
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jne 0x9a5f64
jp 0x9a5f64
retq
ucomiss %xmm0, %xmm1
jbe 0x9a5f7d
pushq %rax
xorps 0x5af5f(%rip), %xmm0 # 0xa00ed0
callq 0x9a5f57
negl %eax
addq $0x8, %rsp
retq
movd %xmm0, %ecx
leal -0x7fffff(%rcx), %edx
xorl %eax, %eax
testl $0x7fffff, %ecx # imm = 0x7FFFFF
cmovel %edx, %eax
testl $0x7f800000, %ecx # imm = 0x7F800000
cmovnel %edx, %eax
retq
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuImageCompare.cpp
|
tcu::applyMonotone(tcu::Interval (&)(double, double), tcu::Interval const&, tcu::Interval const&)
|
Interval applyMonotone (DoubleIntervalFunc2& func, const Interval& arg0, const Interval& arg1)
{
double lo0 = arg0.lo(), hi0 = arg0.hi(), lo1 = arg1.lo(), hi1 = arg1.hi();
return Interval(Interval(func(lo0, lo1), func(lo0, hi1)),
Interval(func(hi0, lo1), func(hi0, hi1)));
}
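// Example (added for illustration): with a monotone func such as interval
// addition, the four corner evaluations span the exact result:
//   arg0 = [1, 2], arg1 = [10, 20]
//   func(1,10) = 11, func(1,20) = 21, func(2,10) = 12, func(2,20) = 22
//   => Interval(Interval(11, 21), Interval(12, 22)) = [11, 22]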
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movsd 0x8(%rdx), %xmm2
movsd %xmm2, 0x10(%rsp)
movsd 0x10(%rdx), %xmm0
movsd %xmm0, 0x8(%rsp)
movsd 0x8(%rcx), %xmm1
movsd %xmm1, 0x28(%rsp)
movsd 0x10(%rcx), %xmm0
movsd %xmm0, (%rsp)
leaq 0x88(%rsp), %r15
movq %r15, %rdi
movaps %xmm2, %xmm0
callq *%rsi
leaq 0x70(%rsp), %r12
movq %r12, %rdi
movsd 0x10(%rsp), %xmm0
movsd (%rsp), %xmm1
callq *%r14
movb (%r12), %bpl
movsd 0x8(%r12), %xmm0
movsd 0x8(%r15), %xmm1
movsd 0x10(%r15), %xmm2
movapd %xmm1, %xmm3
cmplesd %xmm0, %xmm3
andpd %xmm3, %xmm1
andnpd %xmm0, %xmm3
orpd %xmm1, %xmm3
movapd %xmm3, 0x10(%rsp)
movsd 0x10(%r12), %xmm0
movapd %xmm0, %xmm1
cmplesd %xmm2, %xmm1
andpd %xmm1, %xmm2
andnpd %xmm0, %xmm1
orpd %xmm2, %xmm1
movapd %xmm1, 0x30(%rsp)
orb (%r15), %bpl
leaq 0x58(%rsp), %r15
movq %r15, %rdi
movsd 0x8(%rsp), %xmm0
movsd 0x28(%rsp), %xmm1
callq *%r14
leaq 0x40(%rsp), %r12
movq %r12, %rdi
movsd 0x8(%rsp), %xmm0
movsd (%rsp), %xmm1
callq *%r14
movsd 0x8(%r15), %xmm1
movsd 0x8(%r12), %xmm2
movsd 0x10(%r12), %xmm3
movapd %xmm1, %xmm0
cmplesd %xmm2, %xmm0
andpd %xmm0, %xmm1
andnpd %xmm2, %xmm0
orpd %xmm1, %xmm0
movsd 0x10(%r15), %xmm2
movapd %xmm3, %xmm1
cmplesd %xmm2, %xmm1
andpd %xmm1, %xmm2
andnpd %xmm3, %xmm1
orpd %xmm2, %xmm1
testb $0x1, (%r15)
movl $0x1, %eax
movl $0x1, %ecx
jne 0x9a77d5
movl 0x40(%rsp), %ecx
testb $0x1, %bpl
cmovnel %eax, %ecx
movb %cl, (%rbx)
movapd 0x10(%rsp), %xmm4
movapd %xmm4, %xmm2
cmplesd %xmm0, %xmm2
movapd %xmm2, %xmm3
andpd %xmm4, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm3, %xmm2
movlpd %xmm2, 0x8(%rbx)
movapd %xmm1, %xmm0
movapd 0x30(%rsp), %xmm3
cmplesd %xmm3, %xmm0
movapd %xmm0, %xmm2
andpd %xmm3, %xmm2
andnpd %xmm1, %xmm0
orpd %xmm2, %xmm0
movlpd %xmm0, 0x10(%rbx)
movq %rbx, %rax
addq $0xa0, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuInterval.cpp
|
tcu::RGBA::toVec() const
|
int getAlpha (void) const { return (int)((m_value >> (deUint32)ALPHA_SHIFT) & 0xFFu); }
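// Sketch (added for illustration) of the full toVec() conversion this block
// compiles to; getAlpha() above is the last of four analogous accessors. The
// shift amounts follow the disassembly below, assuming the usual R/G/B/A shift
// order of 0/8/16/24:
static void unpackRGBA8 (deUint32 value, float dst[4])
{
    dst[0] = (float)((value >>  0u) & 0xFFu) / 255.0f; // red
    dst[1] = (float)((value >>  8u) & 0xFFu) / 255.0f; // green
    dst[2] = (float)((value >> 16u) & 0xFFu) / 255.0f; // blue
    dst[3] = (float)((value >> 24u) & 0xFFu) / 255.0f; // alpha
}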
|
movl (%rsi), %ecx
movl $0xff, %edx
movl %ecx, %eax
andl %edx, %eax
cvtsi2ss %eax, %xmm0
movq %rdi, %rax
movl %ecx, %esi
shrl $0x8, %esi
andl %edx, %esi
cvtsi2ss %esi, %xmm1
movss 0x598d1(%rip), %xmm2 # 0xa02500
divss %xmm2, %xmm0
movl %ecx, %esi
shrl $0x10, %esi
andl %edx, %esi
cvtsi2ss %esi, %xmm3
shrl $0x18, %ecx
cvtsi2ss %ecx, %xmm4
divss %xmm2, %xmm1
divss %xmm2, %xmm3
divss %xmm2, %xmm4
movss %xmm0, (%rdi)
movss %xmm1, 0x4(%rdi)
movss %xmm3, 0x8(%rdi)
movss %xmm4, 0xc(%rdi)
retq
nop
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuRGBA.hpp
|
tcu::FileResource::FileResource(char const*)
|
FileResource::FileResource (const char* filename)
: Resource(std::string(filename))
{
m_file = fopen(filename, "rb");
if (!m_file)
throw ResourceError("Failed to open file", filename, __FILE__, __LINE__);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x30, %rsp
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x10(%rsp), %rdi
leaq 0xf(%rsp), %rdx
callq 0x333da2
leaq 0x2b2a59(%rip), %r13 # 0xc5b9a8
movq %r13, (%rbx)
leaq 0x8(%rbx), %rdi
leaq 0x18(%rbx), %r12
movq %r12, 0x8(%rbx)
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
addq %rsi, %rdx
callq 0x334442
leaq 0x20(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a8f8b
movq 0x20(%rsp), %rsi
incq %rsi
callq 0x3251a0
leaq 0x2b2946(%rip), %rax # 0xc5b8d8
movq %rax, (%rbx)
leaq 0xf9f71(%rip), %rsi # 0xaa2f0d
movq %r14, %rdi
callq 0x3261e0
movq %rax, 0x28(%rbx)
testq %rax, %rax
je 0x9a8fbb
addq $0x30, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movl $0x38, %edi
callq 0x325650
movq %rax, %r15
leaq 0xf9f41(%rip), %rsi # 0xaa2f10
leaq 0xf9f4e(%rip), %rcx # 0xaa2f24
movq %rax, %rdi
movq %r14, %rdx
movl $0x35, %r8d
callq 0x99c20c
leaq 0x27207a(%rip), %rsi # 0xc1b068
leaq -0x677215(%rip), %rdx # 0x331de0
movq %r15, %rdi
callq 0x325940
movq %rax, %r14
jmp 0x9a900d
movq %rax, %r14
movq %r15, %rdi
callq 0x325d40
movq %r13, (%rbx)
movq 0x8(%rbx), %rdi
cmpq %r12, %rdi
je 0x9a9042
movq (%r12), %rsi
jmp 0x9a9035
movq %rax, %r14
leaq 0x20(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x9a9042
movq 0x20(%rsp), %rsi
incq %rsi
callq 0x3251a0
jmp 0x9a9042
movq %rax, %r14
movq %r14, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuResource.cpp
|
tcu::ResourcePrefix::getResource(char const*) const
|
Resource* ResourcePrefix::getResource (const char* name) const
{
return m_archive.getResource((m_prefix + name).c_str());
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rsi, %r14
movq 0x8(%rdi), %rbx
leaq 0x18(%rsp), %r12
movq %r12, -0x10(%r12)
movq 0x10(%rdi), %rsi
movq 0x18(%rdi), %rdx
addq %rsi, %rdx
leaq 0x8(%rsp), %r15
movq %r15, %rdi
callq 0x334442
movq %r15, %rdi
movq %r14, %rsi
callq 0x3259c0
movq 0x8(%rsp), %rsi
movq (%rbx), %rax
movq %rbx, %rdi
callq *0x10(%rax)
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a9209
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
jmp 0x9a921a
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r12, %rdi
je 0x9a9234
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x3251a0
movq %rbx, %rdi
callq 0x3259a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuResource.cpp
|
tcu::ResourcePrefix::~ResourcePrefix()
|
virtual ~ResourcePrefix (void) {}
|
pushq %rbx
movq %rdi, %rbx
leaq 0x2b26ab(%rip), %rax # 0xc5b918
movq %rax, (%rdi)
movq 0x10(%rdi), %rdi
leaq 0x20(%rbx), %rax
cmpq %rax, %rdi
je 0x9a9288
movq (%rax), %rsi
incq %rsi
callq 0x3251a0
movl $0x30, %esi
movq %rbx, %rdi
popq %rbx
jmp 0x3251a0
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuResource.hpp
|
tcu::TextureCube::operator=(tcu::TextureCube const&)
|
TextureCube& TextureCube::operator= (const TextureCube& other)
{
if (this == &other)
return *this;
const int numLevels = computeMipPyramidLevels(other.m_size);
const ConstPixelBufferAccess* levels[CUBEFACE_LAST];
for (int face = 0; face < CUBEFACE_LAST; face++)
{
m_data[face].resize(numLevels);
m_access[face].resize(numLevels);
levels[face] = &m_access[face][0];
}
m_format = other.m_format;
m_size = other.m_size;
m_view = TextureCubeView(numLevels, levels);
for (int levelNdx = 0; levelNdx < numLevels; levelNdx++)
{
for (int face = 0; face < CUBEFACE_LAST; face++)
{
if (!isLevelEmpty((CubeFace)face, levelNdx))
clearLevel((CubeFace)face, levelNdx);
if (!other.isLevelEmpty((CubeFace)face, levelNdx))
{
allocLevel((CubeFace)face, levelNdx);
copy(getLevelFace(levelNdx, (CubeFace)face),
other.getLevelFace(levelNdx, (CubeFace)face));
}
}
}
return *this;
}
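// Note (added for illustration): computeMipPyramidLevels(size) reduces to
// floor(log2(size)) + 1 -- the bsr/xor/sub sequence at the top of the
// disassembly below computes 32 - countLeadingZeros(size). A 64x64 cube
// therefore copies 7 mip levels per face.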
|
movq %rdi, %rcx
cmpq %rsi, %rdi
je 0x9b74cd
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rsi, %r13
movl 0x8(%rsi), %ebp
movl $0x20, %r15d
movl $0x20, %eax
testl %ebp, %ebp
je 0x9b7328
bsrl %ebp, %eax
xorl $0x1f, %eax
subl %eax, %r15d
movq %rcx, 0x8(%rsp)
leaq 0xa0(%rcx), %r12
xorl %r14d, %r14d
leaq -0x90(%r12), %rdi
movq %r15, %rsi
callq 0x9b9b2e
movq %r12, %rdi
movq %r15, %rsi
callq 0x9b9b9a
movq (%r12), %rax
movq %rax, 0x60(%rsp,%r14,8)
incq %r14
addq $0x18, %r12
cmpq $0x6, %r14
jne 0x9b733a
movq (%r13), %rax
movq 0x8(%rsp), %rcx
movq %rax, (%rcx)
movq %r13, 0x20(%rsp)
movl 0x8(%r13), %eax
movl %eax, 0x8(%rcx)
movaps 0x60(%rsp), %xmm0
movaps 0x70(%rsp), %xmm1
movaps 0x80(%rsp), %xmm2
movups %xmm2, 0x50(%rsp)
movups %xmm1, 0x40(%rsp)
movups %xmm0, 0x30(%rsp)
movl %r15d, 0x130(%rcx)
movups 0x2c(%rsp), %xmm0
movups 0x3c(%rsp), %xmm1
movups 0x4c(%rsp), %xmm2
movups %xmm0, 0x134(%rcx)
movups %xmm1, 0x144(%rcx)
movups %xmm2, 0x154(%rcx)
movl 0x5c(%rsp), %eax
movl %eax, 0x164(%rcx)
testl %ebp, %ebp
je 0x9b74bc
cmpl $0x2, %r15d
movl $0x1, %eax
cmovgel %r15d, %eax
movq %rax, 0x18(%rsp)
xorl %edx, %edx
movq 0x20(%rsp), %rbx
movl $0xa0, %r13d
movq %rdx, %r15
shlq $0x4, %r15
leaq (,%rdx,8), %rax
leaq (%rax,%rax,4), %r14
movq %rdx, 0x10(%rsp)
leaq (%rdx,%rdx,4), %rbp
xorl %r12d, %r12d
movq -0x90(%rcx,%r13), %rdi
cmpq $0x0, 0x8(%rdi,%r15)
je 0x9b745b
addq %r15, %rdi
callq 0x8af038
movq 0x8(%rsp), %rcx
movq (%rcx,%r13), %rax
movabsq $0x2600000015, %rdx # imm = 0x2600000015
movq %rdx, (%rax,%rbp,8)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rax,%rbp,8)
movups %xmm0, 0x18(%rax,%rbp,8)
movq -0x90(%rbx,%r13), %rax
cmpq $0x0, 0x8(%rax,%r15)
je 0x9b7498
movq %rcx, %rdi
movl %r12d, %esi
movq 0x10(%rsp), %rdx
callq 0x9b7232
movq 0x8(%rsp), %rax
movq (%rax,%r13), %rdi
addq %r14, %rdi
movq (%rbx,%r13), %rsi
addq %r14, %rsi
callq 0x9bc798
movq 0x8(%rsp), %rcx
incq %r12
addq $0x18, %r13
cmpq $0x6, %r12
jne 0x9b741f
movq 0x10(%rsp), %rdx
incq %rdx
cmpq 0x18(%rsp), %rdx
jne 0x9b73fa
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
movq %rcx, %rax
retq
nop
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuTexture.cpp
|
tcu::Texture2DArray::allocLevel(int)
|
void Texture2DArray::allocLevel (int levelNdx)
{
DE_ASSERT(de::inBounds(levelNdx, 0, getNumLevels()));
const int width = getMipPyramidLevelSize(m_width, levelNdx);
const int height = getMipPyramidLevelSize(m_height, levelNdx);
TextureLevelPyramid::allocLevel(levelNdx, width, height, m_numLayers);
}
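// Worked example (added for illustration): per the shift-and-clamp in the
// disassembly below, getMipPyramidLevelSize(size, levelNdx) behaves as
// max(size >> levelNdx, 1). A 64x48 base level therefore allocates level 3
// as 8x6 and level 7 as 1x1 (each dimension clamps at 1 independently).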
|
movl %esi, %ecx
movl 0x38(%rdi), %edx
movl 0x3c(%rdi), %eax
sarl %cl, %edx
cmpl $0x2, %edx
movl $0x1, %esi
cmovll %esi, %edx
sarl %cl, %eax
cmpl $0x2, %eax
cmovll %esi, %eax
movl 0x40(%rdi), %r8d
movl %ecx, %esi
movl %eax, %ecx
jmp 0x9b5b64
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuTexture.cpp
|
tcu::Texture3D::operator=(tcu::Texture3D const&)
|
Texture3D& Texture3D::operator= (const Texture3D& other)
{
if (this == &other)
return *this;
TextureLevelPyramid::operator=(other);
m_width = other.m_width;
m_height = other.m_height;
m_depth = other.m_depth;
m_view = Texture3DView(getNumLevels(), getLevels());
return *this;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
cmpq %rsi, %rdi
je 0x9b7e00
movq %rsi, %r14
movq %rbx, %rdi
callq 0x9b5958
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movl 0x3c(%r14), %eax
movl %eax, 0x3c(%rbx)
movl 0x40(%r14), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx), %rax
movq 0x28(%rbx), %rcx
subq %rax, %rcx
shrq $0x3, %rcx
imull $0xcccccccd, %ecx, %ecx # imm = 0xCCCCCCCD
movl %ecx, 0x48(%rbx)
movq %rax, 0x50(%rbx)
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
nop
|
/kaydenl[P]VK-GL-CTS/framework/common/tcuTexture.cpp
|
van_kampen::Graph::mergeNodes(int, int, std::unordered_set<int, std::hash<int>, std::equal_to<int>, std::allocator<int>> const&)
|
void Graph::mergeNodes(nodeId_t alive, nodeId_t dead, const std::unordered_set<nodeId_t> &untouchable)
{
for (Transition &edgeFromDead : node(dead).transitions())
{
if (untouchable.count(edgeFromDead.to))
{
continue;
}
node(alive).addTransition(edgeFromDead.to, edgeFromDead.label, false, false); // TODO
for (Transition &edge : node(edgeFromDead.to).transitions())
{
if (edge.to == dead)
{
edge.to = alive;
}
}
}
removedNodes_.insert(dead);
}
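// Usage sketch (added for illustration; the ids and the "graph" instance are
// made up, and nodeId_t is int per the mangled symbol above): collapse node 7
// into node 3, redirecting edges that pointed at 7 back at 3, while skipping
// transitions whose target is in the untouchable set.
//   std::unordered_set<van_kampen::nodeId_t> keep = {0};
//   graph.mergeNodes(/*alive=*/3, /*dead=*/7, keep);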
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rcx, %r14
movl %esi, %ebp
movq %rdi, %rbx
movl %edx, 0xc(%rsp)
movl %edx, %esi
callq 0x4c996
movq 0x20(%rax), %r15
movq 0x30(%rax), %r12
movq 0x38(%rax), %rcx
movq %rcx, 0x10(%rsp)
movq 0x40(%rax), %r13
cmpq %r13, %r15
je 0x4cd93
movq %r14, %rdi
movq %r15, %rsi
callq 0x4e640
testq %rax, %rax
je 0x4cd31
addq $0x48, %r15
cmpq %r12, %r15
jne 0x4ccf4
movq 0x10(%rsp), %rax
movq 0x8(%rax), %r15
addq $0x8, %rax
movq %rax, 0x10(%rsp)
leaq 0x1f8(%r15), %r12
jmp 0x4ccf4
movq %rbx, %rdi
movl %ebp, %esi
callq 0x4c996
movl (%r15), %esi
leaq 0x8(%r15), %rdx
movq %rax, %rdi
xorl %ecx, %ecx
xorl %r8d, %r8d
callq 0x4c434
movl (%r15), %esi
movq %rbx, %rdi
callq 0x4c996
movq 0x20(%rax), %rcx
movq 0x30(%rax), %rsi
movq 0x38(%rax), %rdx
movq 0x40(%rax), %rax
cmpq %rax, %rcx
je 0x4cd0d
movl (%rcx), %edi
cmpl 0xc(%rsp), %edi
jne 0x4cd79
movl %ebp, (%rcx)
addq $0x48, %rcx
cmpq %rsi, %rcx
jne 0x4cd6a
movq 0x8(%rdx), %rcx
addq $0x8, %rdx
leaq 0x1f8(%rcx), %rsi
jmp 0x4cd6a
addq $0x50, %rbx
leaq 0xc(%rsp), %rsi
movq %rbx, %rdi
callq 0x4e654
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Glebanister[P]van_kampen_diagramm/src/Graph.cpp
|
cxxopts::values::abstract_value<unsigned long>::implicit_value(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
std::shared_ptr<Value>
implicit_value(const std::string& value) override
{
m_implicit = true;
m_implicit_value = value;
return shared_from_this();
}
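// Usage sketch (added for illustration; the option name is made up): in cxxopts,
// the implicit value is what an option takes when passed without an argument:
//   options.add_options()
//       ("j,jobs", "number of jobs", cxxopts::value<unsigned long>()->implicit_value("1"));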
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movb $0x1, 0x31(%rsi)
leaq 0x58(%rsi), %rdi
movq %rdx, %rsi
callq 0x3e200
addq $0x8, %rbx
movq %r14, %rdi
movq %rbx, %rsi
callq 0x5970a
movq %r14, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/Glebanister[P]van_kampen_diagramm/extern/cxxopts/include/cxxopts.hpp
|
cxxopts::values::abstract_value<unsigned long>::abstract_value(cxxopts::values::abstract_value<unsigned long> const&)
|
abstract_value(const abstract_value& rhs)
{
if (rhs.m_result)
{
m_result = std::make_shared<T>();
m_store = m_result.get();
}
else
{
m_store = rhs.m_store;
}
m_default = rhs.m_default;
m_implicit = rhs.m_implicit;
m_default_value = rhs.m_default_value;
m_implicit_value = rhs.m_implicit_value;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rsi, %r15
movq %rdi, %rbx
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rdi)
leaq 0x1b5e5(%rip), %rax # 0x77ae8
movq %rax, (%rdi)
movups %xmm0, 0x18(%rdi)
andw $0x0, 0x30(%rdi)
leaq 0x48(%rdi), %rax
movq %rax, 0x38(%rdi)
andq $0x0, 0x40(%rdi)
leaq 0x38(%rdi), %r14
xorl %eax, %eax
movb %al, 0x48(%rdi)
leaq 0x68(%rdi), %rcx
movq %rcx, 0x58(%rdi)
andq $0x0, 0x60(%rdi)
leaq 0x58(%rdi), %r12
movb %al, 0x68(%rdi)
cmpq %rax, 0x18(%rsi)
je 0x5c562
movq %rsp, %rdi
callq 0x5c5d7
leaq 0x18(%rbx), %r13
movq %rsp, %rsi
movq %r13, %rdi
callq 0x5c718
leaq 0x8(%rsp), %rdi
callq 0x4a22c
jmp 0x5c566
leaq 0x28(%r15), %r13
movq (%r13), %rax
movq %rax, 0x28(%rbx)
movb 0x30(%r15), %al
movb %al, 0x30(%rbx)
movb 0x31(%r15), %al
movb %al, 0x31(%rbx)
leaq 0x38(%r15), %rsi
movq %r14, %rdi
callq 0x3e200
addq $0x58, %r15
movq %r12, %rdi
movq %r15, %rsi
callq 0x3e200
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
jmp 0x5c5a7
movq %rax, %r15
movq %r12, %rdi
callq 0x3e6d8
movq %r14, %rdi
callq 0x3e6d8
leaq 0x20(%rbx), %rdi
callq 0x4a22c
addq $0x10, %rbx
movq %rbx, %rdi
callq 0x5935a
movq %r15, %rdi
callq 0x3e5c0
|
/Glebanister[P]van_kampen_diagramm/extern/cxxopts/include/cxxopts.hpp
|
van_kampen::LargeFirstAlgorithm::generate(std::vector<std::vector<van_kampen::GroupElement, std::allocator<van_kampen::GroupElement>>, std::allocator<std::vector<van_kampen::GroupElement, std::allocator<van_kampen::GroupElement>>>> const&)
|
void LargeFirstAlgorithm::generate(const std::vector<std::vector<GroupElement>> &words)
{
std::size_t totalIterations = words.size();
if (cellsLimit)
{
totalIterations = std::min(totalIterations, cellsLimit);
}
ProcessLogger logger(totalIterations, std::clog, "Relations used", quiet);
std::vector<bool> isAdded(words.size());
using iterator = std::vector<std::vector<GroupElement>>::const_iterator;
auto added = [&](iterator it) {
return isAdded[it - begin(words)];
};
diagramm_.bindWord(words.back(), false, true);
isAdded.back() = true;
logger.iterate();
bool oneAdded = false;
bool force = false;
auto add = [&](iterator it) {
if (added(it))
{
throw std::logic_error("trying to add already added word");
}
if (diagramm_.bindWord(*it, force, false))
{
isAdded[it - begin(words)] = true;
oneAdded = true;
return true;
}
return false;
};
auto smallIt = words.begin();
auto bigIt = prev(prev(words.end()));
auto nextNotAdded = [&](iterator it) {
while (it < end(words) && isAdded[it - begin(words)])
++it;
return it;
};
auto prevNotAdded = [&](iterator it) {
while (--it >= begin(words) && isAdded[it - begin(words)])
;
return it;
};
while (true)
{
int rest = maximalSmallForOneBig;
bool infinite = rest == 0;
bool success = true;
while (!add(bigIt))
{
if (rest-- == 0 && !infinite)
{
success = false;
break;
}
if (add(smallIt) && logger.iterate() >= totalIterations)
return;
smallIt = nextNotAdded(smallIt + 1);
}
if (success && logger.iterate() >= totalIterations)
return;
bigIt = prevNotAdded(bigIt);
if (smallIt >= bigIt)
{
smallIt = nextNotAdded(begin(words));
bigIt = prevNotAdded(words.end());
if (!oneAdded)
{
if (force && !quiet)
{
if (!quiet)
std::clog << "can not bind " << totalIterations - logger.getIteration() << " relations, finishing";
return;
}
force = true;
}
oneAdded = false;
}
}
}
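// Strategy note (added for illustration): the loop binds the largest unbound
// word first (bigIt, scanning backwards), spending at most maximalSmallForOneBig
// small words (smallIt, scanning forwards) to make room for each stuck big one
// (zero meaning no limit). When the cursors cross, both rescan the whole range;
// a full pass that adds nothing first enables "force" binding and, if that also
// fails, reports how many relations could not be bound and finishes.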
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x108, %rsp # imm = 0x108
movq %rsi, %r14
movq %rdi, %r13
movq 0x8(%rsi), %rax
subq (%rsi), %rax
pushq $0x18
popq %rbx
cqto
idivq %rbx
movq 0x18(%rdi), %rcx
cmpq %rax, %rcx
movq %rax, %r15
cmovbq %rcx, %r15
testq %rcx, %rcx
cmoveq %rax, %r15
leaq 0x4bd4(%rip), %rsi # 0x61ff0
leaq 0x58(%rsp), %rdi
leaq 0x88(%rsp), %rdx
callq 0x40344
movzbl 0x20(%r13), %r8d
movq 0x1aaf6(%rip), %rdx # 0x77f30
leaq 0xb0(%rsp), %rdi
leaq 0x58(%rsp), %rcx
movq %r15, %rsi
callq 0x50c6c
leaq 0x58(%rsp), %rdi
callq 0x3e6d8
movq 0x8(%r14), %rax
subq (%r14), %rax
cqto
idivq %rbx
movq %r14, %rbx
leaq 0x88(%rsp), %r14
leaq 0x58(%rsp), %rdx
movq %r14, %rdi
movq %rax, %rsi
callq 0x5cf08
movq %r14, 0x30(%rsp)
movq %rbx, 0x38(%rsp)
movq 0x8(%rbx), %rsi
addq $-0x18, %rsi
leaq 0x40(%rsp), %rdi
callq 0x40496
leaq 0x28(%r13), %rdi
leaq 0x40(%rsp), %rsi
pushq $0x1
popq %rcx
xorl %edx, %edx
callq 0x4eb32
leaq 0x40(%rsp), %rdi
callq 0x4081a
leaq 0x88(%rsp), %rbp
movq %rbp, %rdi
callq 0x5d8a0
orq %rdx, (%rax)
leaq 0xb0(%rsp), %rdi
callq 0x50cc0
movq %r15, 0x8(%rsp)
xorl %eax, %eax
leaq 0x7(%rsp), %rcx
movb %al, (%rcx)
leaq 0x6(%rsp), %rdx
movb %al, (%rdx)
leaq 0x30(%rsp), %rax
leaq 0x58(%rsp), %r12
movq %rax, (%r12)
movq %r13, 0x10(%rsp)
movq %r13, 0x8(%r12)
movq %rdx, 0x10(%r12)
movq %rbp, 0x18(%r12)
movq %rbx, 0x20(%r12)
movq %rcx, 0x28(%r12)
movq (%rbx), %r15
movq 0x8(%rbx), %r14
addq $-0x30, %r14
leaq 0xf8(%rsp), %r13
movq %rbx, (%r13)
movq %rbp, 0x8(%r13)
leaq 0x20(%rsp), %rax
movq %rbx, 0x18(%rsp)
movq %rbx, (%rax)
movq %rbp, 0x8(%rax)
movq 0x10(%rsp), %rax
movl 0x24(%rax), %ebx
movl %ebx, %ebp
movq %r12, %rdi
movq %r14, %rsi
callq 0x5d6f2
testb %al, %al
jne 0x5d5a9
testl %ebx, %ebx
sete %al
addl $-0x1, %ebp
setb %cl
orb %al, %cl
je 0x5d5c1
movq %r12, %rdi
movq %r15, %rsi
callq 0x5d6f2
testb %al, %al
je 0x5d595
leaq 0xb0(%rsp), %rdi
callq 0x50cc0
cmpq 0x8(%rsp), %rax
jae 0x5d668
addq $0x18, %r15
movq %r13, %rdi
movq %r15, %rsi
callq 0x5d7ec
movq %rax, %r15
jmp 0x5d550
leaq 0xb0(%rsp), %rdi
callq 0x50cc0
cmpq 0x8(%rsp), %rax
jae 0x5d668
leaq 0x20(%rsp), %rdi
movq %r14, %rsi
callq 0x5d840
movq %rax, %r14
cmpq %rax, %r15
jb 0x5d546
movq 0x18(%rsp), %rbx
movq (%rbx), %rsi
movq %r13, %rdi
callq 0x5d7ec
movq %rax, %r15
movq 0x8(%rbx), %rsi
leaq 0x20(%rsp), %rdi
callq 0x5d840
movq %rax, %r14
cmpb $0x0, 0x7(%rsp)
jne 0x5d61c
cmpb $0x1, 0x6(%rsp)
jne 0x5d617
movq 0x10(%rsp), %rax
cmpb $0x0, 0x20(%rax)
je 0x5d626
movb $0x1, 0x6(%rsp)
movb $0x0, 0x7(%rsp)
jmp 0x5d546
movq 0x1a903(%rip), %rdi # 0x77f30
leaq 0x49cb(%rip), %rsi # 0x61fff
callq 0x3e310
movq %rax, %r14
leaq 0xb0(%rsp), %rdi
callq 0x50d62
movq 0x8(%rsp), %rsi
subq %rax, %rsi
movq %r14, %rdi
callq 0x3e250
leaq 0x49ad(%rip), %rsi # 0x6200d
movq %rax, %rdi
callq 0x3e310
leaq 0x88(%rsp), %rdi
callq 0x5cf1c
leaq 0xb0(%rsp), %rdi
callq 0x50d68
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x5d6cc
jmp 0x5d6cc
movq %rax, %rbx
leaq 0x40(%rsp), %rdi
callq 0x4081a
jmp 0x5d6cf
movq %rax, %rbx
jmp 0x5d6dc
movq %rax, %rbx
leaq 0x58(%rsp), %rdi
callq 0x3e6d8
jmp 0x5d6e9
movq %rax, %rbx
jmp 0x5d6e9
jmp 0x5d6cc
jmp 0x5d6cc
jmp 0x5d6cc
jmp 0x5d6cc
jmp 0x5d6cc
jmp 0x5d6cc
movq %rax, %rbx
leaq 0x88(%rsp), %rdi
callq 0x5cf1c
leaq 0xb0(%rsp), %rdi
callq 0x50d68
movq %rbx, %rdi
callq 0x3e5c0
nop
|
/Glebanister[P]van_kampen_diagramm/src/LargeFirstAlgorithm.cpp
|
van_kampen::MergingAlgorithm::MergingAlgorithm()
|
MergingAlgorithm::MergingAlgorithm()
: result_(graph_) {}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
callq 0x5ce98
leaq 0x1a340(%rip), %rax # 0x77cd8
movq %rax, (%rbx)
andq $0x0, 0x18(%rbx)
movb $0x0, 0x20(%rbx)
leaq 0x28(%rbx), %r14
leaq 0x8(%rbx), %rsi
movq %rsp, %r15
movq %r15, %rdi
callq 0x4f9ce
movq %r14, %rdi
movq %r15, %rsi
callq 0x4e9ae
leaq 0x8(%rsp), %rdi
callq 0x4a22c
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %r14
leaq 0x8(%rsp), %rdi
callq 0x4a22c
movq %rbx, %rdi
callq 0x5cf7c
movq %r14, %rdi
callq 0x3e5c0
nop
|
/Glebanister[P]van_kampen_diagramm/src/MergingAlgorithm.cpp
|
generate_anchors(int, ncnn::Mat const&, ncnn::Mat const&)
|
static ncnn::Mat generate_anchors(int base_size, const ncnn::Mat& ratios, const ncnn::Mat& scales)
{
int num_ratio = ratios.w;
int num_scale = scales.w;
ncnn::Mat anchors;
anchors.create(4, num_ratio * num_scale);
const float cx = 0;
const float cy = 0;
for (int i = 0; i < num_ratio; i++)
{
float ar = ratios[i];
int r_w = round(base_size / sqrt(ar));
int r_h = round(r_w * ar); //round(base_size * sqrt(ar));
for (int j = 0; j < num_scale; j++)
{
float scale = scales[j];
float rs_w = r_w * scale;
float rs_h = r_h * scale;
float* anchor = anchors.row(i * num_scale + j);
anchor[0] = cx - rs_w * 0.5f;
anchor[1] = cy - rs_h * 0.5f;
anchor[2] = cx + rs_w * 0.5f;
anchor[3] = cy + rs_h * 0.5f;
}
}
return anchors;
}
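// Worked example (added for illustration): base_size = 16, ratios = {1.f},
// scales = {8.f}. ar = 1 gives r_w = round(16 / sqrt(1)) = 16 and
// r_h = round(16 * 1) = 16; scale = 8 gives rs_w = rs_h = 128, so the single
// anchor row is (cx - 64, cy - 64, cx + 64, cy + 64) = (-64, -64, 64, 64),
// a 128x128 box centered on the origin.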
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdi, %rbx
movq %rdx, 0x10(%rsp)
movl 0x2c(%rdx), %r13d
movq %rcx, 0x18(%rsp)
movslq 0x2c(%rcx), %r12
andq $0x0, 0x40(%rdi)
movl %esi, %r14d
movl %r12d, %edx
imull %r13d, %edx
xorps %xmm0, %xmm0
movups %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movups %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
xorl %ebp, %ebp
pushq $0x4
popq %rsi
pushq $0x4
popq %rcx
xorl %r8d, %r8d
callq 0x636fa
cvtsi2ss %r14d, %xmm0
movss %xmm0, (%rsp)
testl %r12d, %r12d
movl $0x0, %r14d
cmovgl %r12d, %r14d
testl %r13d, %r13d
cmovlel %ebp, %r13d
xorl %r15d, %r15d
movq %r12, 0x8(%rsp)
movq %r13, %r12
cmpq %r13, %r15
je 0x61777
movq 0x10(%rsp), %rax
movq (%rax), %rax
movss (%rax,%r15,4), %xmm0
ucomiss 0x38c952(%rip), %xmm0 # 0x3ee010
movss %xmm0, 0x4(%rsp)
jb 0x616cc
sqrtss %xmm0, %xmm0
jmp 0x616d1
callq 0x5f520
movss (%rsp), %xmm1
divss %xmm0, %xmm1
movaps %xmm1, %xmm0
callq 0x5f2d0
movq 0x18(%rsp), %rax
movq (%rax), %r13
cvttps2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm1
movaps %xmm1, 0x20(%rsp)
movss 0x4(%rsp), %xmm0
mulss %xmm1, %xmm0
callq 0x5f2d0
movaps 0x20(%rsp), %xmm5
cvttps2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm0
movslq 0x2c(%rbx), %rax
imulq 0x10(%rbx), %rax
unpcklps %xmm0, %xmm5 # xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
movq %rax, %rcx
imulq %rbp, %rcx
addq (%rbx), %rcx
xorl %edx, %edx
xorps %xmm2, %xmm2
movaps 0x38c95d(%rip), %xmm3 # 0x3ee090
movaps 0x38c966(%rip), %xmm4 # 0x3ee0a0
cmpq %rdx, %r14
je 0x61767
movss (%r13,%rdx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm5, %xmm0
movaps %xmm0, %xmm1
xorps %xmm3, %xmm1
movlhps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
mulps %xmm4, %xmm1
addps %xmm2, %xmm1
movups %xmm1, (%rcx)
incq %rdx
addq %rax, %rcx
jmp 0x6173a
incq %r15
addq 0x8(%rsp), %rbp
movq %r12, %r13
jmp 0x616a0
movq %rbx, %rax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r14
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x617b6
lock
decl (%rax)
jne 0x617b6
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
jne 0x617b0
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x617b6
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x28(%rbx)
andl $0x0, 0x38(%rbx)
movq %r14, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/examples/scrfd_crowdhuman.cpp
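
Note: a minimal usage sketch for generate_anchors() above. The ratio and scale values are illustrative placeholders, not taken from the original example; only ncnn::Mat's public API (Mat(int), operator[], row()) is assumed to be in scope.

static ncnn::Mat make_scrfd_style_anchors()
{
    // 3 ratios x 3 scales = 9 anchors centered on the origin
    ncnn::Mat ratios(3);
    ratios[0] = 0.5f; ratios[1] = 1.f; ratios[2] = 2.f;
    ncnn::Mat scales(3);
    scales[0] = 8.f; scales[1] = 16.f; scales[2] = 32.f;
    ncnn::Mat anchors = generate_anchors(16, ratios, scales);
    // anchors is 4 x 9; the box (x0, y0, x1, y1) for ratio i, scale j
    // is at anchors.row(i * 3 + j)
    return anchors;
}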
|
ncnn::Mat::create(int, int, int, unsigned long, int, ncnn::Allocator*)
|
void Mat::create(int _w, int _h, int _c, size_t _elemsize, int _elempack, Allocator* _allocator)
{
if (dims == 3 && w == _w && h == _h && c == _c && elemsize == _elemsize && elempack == _elempack && allocator == _allocator)
return;
release();
elemsize = _elemsize;
elempack = _elempack;
allocator = _allocator;
dims = 3;
w = _w;
h = _h;
d = 1;
c = _c;
cstep = alignSize((size_t)w * h * elemsize, 16) / elemsize;
size_t totalsize = alignSize(total() * elemsize, 4);
if (totalsize > 0)
{
if (allocator)
data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
else
data = fastMalloc(totalsize + (int)sizeof(*refcount));
}
if (data)
{
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl %r9d, %r14d
movq %r8, %r15
movl %ecx, %ebp
movl %edx, %r12d
movl %esi, %r13d
movq %rdi, %rbx
movq 0x40(%rsp), %rdi
cmpl $0x3, 0x28(%rbx)
jne 0x62940
cmpl %r13d, 0x2c(%rbx)
jne 0x62940
cmpl %r12d, 0x30(%rbx)
jne 0x62940
cmpl %ebp, 0x38(%rbx)
jne 0x62940
cmpq %r15, 0x10(%rbx)
jne 0x62940
cmpl %r14d, 0x18(%rbx)
jne 0x62940
cmpq %rdi, 0x20(%rbx)
je 0x62a16
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x6296f
lock
decl (%rax)
jne 0x6296f
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x62962
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x6296a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x40(%rsp), %rdi
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq %r15, 0x10(%rbx)
movl %r14d, 0x18(%rbx)
movq %rdi, 0x20(%rbx)
movl $0x3, 0x28(%rbx)
movl %r13d, 0x2c(%rbx)
movl %r12d, 0x30(%rbx)
movl $0x1, 0x34(%rbx)
movl %ebp, 0x38(%rbx)
movslq %r13d, %rcx
movslq %r12d, %rax
imulq %rcx, %rax
imulq %r15, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x40(%rbx)
movslq %ebp, %r14
imulq %r15, %r14
imulq %rax, %r14
addq $0x3, %r14
andq $-0x4, %r14
je 0x62a16
testq %rdi, %rdi
je 0x629df
leaq 0x4(%r14), %rsi
movq (%rdi), %rax
callq *0x10(%rax)
jmp 0x629fe
movq %rsp, %rdi
andq $0x0, (%rdi)
leaq 0x44(%r14), %rdx
pushq $0x40
popq %rsi
callq 0x5f130
testl %eax, %eax
jne 0x629fc
movq (%rsp), %rax
jmp 0x629fe
xorl %eax, %eax
movq %rax, (%rbx)
testq %rax, %rax
je 0x62a16
leaq (%rax,%r14), %rcx
movq %rcx, 0x8(%rbx)
movl $0x1, (%rax,%r14)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/csukuangfj[P]ncnn/src/mat.cpp
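
Note: a worked example of the cstep computation above. align_size is a local restatement of ncnn's alignSize helper (round sz up to a multiple of n), written out so the arithmetic can be checked by hand.

#include <cstddef>

static inline size_t align_size(size_t sz, int n) { return (sz + n - 1) & -n; }

static void cstep_example()
{
    // 3x3 float channel: 3*3*4 = 36 bytes, rounded up to 48,
    // so cstep = 48/4 = 12 elements and every channel starts 16-byte aligned
    size_t cstep = align_size((size_t)3 * 3 * 4, 16) / 4; // == 12
    (void)cstep;
    // total() * elemsize is then rounded up to 4 bytes, and 4 extra bytes
    // past the end of the buffer are reserved for the int refcount,
    // exactly as the create() body above shows
}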
|
ncnn::Mat::reshape(int, int, ncnn::Allocator*) const
|
Mat Mat::reshape(int _w, int _h, Allocator* _allocator) const
{
if (w * h * d * c != _w * _h)
return Mat();
if (dims >= 3 && cstep != (size_t)w * h * d)
{
Mat m;
m.create(_w, _h, elemsize, elempack, _allocator);
// flatten
for (int i = 0; i < c; i++)
{
const void* ptr = (unsigned char*)data + i * cstep * elemsize;
void* mptr = (unsigned char*)m.data + (size_t)i * w * h * d * elemsize;
memcpy(mptr, ptr, (size_t)w * h * d * elemsize);
}
return m;
}
Mat m = *this;
m.dims = 2;
m.w = _w;
m.h = _h;
m.d = 1;
m.c = 1;
m.cstep = (size_t)_w * _h;
return m;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %r8, %r9
movq %rsi, %r14
movq %rdi, %rbx
movl 0x2c(%rsi), %eax
movl 0x30(%rsi), %esi
movl %esi, %r10d
imull %eax, %r10d
movl 0x34(%r14), %edi
movl 0x38(%r14), %r8d
movl %edi, %r11d
imull %r8d, %r11d
imull %r10d, %r11d
movl %ecx, %r10d
imull %edx, %r10d
cmpl %r10d, %r11d
jne 0x62f25
movl 0x28(%r14), %r11d
movq 0x40(%r14), %r10
cmpl $0x3, %r11d
jl 0x62eb7
movslq %eax, %r15
movslq %esi, %r12
imulq %r15, %r12
movslq %edi, %r15
imulq %r12, %r15
cmpq %r15, %r10
jne 0x62f48
movq (%r14), %r9
movq %r9, (%rbx)
movq 0x8(%r14), %r9
movq %r9, 0x8(%rbx)
movq 0x10(%r14), %r15
movq %r15, 0x10(%rbx)
movl 0x18(%r14), %ebp
movl %ebp, 0x18(%rbx)
movq 0x20(%r14), %r14
movq %r14, 0x20(%rbx)
movl %r11d, 0x28(%rbx)
movl %eax, 0x2c(%rbx)
movl %esi, 0x30(%rbx)
movl %edi, 0x34(%rbx)
movl %r8d, 0x38(%rbx)
movq %r10, 0x40(%rbx)
testq %r9, %r9
je 0x62efa
lock
incl (%r9)
movl $0x2, 0x28(%rbx)
movl %edx, 0x2c(%rbx)
movl %ecx, 0x30(%rbx)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0x34(%rbx)
movslq %edx, %rax
movslq %ecx, %rcx
imulq %rax, %rcx
movq %rcx, 0x40(%rbx)
jmp 0x62f3c
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x20(%rbx)
movups %xmm0, 0x2c(%rbx)
movq %rbx, %rax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x20(%rbx)
movups %xmm0, 0x2c(%rbx)
movq 0x10(%r14), %rax
movl 0x18(%r14), %r8d
movq %rbx, %rdi
movl %edx, %esi
movl %ecx, %edx
movq %rax, %rcx
callq 0x627de
movq (%rbx), %r15
xorl %r12d, %r12d
movslq 0x38(%r14), %rax
cmpq %rax, %r12
jge 0x62f3c
movq 0x10(%r14), %rax
movq %rax, %rcx
imulq %r12, %rcx
movq 0x40(%r14), %rsi
imulq %rcx, %rsi
addq (%r14), %rsi
movslq 0x2c(%r14), %rdi
movslq 0x30(%r14), %rdx
movslq 0x34(%r14), %r8
imulq %rdi, %rax
imulq %rdx, %rdi
imulq %rcx, %rdi
imulq %r8, %rdi
addq %r15, %rdi
imulq %r8, %rdx
imulq %rax, %rdx
callq 0x5f3c0
incq %r12
jmp 0x62f7c
movq %rax, %r14
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x62ff9
lock
decl (%rax)
jne 0x62ff9
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
jne 0x62ff3
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x62ff9
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x28(%rbx)
andl $0x0, 0x38(%rbx)
movq %r14, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/mat.cpp
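
Note: a hedged illustration of when the copy branch above fires. It assumes the default float elemsize of 4 and the cstep rule from Mat::create (channels padded to 16 bytes).

static void reshape_flatten_example()
{
    ncnn::Mat m(3, 3, 2);                // cstep = 12 != 3*3: padded channels
    ncnn::Mat flat = m.reshape(9, 2);    // copy path: memcpy strips the padding
    ncnn::Mat m2(4, 4, 2);               // cstep = 16 == 4*4: no padding
    ncnn::Mat flat2 = m2.reshape(16, 2); // header-only path: just a refcount bump
}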
|
ncnn::Mat::reshape(int, int, int, ncnn::Allocator*) const
|
Mat Mat::reshape(int _w, int _h, int _c, Allocator* _allocator) const
{
if (w * h * d * c != _w * _h * _c)
return Mat();
if (dims < 3)
{
if ((size_t)_w * _h != alignSize((size_t)_w * _h * elemsize, 16) / elemsize)
{
Mat m;
m.create(_w, _h, _c, elemsize, elempack, _allocator);
// align channel
for (int i = 0; i < _c; i++)
{
const void* ptr = (unsigned char*)data + (size_t)i * _w * _h * elemsize;
void* mptr = (unsigned char*)m.data + i * m.cstep * m.elemsize;
memcpy(mptr, ptr, (size_t)_w * _h * elemsize);
}
return m;
}
}
else if (c != _c)
{
// flatten and then align
Mat tmp = reshape(_w * _h * _c, _allocator);
return tmp.reshape(_w, _h, _c, _allocator);
}
Mat m = *this;
m.dims = 3;
m.w = _w;
m.h = _h;
m.d = 1;
m.c = _c;
m.cstep = alignSize((size_t)_w * _h * elemsize, 16) / elemsize;
return m;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %r9, %r13
movl %edx, %r12d
movq %rdi, %rbx
movl 0x2c(%rsi), %edi
movl 0x30(%rsi), %r9d
movl %r9d, %eax
imull %edi, %eax
movl 0x34(%rsi), %r10d
movl 0x38(%rsi), %r11d
movl %r10d, %edx
imull %r11d, %edx
imull %eax, %edx
movl %ecx, %eax
imull %r12d, %eax
imull %r8d, %eax
cmpl %eax, %edx
jne 0x6313a
movl %r8d, %r14d
movl %ecx, %ebp
movq %rsi, %r15
movl 0x28(%rsi), %esi
movslq %r12d, %rax
movslq %ecx, %rcx
cmpl $0x2, %esi
movq %rbx, 0x18(%rsp)
jg 0x63159
movq %r13, 0x10(%rsp)
movq %rcx, %r13
movq %rax, 0x8(%rsp)
imulq %rax, %r13
movq 0x10(%r15), %r8
movq %r8, %rax
imulq %r13, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r8
cmpq %rax, %r13
je 0x6316b
andq $0x0, 0x40(%rbx)
leaq 0x28(%rbx), %rax
movq %rax, 0x8(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x20(%rbx)
movups %xmm0, 0x2c(%rbx)
movl 0x18(%r15), %r9d
movq 0x10(%rsp), %rax
movq %rax, (%rsp)
movq %rbx, %rdi
movl %r12d, %esi
movl %ebp, %edx
movl %r14d, %ecx
callq 0x628f2
xorl %ebp, %ebp
testl %r14d, %r14d
cmovlel %ebp, %r14d
movq (%rbx), %r12
movq %rbx, %rax
movq 0x40(%rbx), %rbx
imulq 0x10(%rax), %rbx
subq $0x1, %r14
jb 0x631e9
movq 0x10(%r15), %rdx
movq %rbp, %rsi
imulq %rdx, %rsi
addq (%r15), %rsi
imulq %r13, %rdx
movq %r12, %rdi
callq 0x5f3c0
addq %rbx, %r12
addq %r13, %rbp
jmp 0x6310e
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x20(%rbx)
movups %xmm0, 0x2c(%rbx)
movq %rbx, %rax
jmp 0x631ee
cmpl %r14d, %r11d
jne 0x631fd
movq %rax, 0x8(%rsp)
movq 0x10(%r15), %r8
movq (%r15), %rax
movq %rax, (%rbx)
movq 0x8(%r15), %rax
movq %rax, 0x8(%rbx)
movq %r8, 0x10(%rbx)
movl 0x18(%r15), %edx
movl %edx, 0x18(%rbx)
movq 0x20(%r15), %rdx
movq %rdx, 0x20(%rbx)
movl %esi, 0x28(%rbx)
movl %edi, 0x2c(%rbx)
movl %r9d, 0x30(%rbx)
movl %r10d, 0x34(%rbx)
movl %r11d, 0x38(%rbx)
movq 0x40(%r15), %rdx
movq %rdx, 0x40(%rbx)
testq %rax, %rax
je 0x631b2
lock
incl (%rax)
movq 0x10(%r15), %r8
movl $0x3, 0x28(%rbx)
movl %r12d, 0x2c(%rbx)
movl %ebp, 0x30(%rbx)
movl $0x1, 0x34(%rbx)
movl %r14d, 0x38(%rbx)
imulq 0x8(%rsp), %rcx
imulq %r8, %rcx
addq $0xf, %rcx
andq $-0x10, %rcx
movq %rcx, %rax
xorl %edx, %edx
divq %r8
movq %rax, 0x40(%rbx)
movq 0x18(%rsp), %rax
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x20(%rsp), %rdi
movq %r15, %rsi
movq %r13, %rcx
callq 0x62c8a
movq %rbx, %rdi
leaq 0x20(%rsp), %rsi
movl %r12d, %edx
movl %ebp, %ecx
movl %r14d, %r8d
movq %r13, %r9
callq 0x63020
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x631e9
lock
decl (%rax)
jne 0x631e9
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x6324b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x631e9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x631e9
jmp 0x632e8
movq %rax, %r14
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x632e0
lock
decl (%rax)
jne 0x632e0
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x63285
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x632e0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x632e0
jmp 0x632e8
movq 0x8(%rsp), %r12
movq %rax, %r14
movq %rbx, %r15
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x632c4
lock
decl (%rax)
jne 0x632c4
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
jne 0x632be
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x632c4
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movups %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
movups %xmm0, (%r12)
andl $0x0, 0x10(%r12)
movq %r14, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/mat.cpp
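
Note: a sketch of the flatten-then-align path above. reshape(_w * _h * _c, _allocator) refers to the single-argument reshape overload, which belongs to the same Mat API but is not listed in this section.

static void reshape_channel_change_example()
{
    ncnn::Mat m(3, 3, 4);              // 4 padded channels, cstep = 12
    ncnn::Mat a = m.reshape(3, 3, 4);  // c unchanged: header-only copy
    ncnn::Mat b = m.reshape(6, 3, 2);  // c changes: flatten to 36 elements
                                       // first, then re-split into 2 channels
}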
|
ncnn::Blob::~Blob()
|
class NCNN_EXPORT Blob
{
public:
// empty
Blob();
public:
#if NCNN_STRING
// blob name
std::string name;
#endif // NCNN_STRING
// layer index which produce this blob as output
int producer;
// layer index which need this blob as input
int consumer;
// shape hint
Mat shape;
};
|
pushq %rbx
movq %rdi, %rbx
movq 0x30(%rdi), %rax
testq %rax, %rax
je 0x6ff89
lock
decl (%rax)
jne 0x6ff89
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x6ff81
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x6ff89
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x28(%rbx), %rax
andq $0x0, 0x68(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
movups %xmm0, 0x50(%rbx)
andl $0x0, 0x60(%rbx)
movq %rbx, %rdi
popq %rbx
jmp 0x5f470
movq %rax, %rdi
callq 0x61d68
nop
|
/csukuangfj[P]ncnn/src/blob.h
|
ncnn::ParamDict::ParamDict(ncnn::ParamDict const&)
|
ParamDict::ParamDict(const ParamDict& rhs)
: d(new ParamDictPrivate)
{
for (int i = 0; i < NCNN_MAX_PARAM_COUNT; i++)
{
int type = rhs.d->params[i].type;
d->params[i].type = type;
if (type == 1 || type == 2 || type == 3)
{
d->params[i].i = rhs.d->params[i].i;
}
else // if (type == 4 || type == 5 || type == 6)
{
d->params[i].v = rhs.d->params[i].v;
}
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rsi, %rbx
movq %rdi, %r14
leaq 0x3ecc1e(%rip), %rax # 0x45e288
movq %rax, (%rdi)
movl $0xa00, %edi # imm = 0xA00
callq 0x5f3d0
movq %rax, %r15
movq %rax, %rdi
callq 0x72028
movq %r15, 0x8(%r14)
xorl %r15d, %r15d
xorps %xmm1, %xmm1
cmpq $0xa00, %r15 # imm = 0xA00
je 0x7176a
movq 0x8(%rbx), %r12
movl (%r12,%r15), %eax
movq 0x8(%r14), %r13
movl %eax, (%r13,%r15)
decl %eax
cmpl $0x2, %eax
ja 0x716c0
movl 0x4(%r12,%r15), %eax
movl %eax, 0x4(%r13,%r15)
jmp 0x71761
cmpq %r12, %r13
je 0x71761
movq 0x10(%r12,%r15), %rax
testq %rax, %rax
je 0x716d6
lock
incl (%rax)
movq 0x10(%r13,%r15), %rax
testq %rax, %rax
je 0x71707
lock
decl (%rax)
jne 0x71707
movq 0x8(%r13,%r15), %rsi
movq 0x28(%r13,%r15), %rdi
testq %rdi, %rdi
je 0x716fc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x71704
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm1, %xmm1
leaq (%r15,%r13), %rax
addq $0x8, %rax
andq $0x0, 0x40(%rax)
movups %xmm1, 0xc(%rax)
movups %xmm1, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm1, 0x28(%rax)
movups 0x8(%r12,%r15), %xmm0
movups %xmm0, (%rax)
movq 0x18(%r12,%r15), %rcx
movq %rcx, 0x10(%rax)
movl 0x20(%r12,%r15), %ecx
movl %ecx, 0x18(%rax)
movq 0x28(%r12,%r15), %rcx
movq %rcx, 0x20(%rax)
movups 0x30(%r12,%r15), %xmm0
movups %xmm0, 0x28(%rax)
movl 0x40(%r12,%r15), %ecx
movl %ecx, 0x38(%rax)
movq 0x48(%r12,%r15), %rcx
movq %rcx, 0x40(%rax)
addq $0x50, %r15
jmp 0x7168c
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rax, %rbx
movl $0xa00, %esi # imm = 0xA00
movq %r15, %rdi
callq 0x5f280
movq %rbx, %rdi
callq 0x5f340
|
/csukuangfj[P]ncnn/src/paramdict.cpp
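
Note: a minimal analogue of the tagged copy above, with illustrative names. The point is the split the loop makes: scalar types (1..3) are copied as raw ints, everything else goes through Mat's copy assignment, which produces the atomic incref/decref sequence visible in the assembly.

struct Param {
    int type;       // 1..3 scalar, 4..6 array, 0 unset
    union { int i; float f; };
    ncnn::Mat v;    // refcounted payload for array types
};

static void copy_param(Param& dst, const Param& src)
{
    dst.type = src.type;
    if (src.type == 1 || src.type == 2 || src.type == 3)
        dst.i = src.i; // plain bits, no refcount traffic
    else
        dst.v = src.v; // incref the new Mat, decref (and maybe free) the old
}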
|
ncnn::vstr_is_float(char const*)
|
static bool vstr_is_float(const char vstr[16])
{
    // look ahead to determine whether the value is a float
for (int j = 0; j < 16; j++)
{
if (vstr[j] == '\0')
break;
if (vstr[j] == '.' || tolower(vstr[j]) == 'e')
return true;
}
return false;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
xorl %r14d, %r14d
cmpq $0x10, %r14
je 0x71d26
movzbl (%rbx,%r14), %eax
testl %eax, %eax
je 0x71d26
cmpl $0x2e, %eax
je 0x71d22
movsbl %al, %edi
callq 0x5f500
incq %r14
cmpl $0x65, %eax
jne 0x71cfe
movb $0x1, %al
jmp 0x71d28
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/csukuangfj[P]ncnn/src/paramdict.cpp
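
Note: spot-checks for the heuristic above. vstr_is_float is file-static, so these asserts assume they run in the same translation unit.

#include <cassert>

static void vstr_is_float_spot_checks()
{
    assert(!vstr_is_float("15"));  // no '.' or 'e': parsed as integer
    assert(vstr_is_float("1.5"));  // '.' found
    assert(vstr_is_float("2e-3")); // exponent marker, lower case
    assert(vstr_is_float("3E8"));  // tolower() makes the check case-insensitive
}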
|
ncnn::cpu_support_x86_fma()
|
int cpu_support_x86_fma()
{
try_initialize_global_cpu_info();
#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
return g_cpu_support_x86_fma;
#else
return 0;
#endif
}
|
pushq %rax
callq 0x72ac4
movl 0x41d187(%rip), %eax # 0x490460
popq %rcx
retq
|
/csukuangfj[P]ncnn/src/cpu.cpp
|
ncnn::cpu_support_x86_xop()
|
int cpu_support_x86_xop()
{
try_initialize_global_cpu_info();
#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
return g_cpu_support_x86_xop;
#else
return 0;
#endif
}
|
pushq %rax
callq 0x72ac4
movl 0x41d17d(%rip), %eax # 0x490464
popq %rcx
retq
|
/csukuangfj[P]ncnn/src/cpu.cpp
|
ncnn::cpu_support_riscv_v()
|
int cpu_support_riscv_v()
{
try_initialize_global_cpu_info();
#if defined __ANDROID__ || defined __linux__
#if __riscv
return g_hwcaps & COMPAT_HWCAP_ISA_V;
#else
return 0;
#endif
#else
return 0;
#endif
}
|
pushq %rax
callq 0x72ac4
xorl %eax, %eax
popq %rcx
retq
|
/csukuangfj[P]ncnn/src/cpu.cpp
|
ncnn::test_prefer_winograd63(int, int, int, int)
|
static bool test_prefer_winograd63(int num_input, int num_output, int w, int h)
{
// winograd selection strategy (profiled on i7-7700 single thread)
int minwh = std::min(w, h);
if (num_input >= 64)
{
return false;
}
if (num_input >= 32)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 44)
|| (minwh >= 47 && minwh <= 56)
|| (minwh >= 63 && minwh <= 130);
if (num_output >= 16) return (minwh >= 13 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 140);
if (num_output >= 8) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 31 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 55 && minwh <= 162);
return false;
}
if (num_input >= 16)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 44)
|| (minwh >= 47 && minwh <= 92)
|| (minwh >= 95 && minwh <= 188);
if (num_output >= 16) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 27 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 74)
|| (minwh >= 81 && minwh <= 110)
|| (minwh >= 117 && minwh <= 170)
|| (minwh >= 177 && minwh <= 182);
if (num_output >= 8) return (minwh >= 19 && minwh <= 20)
|| (minwh >= 33 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 128)
|| (minwh >= 155 && minwh <= 210);
return false;
}
if (num_input >= 8)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 7 && minwh <= 14)
|| (minwh >= 17 && minwh <= 20)
|| (minwh >= 23 && minwh <= 26)
|| (minwh >= 31 && minwh <= 38)
|| (minwh >= 43 && minwh <= 162);
if (num_output >= 16) return minwh == 31 || minwh == 32
|| (minwh >= 39 && minwh <= 44)
|| (minwh >= 47 && minwh <= 212);
if (num_output >= 8) return false;
return false;
}
return false;
}
|
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $0x3f, %edi
jle 0x9ba1d
xorl %eax, %eax
retq
cmpl $0x20, %edi
jl 0x9ba66
cmpl $0x3f, %esi
jg 0x9ba1a
cmpl $0x20, %esi
jl 0x9baed
movd %edx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
paddd 0x35614f(%rip), %xmm0 # 0x3f1b90
pxor 0x352647(%rip), %xmm0 # 0x3ee090
pcmpgtd 0x35615f(%rip), %xmm0 # 0x3f1bb0
movmskps %xmm0, %ecx
movb $0x1, %al
cmpl $0xf, %ecx
jne 0x9ba1c
addl $-0x3f, %edx
cmpl $0x44, %edx
jmp 0x9bbdc
cmpl $0x10, %edi
jl 0x9baad
cmpl $0x3f, %esi
jg 0x9ba1a
cmpl $0x20, %esi
jl 0x9bb25
movd %edx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
paddd 0x356106(%rip), %xmm0 # 0x3f1b90
pxor 0x3525fe(%rip), %xmm0 # 0x3ee090
pcmpgtd 0x356106(%rip), %xmm0 # 0x3f1ba0
movmskps %xmm0, %ecx
movb $0x1, %al
cmpl $0xf, %ecx
jne 0x9ba1c
addl $-0x5f, %edx
jmp 0x9bb1d
cmpl $0x8, %edi
setl %al
cmpl $0x40, %esi
setge %cl
orb %al, %cl
jne 0x9ba1a
cmpl $0x20, %esi
jl 0x9bb83
leal -0x7(%rdx), %ecx
cmpl $0x20, %ecx
jae 0x9bae2
movb $0x1, %al
movl $0xff0f3cff, %esi # imm = 0xFF0F3CFF
btl %ecx, %esi
jb 0x9ba1c
addl $-0x2b, %edx
cmpl $0x78, %edx
jmp 0x9bbdc
cmpl $0x10, %esi
jl 0x9bbaf
leal -0x17(%rdx), %ecx
movb $0x1, %al
cmpl $0x10, %ecx
jb 0x9ba1c
leal -0xd(%rdx), %ecx
cmpl $0x20, %ecx
jae 0x9bb1a
movl $0xc00000c3, %esi # imm = 0xC00000C3
btl %ecx, %esi
jb 0x9ba1c
addl $-0x2f, %edx
cmpl $0x5e, %edx
jmp 0x9bbdc
cmpl $0x10, %esi
jl 0x9bbe0
movd %edx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
paddd 0x356031(%rip), %xmm0 # 0x3f1b70
leal -0x51(%rdx), %eax
cmpl $0x1e, %eax
setb %al
leal -0x75(%rdx), %ecx
cmpl $0x36, %ecx
setb %cl
pxor 0x352537(%rip), %xmm0 # 0x3ee090
pcmpgtd 0x35601f(%rip), %xmm0 # 0x3f1b80
movmskps %xmm0, %esi
cmpl $0xf, %esi
setne %sil
orb %al, %cl
orb %sil, %cl
movb $0x1, %al
jne 0x9ba1c
addl $0xffffff4f, %edx # imm = 0xFFFFFF4F
cmpl $0x6, %edx
jmp 0x9bbdc
cmpl $0x10, %esi
jl 0x9ba1a
leal -0x1f(%rdx), %ecx
cmpl $0xe, %ecx
jae 0x9bba4
movb $0x1, %al
movl $0x3f03, %esi # imm = 0x3F03
btl %ecx, %esi
jb 0x9ba1c
addl $-0x2f, %edx
cmpl $0xa6, %edx
jmp 0x9bbdc
cmpl $0x8, %esi
jl 0x9ba1a
leal -0xb(%rdx), %ecx
cmpl $0x22, %ecx
jae 0x9bbd6
movb $0x1, %al
movabsq $0x30ff0030f, %rsi # imm = 0x30FF0030F
btq %rcx, %rsi
jb 0x9ba1c
addl $-0x37, %edx
cmpl $0x6c, %edx
setb %al
retq
cmpl $0x8, %esi
jl 0x9ba1a
leal -0x2f(%rdx), %ecx
movb $0x1, %al
cmpl $0x52, %ecx
jb 0x9ba1c
leal -0x13(%rdx), %ecx
cmpl $0x1a, %ecx
jae 0x9bc0d
movl $0x30fc003, %esi # imm = 0x30FC003
btl %ecx, %esi
jb 0x9ba1c
addl $0xffffff65, %edx # imm = 0xFFFFFF65
cmpl $0x38, %edx
jmp 0x9bbdc
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_x86.cpp
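
Note: the compiler collapses each chain of range tests into a rebased bit test, which is why the assembly above is full of btl instructions with magic constants. A hand-written equivalent for one case, with the mask derived from the source ranges; it matches the 0xff0f3cff constant in the num_input >= 8, num_output >= 32 path.

// Ranges 7-14, 17-20, 23-26, 31-38, rebased at 7, fit one 32-bit mask;
// the trailing 43-162 range stays a separate unsigned compare.
static bool prefer_wg63_in8_out32(int minwh)
{
    unsigned idx = (unsigned)(minwh - 7);
    if (idx < 32 && ((0xff0f3cffu >> idx) & 1u))
        return true;
    return (unsigned)(minwh - 43) < 120u; // minwh in [43, 162]
}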
|
ncnn::test_prefer_winograd23(int, int, int, int)
|
static bool test_prefer_winograd23(int num_input, int num_output, int w, int h)
{
int minwh = std::min(w, h);
if (num_input >= 512)
{
if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
if (num_output >= 256) return (minwh >= 3 && minwh <= 14);
if (num_output >= 128) return (minwh >= 3 && minwh <= 14);
if (num_output >= 64) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12);
if (num_output >= 32) return (minwh >= 3 && minwh <= 8);
if (num_output >= 16) return (minwh >= 3 && minwh <= 8);
if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
return false;
}
if (num_input >= 256)
{
if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
if (num_output >= 256) return (minwh >= 3 && minwh <= 14);
if (num_output >= 128) return (minwh >= 3 && minwh <= 12);
if (num_output >= 64) return (minwh >= 3 && minwh <= 4);
if (num_output >= 32) return (minwh >= 3 && minwh <= 8);
if (num_output >= 16) return (minwh >= 3 && minwh <= 8);
if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
return false;
}
if (num_input >= 128)
{
if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
if (num_output >= 256) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12);
if (num_output >= 128) return (minwh >= 3 && minwh <= 10);
if (num_output >= 64) return (minwh >= 3 && minwh <= 8);
if (num_output >= 32) return (minwh >= 3 && minwh <= 10);
if (num_output >= 16) return (minwh >= 3 && minwh <= 6);
if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
return false;
}
if (num_input >= 64)
{
if (num_output >= 512) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12) || (minwh >= 15 && minwh <= 20);
if (num_output >= 256) return (minwh >= 7 && minwh <= 8);
if (num_output >= 128) return (minwh >= 3 && minwh <= 8) || (minwh >= 19 && minwh <= 22);
if (num_output >= 64) return (minwh >= 3 && minwh <= 12);
if (num_output >= 32) return (minwh >= 3 && minwh <= 12);
if (num_output >= 16) return (minwh >= 3 && minwh <= 12);
if (num_output >= 8) return (minwh >= 3 && minwh <= 12);
return false;
}
if (num_input >= 32)
{
if (num_output >= 512) return (minwh >= 3 && minwh <= 6) || (minwh >= 11 && minwh <= 12);
if (num_output >= 256) return (minwh >= 3 && minwh <= 6) || (minwh >= 11 && minwh <= 12);
if (num_output >= 128) return (minwh >= 3 && minwh <= 4) || (minwh >= 7 && minwh <= 16);
if (num_output >= 64) return (minwh >= 3 && minwh <= 8);
if (num_output >= 32) return (minwh >= 7 && minwh <= 8);
if (num_output >= 16) return (minwh >= 7 && minwh <= 8);
if (num_output >= 8) return (minwh >= 3 && minwh <= 10);
return false;
}
if (num_input >= 16)
{
if (num_output >= 512) return (minwh >= 11 && minwh <= 12);
if (num_output >= 256) return (minwh >= 3 && minwh <= 12);
if (num_output >= 128) return (minwh >= 3 && minwh <= 6)
|| (minwh >= 9 && minwh <= 18);
if (num_output >= 64) return (minwh >= 3 && minwh <= 4)
|| (minwh >= 7 && minwh <= 8)
|| (minwh >= 11 && minwh <= 12)
|| (minwh >= 15 && minwh <= 18);
if (num_output >= 32) return (minwh >= 3 && minwh <= 4)
|| (minwh >= 9 && minwh <= 10);
if (num_output >= 16) return (minwh >= 3 && minwh <= 10);
if (num_output >= 8) return (minwh >= 3 && minwh <= 8)
|| (minwh >= 11 && minwh <= 12);
return false;
}
if (num_input >= 8)
{
if (num_output >= 128) return false;
if (num_output >= 64) return (minwh >= 3 && minwh <= 4)
|| (minwh >= 7 && minwh <= 14)
|| (minwh >= 47 && minwh <= 48);
if (num_output >= 32) return (minwh >= 3 && minwh <= 6)
|| (minwh >= 15 && minwh <= 16);
if (num_output >= 16) return (minwh >= 3 && minwh <= 6)
|| (minwh >= 9 && minwh <= 14)
|| (minwh >= 47 && minwh <= 212);
if (num_output >= 8) return true;
return false;
}
return false;
}
|
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $0x200, %edi # imm = 0x200
jl 0x9bc47
cmpl $0x200, %esi # imm = 0x200
jge 0x9bc83
cmpl $0x100, %esi # imm = 0x100
jge 0x9bc83
cmpl $0x80, %esi
jge 0x9bc83
cmpl $0x40, %esi
jge 0x9bcbe
jmp 0x9bd01
cmpl $0x100, %edi # imm = 0x100
jl 0x9bc73
cmpl $0x200, %esi # imm = 0x200
jge 0x9bc83
cmpl $0x100, %esi # imm = 0x100
jge 0x9bc83
cmpl $0x80, %esi
jl 0x9bcf4
addl $-0x3, %edx
cmpl $0xa, %edx
jmp 0x9bc89
cmpl $0x80, %edi
jl 0x9bc8d
cmpl $0x200, %esi # imm = 0x200
jl 0x9bcb6
addl $-0x3, %edx
cmpl $0xc, %edx
setb %al
retq
cmpl $0x40, %edi
jl 0x9bcc6
cmpl $0x200, %esi # imm = 0x200
jl 0x9bd19
leal -0x3(%rdx), %ecx
cmpl $0xa, %ecx
jae 0x9bcae
movb $0x1, %al
movl $0x33f, %esi # imm = 0x33F
btl %ecx, %esi
jb 0x9bc8c
addl $-0xf, %edx
jmp 0x9bd7a
cmpl $0x100, %esi # imm = 0x100
jl 0x9bd29
leal -0x3(%rdx), %eax
cmpl $0x6, %eax
jmp 0x9bce5
cmpl $0x20, %edi
jl 0x9bd3c
cmpl $0x200, %esi # imm = 0x200
jge 0x9bcdf
cmpl $0x100, %esi # imm = 0x100
jl 0x9bd99
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0xb, %edx
cmpl $0x2, %edx
setb %al
orb %cl, %al
retq
cmpl $0x40, %esi
jl 0x9bd01
addl $-0x3, %edx
jmp 0x9be42
cmpl $0x20, %esi
jge 0x9bd77
cmpl $0x10, %esi
jge 0x9bd77
cmpl $0x8, %esi
jl 0x9bd96
jmp 0x9be17
cmpl $0x100, %esi # imm = 0x100
jl 0x9bd51
addl $-0x7, %edx
jmp 0x9be42
cmpl $0x80, %esi
jl 0x9bd6e
addl $-0x3, %edx
cmpl $0x8, %edx
jmp 0x9bc89
cmpl $0x10, %edi
jl 0x9bd82
cmpl $0x200, %esi # imm = 0x200
jl 0x9bdb3
addl $-0xb, %edx
jmp 0x9be42
cmpl $0x80, %esi
jl 0x9bddf
leal -0x3(%rdx), %eax
cmpl $0x6, %eax
setb %cl
addl $-0x13, %edx
cmpl $0x4, %edx
jmp 0x9bcee
cmpl $0x40, %esi
jl 0x9be05
addl $-0x3, %edx
cmpl $0x6, %edx
jmp 0x9bc89
cmpl $0x8, %edi
setl %al
cmpl $0x7f, %esi
setg %cl
orb %al, %cl
je 0x9be22
xorl %eax, %eax
retq
cmpl $0x80, %esi
jl 0x9be4a
leal -0x3(%rdx), %eax
cmpl $0x2, %eax
setb %cl
addl $-0x7, %edx
jmp 0x9bdd7
cmpl $0x100, %esi # imm = 0x100
jge 0x9bc6b
cmpl $0x80, %esi
jl 0x9be89
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0x9, %edx
cmpl $0xa, %edx
jmp 0x9bcee
cmpl $0x40, %esi
jge 0x9bc6b
cmpl $0x20, %esi
jge 0x9bc6b
cmpl $0x10, %esi
jge 0x9bc6b
cmpl $0x8, %esi
jge 0x9bc6b
jmp 0x9bd96
cmpl $0x20, %esi
jge 0x9bd31
cmpl $0x10, %esi
jl 0x9bd0b
addl $-0x3, %edx
cmpl $0x4, %edx
jmp 0x9bc89
cmpl $0x40, %esi
jl 0x9be73
leal -0x3(%rdx), %ecx
cmpl $0xc, %ecx
jae 0x9be3f
movb $0x1, %al
movl $0xff3, %esi # imm = 0xFF3
btl %ecx, %esi
jb 0x9bc8c
addl $-0x2f, %edx
cmpl $0x2, %edx
jmp 0x9bc89
cmpl $0x40, %esi
jge 0x9bd77
cmpl $0x20, %esi
jge 0x9bd21
cmpl $0x10, %esi
jge 0x9bd21
cmpl $0x8, %esi
jge 0x9bd31
jmp 0x9bd96
cmpl $0x20, %esi
jl 0x9beae
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0xf, %edx
jmp 0x9bceb
cmpl $0x40, %esi
jl 0x9bed9
leal -0x3(%rdx), %ecx
cmpl $0xa, %ecx
jae 0x9bea6
movb $0x1, %al
movl $0x333, %esi # imm = 0x333
btl %ecx, %esi
jb 0x9bc8c
addl $-0xf, %edx
jmp 0x9be1a
cmpl $0x10, %esi
jl 0x9beef
leal -0x3(%rdx), %ecx
cmpl $0xc, %ecx
jae 0x9becb
movb $0x1, %al
movl $0xfcf, %esi # imm = 0xFCF
btl %ecx, %esi
jb 0x9bc8c
addl $-0x2f, %edx
cmpl $0xa6, %edx
jmp 0x9bc89
cmpl $0x20, %esi
jl 0x9bef6
leal -0x3(%rdx), %eax
cmpl $0x2, %eax
setb %cl
addl $-0x9, %edx
jmp 0x9bceb
cmpl $0x8, %esi
setge %al
retq
cmpl $0x10, %esi
jge 0x9bd31
cmpl $0x8, %esi
jge 0x9bcbe
jmp 0x9bd96
nop
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_x86.cpp
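
Note: spot-checks for the table above, with expected values read directly off the source (not re-profiled). Like the winograd63 variant, the function is file-static, so same-TU visibility is assumed.

#include <cassert>

static void winograd23_spot_checks()
{
    assert(test_prefer_winograd23(512, 512, 10, 14)); // minwh = 10, in [3,14]
    assert(!test_prefer_winograd23(512, 512, 20, 20)); // minwh = 20, above 14
    assert(test_prefer_winograd23(8, 8, 100, 5));      // num_output >= 8: always true
    assert(!test_prefer_winograd23(4, 512, 10, 10));   // num_input < 8: always false
}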
|
ncnn::Convolution_x86::destroy_pipeline(ncnn::Option const&)
|
int Convolution_x86::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (convolution_dilation1)
{
convolution_dilation1->destroy_pipeline(opt);
delete convolution_dilation1;
convolution_dilation1 = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x9bf50
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x9bf4b
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x9bf7f
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x9bf77
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x180(%rbx)
movq 0x188(%rbx), %rdi
testq %rdi, %rdi
je 0x9bfae
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x188(%rbx), %rdi
testq %rdi, %rdi
je 0x9bfa6
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x188(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_x86.cpp
|
virtual thunk to ncnn::Convolution_x86::destroy_pipeline(ncnn::Option const&)
|
int Convolution_x86::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (convolution_dilation1)
{
convolution_dilation1->destroy_pipeline(opt);
delete convolution_dilation1;
convolution_dilation1 = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
}
|
pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x9bf20
xorl %eax, %eax
popq %rcx
retq
nop
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_x86.cpp
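
Note: the thunk above loads the vptr, reads the adjustment stored at offset -0x38 in the vtable, adds it to `this`, and only then calls the real body at 0x9bf20. Such this-adjusting virtual thunks appear when a method is reached through a virtual base; an illustrative hierarchy (not ncnn's exact one) is sketched below.

struct Base { virtual ~Base() {} virtual int destroy() { return 0; } };
struct Mid : virtual Base { };
struct Impl : Mid {
    int destroy() override { return 0; } // reached via Base*: the compiler
                                         // emits a this-adjusting virtual thunk
};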
|
ncnn::Convolution_x86::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Convolution_x86::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int elembits = bottom_blob.elembits();
Mat bottom_blob_int8 = bottom_blob;
if (elembits != 8)
{
Option opt_q = opt;
opt_q.blob_allocator = opt.workspace_allocator;
quantize_to_int8(bottom_blob, bottom_blob_int8, bottom_blob_int8_scales, opt_q);
}
// NCNN_LOGE("Convolution_arm input %d x %d ksize=%d %d stride=%d %d", w, h, kernel_w, kernel_h, stride_w, stride_h);
Mat bottom_blob_bordered;
make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
int w = bottom_blob_bordered.w;
int h = bottom_blob_bordered.h;
int channels = bottom_blob_bordered.c;
int elempack = bottom_blob_bordered.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
bool use_int8_requantize = int8_scale_term > 100;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
if (use_int8_requantize)
out_elempack = num_output % 8 == 0 ? 8 : 1;
else
out_elempack = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
// NCNN_LOGE("forward_int8_arm %d %d %d %d %d", w, h, bottom_blob_bordered.c, elempack, out_elempack);
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
const int num_input = channels * elempack;
int out_elempack_int32 = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
Mat top_blob_int32;
top_blob_int32.create(outw, outh, num_output / out_elempack_int32, (size_t)(4u * out_elempack_int32), out_elempack_int32, opt.workspace_allocator);
if (top_blob_int32.empty())
return -100;
#if __SSE2__
if (elempack == 8 && out_elempack_int32 == 4)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv1x1s1_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv1x1s2_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv3x3s1_winograd43_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
}
else if (opt.use_sgemm_convolution)
{
convolution_im2col_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
else
{
convolution_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
}
if (elempack == 1 && out_elempack_int32 == 4)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv1x1s1_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv1x1s2_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv3x3s1_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv3x3s2_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 7 && kernel_h == 7 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv7x7s2_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (opt.use_sgemm_convolution) // TODO better condition && num_input >= 8 && num_output >= 8)
{
convolution_im2col_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
else
{
convolution_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
}
if (elempack == 8 && out_elempack_int32 == 1)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv1x1s1_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv1x1s2_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv3x3s1_winograd43_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
}
else if (opt.use_sgemm_convolution) // TODO better condition && num_input >= 8 && num_output >= 8)
{
convolution_im2col_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
else
{
convolution_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
}
#endif // __SSE2__
if (elempack == 1 && out_elempack_int32 == 1)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv1x1s1_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
conv1x1s2_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
}
else if (opt.use_winograd_convolution && opt.use_winograd23_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1 && num_input >= 16 && num_output >= 16)
{
conv3x3s1_winograd23_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd23_data, opt);
// conv3x3s1_winograd43_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
}
else if (opt.use_sgemm_convolution)
{
convolution_im2col_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
else
{
convolution_int8(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
}
}
if (use_int8_requantize)
{
requantize_from_int32_to_int8(top_blob_int32, top_blob, scale_in_data, top_blob_int8_scales, bias_data, activation_type, activation_params, opt);
}
else
{
dequantize_from_int32(top_blob_int32, top_blob, scale_in_data, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
return 0;
}
|
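Note: a sketch of the packing decision at the top of the function, isolated so the two paths are easy to compare. The inputs are illustrative; the rule itself is the one in the source, and it only applies when opt.use_packing_layout is set.

static void out_elempack_example()
{
    int num_output = 64, int8_scale_term = 101;       // illustrative inputs
    bool use_int8_requantize = int8_scale_term > 100; // requantize back to int8?
    int out_elempack = use_int8_requantize
                           ? (num_output % 8 == 0 ? 8 : 1)  // int8 out: pack 8
                           : (num_output % 4 == 0 ? 4 : 1); // fp32 out: pack 4
    size_t out_elemsize = use_int8_requantize ? 1u * out_elempack  // 1 byte/elem
                                              : 4u * out_elempack; // 4 bytes/elem
    (void)out_elemsize;
}
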
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x6f8, %rsp # imm = 0x6F8
movq %rcx, %r14
movq %rdx, %rbx
movq %rdi, 0x20(%rsp)
movl 0x18(%rsi), %ecx
movq 0x10(%rsi), %rdi
testl %ecx, %ecx
je 0xa62eb
leal (,%rdi,8), %eax
cltd
idivl %ecx
cmpl $0x8, %eax
sete %al
jmp 0xa62ed
xorl %eax, %eax
movq (%rsi), %rdx
movq %rdx, 0x628(%rsp)
movq 0x8(%rsi), %rdx
movq %rdx, 0x630(%rsp)
movq %rdi, 0x638(%rsp)
movl %ecx, 0x640(%rsp)
movq 0x20(%rsi), %rcx
movq %rcx, 0x648(%rsp)
movdqu 0x28(%rsi), %xmm0
movdqu %xmm0, 0x650(%rsp)
movl 0x38(%rsi), %ecx
movl %ecx, 0x660(%rsp)
movq 0x40(%rsi), %rcx
movq %rcx, 0x668(%rsp)
testq %rdx, %rdx
je 0xa634b
lock
incl (%rdx)
testb %al, %al
jne 0xa63b0
movdqu (%r14), %xmm0
movdqu 0x10(%r14), %xmm1
movdqu 0x20(%r14), %xmm2
movdqu 0x30(%r14), %xmm3
leaq 0x360(%rsp), %rcx
movdqa %xmm3, 0x30(%rcx)
movdqa %xmm2, 0x20(%rcx)
movdqa %xmm1, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
movq 0x20(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
leaq 0x238(%rdx,%rax), %rdx
leaq 0x628(%rsp), %rax
movq %rsi, %rdi
movq %rax, %rsi
callq 0x652e3
leaq 0x90(%rsp), %rdx
andq $0x0, 0x40(%rdx)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdx)
movdqu %xmm0, 0xc(%rdx)
movdqa %xmm0, 0x20(%rdx)
movdqu %xmm0, 0x2c(%rdx)
movq 0x20(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rdi
addq %rcx, %rdi
leaq 0x628(%rsp), %rsi
movq %r14, %rcx
callq 0x96742
pushq $-0x64
popq %rbp
cmpq $0x0, 0x90(%rsp)
je 0xa9168
movslq 0xc8(%rsp), %rcx
movq 0xd0(%rsp), %rax
imulq %rcx, %rax
testq %rax, %rax
je 0xa9168
movq %rcx, 0x100(%rsp)
movq 0x20(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rdi
movl 0xd0(%r8,%rdi), %ecx
movl 0xd4(%r8,%rdi), %eax
decl %eax
imull 0xdc(%r8,%rdi), %eax
notl %eax
movl 0xd8(%r8,%rdi), %esi
decl %esi
imull 0xe0(%r8,%rdi), %esi
notl %esi
addl 0xbc(%rsp), %eax
cltd
idivl 0xe4(%r8,%rdi)
movl %eax, %r13d
incl %r13d
addl 0xc0(%rsp), %esi
movl %esi, %eax
cltd
idivl 0xe8(%r8,%rdi)
movl %eax, %r12d
incl %r12d
movl 0x108(%r8,%rdi), %r15d
cmpb $0x1, 0x27(%r14)
jne 0xa64bb
cmpl $0x65, %r15d
jl 0xa64c1
pushq $0x8
popq %rax
testb $0x7, %cl
pushq $0x1
popq %r9
cmovel %eax, %r9d
jmp 0xa64d0
pushq $0x1
popq %r9
jmp 0xa64d0
xorl %eax, %eax
testb $0x3, %cl
sete %al
leal (%rax,%rax,2), %r9d
incl %r9d
movl 0xa8(%rsp), %eax
movl %eax, 0x70(%rsp)
leal (,%r9,4), %r8d
cmpl $0x65, %r15d
cmovgel %r9d, %r8d
movl %ecx, %eax
cltd
idivl %r9d
movq 0x8(%r14), %rcx
movq %rcx, (%rsp)
movq %rbx, %rdi
movl %r13d, %esi
movl %r12d, %edx
movl %eax, %ecx
callq 0x628f2
cmpq $0x0, (%rbx)
je 0xa9168
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0xa9168
movl %r15d, 0x34c(%rsp)
movq 0x20(%rsp), %rsi
movq (%rsi), %rax
cmpb $0x1, 0x27(%r14)
movq %rbx, 0x338(%rsp)
jne 0xa655e
movq -0x18(%rax), %rcx
xorl %edx, %edx
testb $0x3, 0xd0(%rsi,%rcx)
sete %dl
leal (%rdx,%rdx,2), %r9d
incl %r9d
jmp 0xa6562
pushq $0x1
popq %r9
leaq 0x110(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq -0x18(%rax), %rax
movl 0xd0(%rsi,%rax), %eax
cltd
idivl %r9d
leal (,%r9,4), %r8d
movq %r14, 0x40(%rsp)
movq 0x10(%r14), %rcx
movq %rcx, (%rsp)
movl %r13d, %esi
movl %r12d, %edx
movl %eax, %ecx
movq %r9, 0x88(%rsp)
callq 0x628f2
pushq $-0x64
popq %rbp
cmpq $0x0, 0x110(%rsp)
je 0xa9131
movslq 0x148(%rsp), %rcx
movq 0x150(%rsp), %rax
movq %rcx, 0x38(%rsp)
imulq %rcx, %rax
testq %rax, %rax
je 0xa9131
movl 0x70(%rsp), %ebx
cmpl $0x8, %ebx
movq 0x20(%rsp), %r14
movq 0x88(%rsp), %r15
jne 0xa6e2f
cmpl $0x4, %r15d
jne 0xa6e2f
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edi
cmpl $0x1, %edi
movq %r14, %rcx
movq 0x40(%rsp), %r14
jne 0xa67cd
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xa67cd
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa668e
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa668e
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa668e
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa668e
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xae99b
jmp 0xa9087
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa67cd
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa67cd
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xa67cd
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xa67cd
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r13d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x13c(%rsp), %ebp
movl 0x140(%rsp), %r12d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r12d, %edx
movl %r13d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
movq 0x90(%rsp), %rax
movq 0x360(%rsp), %rcx
xorl %esi, %esi
testl %ebp, %ebp
cmovlel %esi, %ebp
movslq %ebx, %rdi
testl %r12d, %r12d
cmovlel %esi, %r12d
testl %r13d, %r13d
cmovlel %esi, %r13d
shlq $0x3, %rdi
cmpq %r13, %rsi
je 0xa91ea
movq 0xd0(%rsp), %r8
imulq %rsi, %r8
imulq 0xa0(%rsp), %r8
addq %rax, %r8
movq 0x3a0(%rsp), %r9
imulq %rsi, %r9
imulq 0x370(%rsp), %r9
addq %rcx, %r9
xorl %r10d, %r10d
movl %ebp, %r11d
cmpl %r12d, %r10d
je 0xa67c8
subl $0x1, %r11d
jb 0xa67c0
movq (%r8), %rbx
movq %rbx, (%r9)
addq $0x10, %r8
addq $0x8, %r9
jmp 0xa67aa
addq %rdi, %r8
incl %r10d
jmp 0xa67a2
incq %rsi
jmp 0xa6766
cmpb $0x1, 0x1c(%r14)
jne 0xa686f
cmpl $0x3, %edi
jne 0xa686f
cmpb $0x0, 0x38(%r14)
je 0xa686f
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xa686f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa686f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa686f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa686f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa686f
callq 0x732db
testl %eax, %eax
je 0xa94ad
movq 0x20(%rsp), %rax
leaq 0xf0(%rax), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x40(%rsp), %rcx
callq 0x13b55a
jmp 0xa9087
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %edx
movslq 0xdc(%rcx,%rax), %r15
movslq 0xe0(%rcx,%rax), %rsi
movq %rsi, 0x18(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x48(%rsp)
movl 0xbc(%rsp), %eax
movl %eax, 0x60(%rsp)
movl 0xc8(%rsp), %r13d
movq %r14, %rax
movslq 0x13c(%rsp), %r14
movl 0x140(%rsp), %r12d
cmpb $0x1, 0x1d(%rax)
movq %rdi, 0x30(%rsp)
jne 0xa6a71
movl %r12d, %esi
imull %r14d, %esi
movq %rdx, %rbp
imull %edi, %edx
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r13d, %ecx
callq 0x628f2
movl 0x60(%rsp), %edx
imull 0x48(%rsp), %edx
movl %ebx, %ecx
imull %r14d, %ecx
xorl %esi, %esi
testl %r14d, %r14d
cmovlel %esi, %r14d
testl %r12d, %r12d
cmovlel %esi, %r12d
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x50(%rsp)
movq 0x30(%rsp), %r9
testl %r9d, %r9d
cmovlel %esi, %r9d
movslq 0xbc(%rsp), %rax
testl %ebp, %ebp
cmovlel %esi, %ebp
movq 0x90(%rsp), %rdi
movq %rdi, 0x48(%rsp)
testl %r13d, %r13d
cmovlel %esi, %r13d
movq 0x360(%rsp), %rsi
movq %rsi, 0x38(%rsp)
imulq 0x18(%rsp), %rax
movq %rax, 0x58(%rsp)
movslq %edx, %r8
shlq $0x3, %r8
movslq %ecx, %rax
shlq $0x3, %rax
subq %rax, %r8
shlq $0x3, %rbx
movq %rbp, %rcx
xorl %edx, %edx
movq %rbp, 0x28(%rsp)
cmpq %r13, %rdx
je 0xa6afe
movq 0xd0(%rsp), %rsi
imulq %rdx, %rsi
movq 0xa0(%rsp), %rdi
imulq %rdi, %rsi
movq 0x3a0(%rsp), %r11
movq %rdx, 0x30(%rsp)
imulq %rdx, %r11
imulq 0x370(%rsp), %r11
addq 0x48(%rsp), %rsi
movq %rsi, 0x60(%rsp)
addq 0x38(%rsp), %r11
imulq 0x58(%rsp), %rdi
movq %rdi, 0x18(%rsp)
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0xa6a64
movq 0x18(%rsp), %r10
imulq %rbp, %r10
addq 0x60(%rsp), %r10
xorl %edx, %edx
cmpq %r9, %rdx
je 0xa6a5a
movq %rdx, %rax
imulq %r15, %rax
leaq (%r10,%rax,8), %rdi
xorl %ecx, %ecx
cmpl %r12d, %ecx
je 0xa6a55
movl %r14d, %eax
subl $0x1, %eax
jb 0xa6a4e
movq (%rdi), %rsi
movq %rsi, (%r11)
addq $0x8, %r11
addq %rbx, %rdi
jmp 0xa6a3a
addq %r8, %rdi
incl %ecx
jmp 0xa6a32
incq %rdx
jmp 0xa6a20
incq %rbp
movq 0x28(%rsp), %rcx
jmp 0xa6a0b
movq 0x30(%rsp), %rdx
incq %rdx
jmp 0xa69b4
movl %edx, %ebp
imull %edi, %ebp
movslq %ebp, %rsi
leaq 0x360(%rsp), %rdi
movq %rdx, 0x28(%rsp)
leaq 0x1c0(%rsp), %rdx
callq 0x73bbe
movl 0x60(%rsp), %ecx
imull 0x18(%rsp), %ecx
movl %r15d, %eax
movq 0x30(%rsp), %rdx
imull %edx, %eax
subl %eax, %ecx
movl %ecx, 0x60(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x28(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x360(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
cmpl %r11d, %ecx
je 0xa6b6c
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xa6af3
movl %esi, (%rax,%r9,4)
incq %r9
addl %r15d, %esi
jmp 0xa6ae2
addl 0x60(%rsp), %esi
incl %ecx
addl %r10d, %edx
jmp 0xa6acf
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x50(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0xb2c76
movq 0x368(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa6e2f
lock
decl (%rax)
jne 0xa6e2f
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa924d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa6e2f
movl %r14d, %ecx
shll $0x3, %ebx
shlq $0x2, %r14
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %ecx, %ecx
movl $0x0, %esi
cmovgl %ecx, %esi
movq %rsi, 0x18(%rsp)
testl %r12d, %r12d
cmovlel %edx, %r12d
movq 0x38(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x38(%rsp)
movq %rbp, %rsi
shlq $0x5, %rsi
shlq $0x2, %rbp
cmpq 0x38(%rsp), %rdx
je 0xa6e11
movq 0x150(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x120(%rsp), %rcx
addq 0x110(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rdx, 0x30(%rsp)
cmpq %r12, %rcx
je 0xa6e09
movq %rcx, 0x58(%rsp)
imulq 0x48(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r10d, %r10d
cmpq 0x18(%rsp), %r10
je 0xa6dee
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %r11
imulq %rdx, %r11
imulq 0x28(%rcx), %r11
addq 0x18(%rcx), %r11
movl %ebx, %ecx
imull %r10d, %ecx
movslq 0xbc(%rsp), %rdi
imulq 0x28(%rsp), %rdi
movq 0xa0(%rsp), %r8
movq 0xd0(%rsp), %rdx
imulq %r8, %rdi
addq 0x90(%rsp), %rdi
imulq %r8, %rdx
movslq %ecx, %r8
addq %rdi, %r8
pxor %xmm2, %xmm2
xorl %ecx, %ecx
pxor %xmm1, %xmm1
pxor %xmm3, %xmm3
pxor %xmm0, %xmm0
cmpq %r13, %rcx
je 0xa6d94
movq %rdx, %r15
imulq %rcx, %r15
addq %r8, %r15
xorl %edi, %edi
cmpq %rdi, %rbp
je 0xa6d89
movslq (%rax,%rdi), %r9
movq (%r15,%r9,8), %xmm12
pxor %xmm4, %xmm4
pcmpgtb %xmm12, %xmm4
punpcklbw %xmm4, %xmm12 # xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
movdqu (%r11,%rdi,8), %xmm5
movdqu 0x10(%r11,%rdi,8), %xmm4
pxor %xmm6, %xmm6
pcmpgtb %xmm5, %xmm6
pxor %xmm8, %xmm8
pcmpgtb %xmm4, %xmm8
movdqa %xmm5, %xmm7
punpcklbw %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
punpckhbw %xmm6, %xmm5 # xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
movdqa %xmm4, %xmm6
punpcklbw %xmm8, %xmm6 # xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
punpckhbw %xmm8, %xmm4 # xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
movdqa %xmm12, %xmm8
pmullw %xmm7, %xmm8
pmulhw %xmm12, %xmm7
movdqa %xmm12, %xmm9
pmullw %xmm5, %xmm9
pmulhw %xmm12, %xmm5
movdqa %xmm12, %xmm10
pmullw %xmm6, %xmm10
pmulhw %xmm12, %xmm6
movdqa %xmm12, %xmm11
pmullw %xmm4, %xmm11
pmulhw %xmm12, %xmm4
movdqa %xmm8, %xmm12
punpcklwd %xmm7, %xmm12 # xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3]
paddd %xmm12, %xmm0
movdqa %xmm9, %xmm12
punpcklwd %xmm5, %xmm12 # xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
paddd %xmm12, %xmm3
movdqa %xmm10, %xmm12
punpcklwd %xmm6, %xmm12 # xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3]
paddd %xmm12, %xmm1
movdqa %xmm11, %xmm12
punpcklwd %xmm4, %xmm12 # xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
paddd %xmm12, %xmm2
punpckhwd %xmm7, %xmm8 # xmm8 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
paddd %xmm8, %xmm0
punpckhwd %xmm5, %xmm9 # xmm9 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
paddd %xmm9, %xmm3
punpckhwd %xmm6, %xmm10 # xmm10 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
paddd %xmm10, %xmm1
punpckhwd %xmm4, %xmm11 # xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
paddd %xmm11, %xmm2
addq $0x4, %rdi
jmp 0xa6c86
addq %rsi, %r11
incq %rcx
jmp 0xa6c71
movdqa %xmm0, %xmm4
punpckldq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
movdqa %xmm1, %xmm5
punpckldq %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
punpckhdq %xmm3, %xmm0 # xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
punpckhdq %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
movdqa %xmm4, %xmm2
punpcklqdq %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
punpckhqdq %xmm5, %xmm4 # xmm4 = xmm4[1],xmm5[1]
paddd %xmm2, %xmm4
movdqa %xmm0, %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm2, %xmm0
paddd %xmm4, %xmm0
movq %r10, %rcx
shlq $0x4, %rcx
movq 0x60(%rsp), %rdx
movdqu %xmm0, (%rdx,%rcx)
incq %r10
movq 0x30(%rsp), %rdx
jmp 0xa6c04
movq 0x60(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x60(%rsp)
movq 0x58(%rsp), %rcx
incq %rcx
jmp 0xa6be8
incq %rdx
jmp 0xa6bb4
leaq 0x360(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
cmpl $0x1, %ebx
jne 0xa7bfe
cmpl $0x4, %r15d
jne 0xa7bfe
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %ecx
cmpl $0x7, %ecx
je 0xa6eda
cmpl $0x3, %ecx
je 0xa70fe
cmpl $0x1, %ecx
jne 0xa76b7
cmpl $0x1, 0xd8(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xa750e
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xa750e
cmpl $0x1, 0xe4(%r14,%rax)
jne 0xa750e
cmpl $0x1, 0xe8(%r14,%rax)
jne 0xa750e
leaq 0x60(%r14), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x40(%rsp), %rcx
callq 0xaea84
jmp 0xa9087
cmpl $0x7, 0xd8(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xa76b7
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x13c(%rsp), %ebx
movl 0x140(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x31
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x30(%rsp)
subl %ebx, %r14d
addl %r14d, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rsi
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x28(%rsp)
cmpq %r15, %rdx
je 0xa92b0
movslq 0xbc(%rsp), %r9
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %rax
imulq %rax, %rdi
addq 0x90(%rsp), %rdi
movq 0x3a0(%rsp), %r8
movq %rdx, 0x18(%rsp)
imulq %rdx, %r8
imulq 0x370(%rsp), %r8
addq 0x360(%rsp), %r8
imulq %rax, %r9
movq %r9, 0x60(%rsp)
xorl %r9d, %r9d
cmpq $0x7, %r9
je 0xa70ec
movq 0x60(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x7, %r11
je 0xa70df
leaq (%r10,%r11), %rax
xorl %r14d, %r14d
cmpl %ebp, %r14d
je 0xa70d7
leaq (%rax,%rsi), %r12
xorl %r13d, %r13d
movq %rax, %r15
leal 0x3(%r13), %edx
cmpl %ebx, %edx
jge 0xa70ab
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
movb 0x4(%r15), %dl
movb %dl, 0x2(%r8,%r13)
movb 0x6(%r15), %dl
movb %dl, 0x3(%r8,%r13)
addq $0x8, %r15
addq $0x4, %r13
addq $0x8, %r12
jmp 0xa7057
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
addq $0x4, %r15
addq $0x2, %r13
addq $0x4, %r12
leal 0x1(%r13), %edx
cmpl %ebx, %edx
jl 0xa708f
jmp 0xa70c4
movb (%rax,%r13,2), %dl
movb %dl, (%r8,%r13)
incq %r13
addq $0x2, %r12
cmpl %ebx, %r13d
jl 0xa70b5
incl %r14d
addq %r13, %r8
movq %r12, %rax
jmp 0xa7044
incq %r11
jmp 0xa7033
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xa701a
movq 0x18(%rsp), %rdx
incq %rdx
movq 0x28(%rsp), %r15
jmp 0xa6fbb
cmpl $0x3, 0xd8(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xa72f9
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xa72f9
cmpl $0x1, 0xe4(%r14,%rax)
jne 0xa72f9
cmpl $0x1, 0xe8(%r14,%rax)
jne 0xa72f9
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x13c(%rsp), %ebx
movl 0x140(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x9
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x18(%rsp)
subl %ebx, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rax
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x60(%rsp)
cmpq %r15, %rdx
je 0xa9319
movslq 0xbc(%rsp), %rsi
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %r9
imulq %r9, %rdi
addq 0x90(%rsp), %rdi
movq 0x3a0(%rsp), %r8
imulq %rdx, %r8
imulq 0x370(%rsp), %r8
addq 0x360(%rsp), %r8
imulq %r9, %rsi
xorl %r9d, %r9d
cmpq $0x3, %r9
je 0xa72ec
movq %rsi, %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xa72df
leaq (%r10,%r11), %r14
xorl %r15d, %r15d
cmpl %ebp, %r15d
je 0xa72d7
xorl %r12d, %r12d
leal 0x3(%r12), %r13d
cmpl %ebx, %r13d
jge 0xa72ad
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
movb 0x1(%r14,%r12), %r13b
movb %r13b, 0x1(%r8,%r12)
movb 0x2(%r14,%r12), %r13b
movb %r13b, 0x2(%r8,%r12)
movb 0x3(%r14,%r12), %r13b
movb %r13b, 0x3(%r8,%r12)
addq $0x4, %r12
jmp 0xa7261
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
movb 0x1(%r14,%r12), %r13b
movb %r13b, 0x1(%r8,%r12)
addq $0x2, %r12
leal 0x1(%r12), %r13d
cmpl %ebx, %r13d
jl 0xa7297
jmp 0xa72c4
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
incq %r12
cmpl %ebx, %r12d
jl 0xa72b9
addq %rax, %r14
addq %r12, %r14
incl %r15d
addq %r12, %r8
jmp 0xa7259
incq %r11
jmp 0xa7248
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xa7231
incq %rdx
movq 0x60(%rsp), %r15
jmp 0xa71dc
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xa76b7
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x13c(%rsp), %ebx
movl 0x140(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x9
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x30(%rsp)
subl %ebx, %r14d
addl %r14d, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rsi
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x28(%rsp)
cmpq %r15, %rdx
je 0xa9382
movslq 0xbc(%rsp), %r9
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %rax
imulq %rax, %rdi
addq 0x90(%rsp), %rdi
movq 0x3a0(%rsp), %r8
movq %rdx, 0x18(%rsp)
imulq %rdx, %r8
imulq 0x370(%rsp), %r8
addq 0x360(%rsp), %r8
imulq %rax, %r9
movq %r9, 0x60(%rsp)
xorl %r9d, %r9d
cmpq $0x3, %r9
je 0xa74fc
movq 0x60(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xa74ef
leaq (%r10,%r11), %rax
xorl %r14d, %r14d
cmpl %ebp, %r14d
je 0xa74e7
leaq (%rax,%rsi), %r12
xorl %r13d, %r13d
movq %rax, %r15
leal 0x3(%r13), %edx
cmpl %ebx, %edx
jge 0xa74bb
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
movb 0x4(%r15), %dl
movb %dl, 0x2(%r8,%r13)
movb 0x6(%r15), %dl
movb %dl, 0x3(%r8,%r13)
addq $0x8, %r15
addq $0x4, %r13
addq $0x8, %r12
jmp 0xa7467
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
addq $0x4, %r15
addq $0x2, %r13
addq $0x4, %r12
leal 0x1(%r13), %edx
cmpl %ebx, %edx
jl 0xa749f
jmp 0xa74d4
movb (%rax,%r13,2), %dl
movb %dl, (%r8,%r13)
incq %r13
addq $0x2, %r12
cmpl %ebx, %r13d
jl 0xa74c5
incl %r14d
addq %r13, %r8
movq %r12, %rax
jmp 0xa7454
incq %r11
jmp 0xa7443
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xa742a
movq 0x18(%rsp), %rdx
incq %rdx
movq 0x28(%rsp), %r15
jmp 0xa73cb
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xa76b7
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xa76b7
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xa76b7
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r15d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x13c(%rsp), %ebp
movl 0x140(%rsp), %r13d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %r14
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
xorl %eax, %eax
testl %r13d, %r13d
cmovlel %eax, %r13d
movslq %ebx, %rcx
testl %r15d, %r15d
cmovlel %eax, %r15d
cmpq %r15, %rax
je 0xa93e7
movq 0xd0(%rsp), %rsi
imulq %rax, %rsi
imulq 0xa0(%rsp), %rsi
addq 0x90(%rsp), %rsi
movq 0x3a0(%rsp), %rdi
imulq %rax, %rdi
imulq 0x370(%rsp), %rdi
addq 0x360(%rsp), %rdi
xorl %r8d, %r8d
cmpl %r13d, %r8d
je 0xa76af
leaq (%rsi,%rcx), %r9
xorl %r10d, %r10d
movq %rsi, %r11
leal 0x3(%r10), %ebx
cmpl %ebp, %ebx
jge 0xa7683
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
movb 0x4(%r11), %bl
movb %bl, 0x2(%rdi,%r10)
movb 0x6(%r11), %bl
movb %bl, 0x3(%rdi,%r10)
addq $0x8, %r11
addq $0x4, %r10
addq $0x8, %r9
jmp 0xa762f
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
addq $0x4, %r11
addq $0x2, %r10
addq $0x4, %r9
leal 0x1(%r10), %ebx
cmpl %ebp, %ebx
jl 0xa7667
jmp 0xa769c
movb (%rsi,%r10,2), %r11b
movb %r11b, (%rdi,%r10)
addq $0x2, %r9
incq %r10
cmpl %ebp, %r10d
jl 0xa768d
incl %r8d
addq %r10, %rdi
movq %r9, %rsi
jmp 0xa761c
incq %rax
jmp 0xa75d6
movl 0xd8(%r14,%rax), %ebp
movslq 0xdc(%r14,%rax), %rdx
movq %rdx, 0x18(%rsp)
movslq 0xe0(%r14,%rax), %rdx
movq %rdx, 0x78(%rsp)
movslq 0xe4(%r14,%rax), %rbx
movslq 0xe8(%r14,%rax), %rax
movq %rax, 0x38(%rsp)
movl 0xbc(%rsp), %r15d
movl 0xc8(%rsp), %r13d
movslq 0x13c(%rsp), %r14
movl 0x140(%rsp), %r12d
movq %rbp, 0x48(%rsp)
movq %rcx, 0x58(%rsp)
imull %ecx, %ebp
movq 0x40(%rsp), %rax
cmpb $0x1, 0x1d(%rax)
jne 0xa7944
movl %r12d, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %ebp, %edx
movl %r13d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0xe0(%rsp)
imull 0x38(%rsp), %r15d
movl %r14d, %eax
imull %ebx, %eax
subl %eax, %r15d
leal (%rbx,%rbx), %eax
cltq
leal (%rbx,%rbx,2), %ecx
movslq %ecx, %rcx
xorl %r9d, %r9d
testl %r12d, %r12d
cmovlel %r9d, %r12d
leal (,%rbx,4), %edx
movq 0x58(%rsp), %rsi
testl %esi, %esi
cmovlel %r9d, %esi
movslq %edx, %rdi
movq 0x48(%rsp), %rdx
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x48(%rsp)
movslq %r15d, %r8
testl %r13d, %r13d
cmovlel %r9d, %r13d
movq %rsi, 0x58(%rsp)
cmpq %r13, %r9
je 0xa79dc
movq 0xd0(%rsp), %r10
imulq %r9, %r10
movq 0xa0(%rsp), %rdx
imulq %rdx, %r10
addq 0x90(%rsp), %r10
movq %r10, 0x38(%rsp)
movq 0x3a0(%rsp), %r10
movq %r9, 0x80(%rsp)
imulq %r9, %r10
imulq 0x370(%rsp), %r10
addq 0x360(%rsp), %r10
movslq 0xbc(%rsp), %r9
imulq %rdx, %r9
imulq 0x78(%rsp), %r9
movq %r9, 0x50(%rsp)
xorl %edx, %edx
cmpq 0x48(%rsp), %rdx
je 0xa7934
movq 0x50(%rsp), %r9
imulq %rdx, %r9
addq 0x38(%rsp), %r9
movq %r9, 0x30(%rsp)
xorl %r9d, %r9d
movq %rdx, 0x28(%rsp)
cmpq %rsi, %r9
je 0xa792c
movq %r9, 0x60(%rsp)
imulq 0x18(%rsp), %r9
addq 0x30(%rsp), %r9
xorl %esi, %esi
cmpl %r12d, %esi
je 0xa7915
xorl %r15d, %r15d
xorl %ebp, %ebp
movq %r9, %rdx
leal 0x3(%rbp), %r11d
cmpl %r14d, %r11d
jge 0xa78e7
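# Strided gather with element stride %rbx (%rax = 2*%rbx, %rcx = 3*%rbx,
# %rdi = 4*%rbx): one byte from each of four consecutive strided elements
# per iteration.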
movb (%rdx), %r11b
movb %r11b, (%r10,%rbp)
movb (%rdx,%rbx), %r11b
movb %r11b, 0x1(%r10,%rbp)
movb (%rdx,%rax), %r11b
movb %r11b, 0x2(%r10,%rbp)
movb (%rdx,%rcx), %r11b
movb %r11b, 0x3(%r10,%rbp)
addq %rdi, %rdx
addq $0x4, %rbp
addq %rdi, %r15
jmp 0xa7896
movb (%rdx), %r11b
movb %r11b, (%r10,%rbp)
movb (%rdx,%rbx), %r11b
movb %r11b, 0x1(%r10,%rbp)
addq %rax, %rdx
addq $0x2, %rbp
addq %rax, %r15
leal 0x1(%rbp), %r11d
cmpl %r14d, %r11d
jl 0xa78cd
jmp 0xa7900
movb (%r9,%r15), %dl
movb %dl, (%r10,%rbp)
incq %rbp
addq %rbx, %r15
cmpl %r14d, %ebp
jl 0xa78f2
addq %r8, %r9
addq %r15, %r9
incl %esi
addq %rbp, %r10
jmp 0xa7885
movq 0x60(%rsp), %r9
incq %r9
movq 0x58(%rsp), %rsi
movq 0x28(%rsp), %rdx
jmp 0xa786a
incq %rdx
jmp 0xa7844
movq 0x80(%rsp), %r9
incq %r9
jmp 0xa77d8
movl 0x148(%rsp), %eax
movq %rax, 0x50(%rsp)
movslq %ebp, %rsi
leaq 0x360(%rsp), %rdi
leaq 0x1c0(%rsp), %rdx
callq 0x73bbe
movl %r15d, %ecx
imull 0x78(%rsp), %ecx
movq 0x18(%rsp), %rax
movq 0x58(%rsp), %r15
imull %r15d, %eax
subl %eax, %ecx
movl %ecx, 0x60(%rsp)
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq 0x48(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x360(%rsp), %rax
movl %r15d, %edx
xorl %esi, %esi
xorl %edi, %edi
movq 0x18(%rsp), %r10
cmpl %r11d, %ecx
je 0xa7a4d
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r15), %edi
cmpq %r9, %r8
je 0xa79d1
movl %esi, (%rax,%r9,4)
incq %r9
addl %r10d, %esi
jmp 0xa79c0
addl 0x60(%rsp), %esi
incl %ecx
addl %r15d, %edx
jmp 0xa79ad
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0xe0(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0xb31de
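# Release a shared buffer: atomically decrement the refcount; when it hits
# zero, free through the vtable slot at +0x18, otherwise take the fallback
# at 0xa944a (plain deallocation elsewhere in this function).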
movq 0x368(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa7bfe
lock
decl (%rax)
jne 0xa7bfe
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa944a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa7bfe
movl %r14d, %ecx
shlq $0x2, %r14
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %ecx, %ecx
movl $0x0, %esi
cmovgl %ecx, %esi
movq %rsi, 0x18(%rsp)
testl %r12d, %r12d
cmovlel %edx, %r12d
movq 0x50(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x50(%rsp)
leaq (,%rbp,4), %rsi
cmpq 0x50(%rsp), %rdx
je 0xa7be0
movq 0x150(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x120(%rsp), %rcx
addq 0x110(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rdx, 0x30(%rsp)
cmpq %r12, %rcx
je 0xa7bd8
movq %rcx, 0x58(%rsp)
imulq 0x38(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r8d, %r8d
cmpq 0x18(%rsp), %r8
je 0xa7bbd
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %r11
imulq %rdx, %r11
imulq 0x28(%rcx), %r11
addq 0x18(%rcx), %r11
movslq 0xbc(%rsp), %rcx
imulq 0x28(%rsp), %rcx
movq 0xa0(%rsp), %rdi
movq 0xd0(%rsp), %rdx
imulq %rdi, %rdx
imulq %rdi, %rcx
addq 0x90(%rsp), %rcx
movq %r8, %r10
imulq %rbx, %r10
addq %rcx, %r10
pxor %xmm0, %xmm0
xorl %ecx, %ecx
cmpq %r13, %rcx
je 0xa7b9f
movq %rdx, %r15
imulq %rcx, %r15
addq %r10, %r15
xorl %edi, %edi
cmpq %rdi, %rbp
je 0xa7b97
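# Broadcast one signed byte across all word lanes, sign-extend 8 input
# bytes, then widen the products to 32-bit via pmullw/pmulhw + punpcklwd
# and accumulate four dword sums in %xmm0.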
movslq (%rax,%rdi,4), %r9
movsbl (%r15,%r9), %r9d
movd %r9d, %xmm1
pshuflw $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0,4,5,6,7]
pshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq (%r11,%rdi,4), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
punpcklbw %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
movdqa %xmm1, %xmm3
pmullw %xmm2, %xmm3
pmulhw %xmm1, %xmm2
punpcklwd %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
paddd %xmm3, %xmm0
incq %rdi
jmp 0xa7b4f
addq %rsi, %r11
incq %rcx
jmp 0xa7b3e
movq %r8, %rcx
shlq $0x4, %rcx
movq 0x60(%rsp), %rdx
movdqu %xmm0, (%rdx,%rcx)
incq %r8
movq 0x30(%rsp), %rdx
jmp 0xa7adf
movq 0x60(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x60(%rsp)
movq 0x58(%rsp), %rcx
incq %rcx
jmp 0xa7ac3
incq %rdx
jmp 0xa7a8f
leaq 0x360(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
cmpl $0x8, %ebx
jne 0xa7c9d
cmpl $0x1, %r15d
jne 0xa7c9d
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %ecx
movq %rcx, 0x28(%rsp)
cmpl $0x1, %ecx
movq %r14, %rcx
movq 0x40(%rsp), %r14
jne 0xa7e7b
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xa7e7b
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa7d3c
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa7d3c
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa7d3c
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa7d3c
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xaeb6d
jmp 0xa9087
xorl $0x1, %r15d
movl %ebx, %eax
xorl $0x1, %eax
orl %r15d, %eax
jne 0xa9087
movq 0x20(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rcx,%rax), %edx
movq %rdx, 0x18(%rsp)
cmpl $0x1, %edx
movq 0x40(%rsp), %r14
jne 0xa8590
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xa8590
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa83f0
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa83f0
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa83f0
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa83f0
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xaec56
jmp 0xa9087
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa7e7b
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa7e7b
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xa7e7b
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xa7e7b
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r12d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x13c(%rsp), %ebp
movl 0x140(%rsp), %r13d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
movq 0x90(%rsp), %rax
movq 0x360(%rsp), %rcx
xorl %esi, %esi
testl %ebp, %ebp
cmovlel %esi, %ebp
movslq %ebx, %rdi
testl %r13d, %r13d
cmovlel %esi, %r13d
testl %r12d, %r12d
cmovlel %esi, %r12d
shlq $0x3, %rdi
cmpq %r12, %rsi
je 0xa925a
movq 0xd0(%rsp), %r8
imulq %rsi, %r8
imulq 0xa0(%rsp), %r8
addq %rax, %r8
movq 0x3a0(%rsp), %r9
imulq %rsi, %r9
imulq 0x370(%rsp), %r9
addq %rcx, %r9
xorl %r10d, %r10d
movl %ebp, %r11d
cmpl %r13d, %r10d
je 0xa7e76
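# Copy one qword per element: read with a 16-byte stride, write densely
# (keeps the low 8 bytes of each 16-byte source element).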
subl $0x1, %r11d
jb 0xa7e6e
movq (%r8), %rbx
movq %rbx, (%r9)
addq $0x10, %r8
addq $0x8, %r9
jmp 0xa7e58
addq %rdi, %r8
incl %r10d
jmp 0xa7e50
incq %rsi
jmp 0xa7e14
cmpb $0x1, 0x1c(%r14)
jne 0xa7f1f
cmpl $0x3, 0x28(%rsp)
jne 0xa7f1f
cmpb $0x0, 0x38(%r14)
je 0xa7f1f
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xa7f1f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa7f1f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa7f1f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa7f1f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa7f1f
callq 0x732db
testl %eax, %eax
je 0xa9a5a
movq 0x20(%rsp), %rax
leaq 0xf0(%rax), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x40(%rsp), %rcx
callq 0x1397bb
jmp 0xa9087
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r13d
movslq 0xdc(%rcx,%rax), %rbp
movslq 0xe0(%rcx,%rax), %rdx
movq %rdx, 0x30(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x50(%rsp)
movl 0xbc(%rsp), %eax
movl %eax, 0x18(%rsp)
movl 0xc8(%rsp), %r12d
movq %r14, %rax
movslq 0x13c(%rsp), %r14
movl 0x140(%rsp), %r15d
movq %r13, 0x60(%rsp)
imull 0x28(%rsp), %r13d
cmpb $0x1, 0x1d(%rax)
jne 0xa8123
movl %r15d, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movl 0x18(%rsp), %edx
imull 0x50(%rsp), %edx
movl %ebx, %ecx
imull %r14d, %ecx
xorl %esi, %esi
testl %r14d, %r14d
cmovlel %esi, %r14d
testl %r15d, %r15d
cmovlel %esi, %r15d
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x50(%rsp)
movq 0x28(%rsp), %r9
testl %r9d, %r9d
cmovlel %esi, %r9d
movslq 0xbc(%rsp), %rdi
movq 0x60(%rsp), %rax
testl %eax, %eax
cmovlel %esi, %eax
movq %rax, 0x60(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0x48(%rsp)
testl %r12d, %r12d
cmovlel %esi, %r12d
movq 0x360(%rsp), %rax
movq %rax, 0x38(%rsp)
imulq 0x30(%rsp), %rdi
movq %rdi, 0x58(%rsp)
movslq %edx, %r8
shlq $0x3, %r8
movslq %ecx, %rax
shlq $0x3, %rax
subq %rax, %r8
shlq $0x3, %rbx
cmpq %r12, %rsi
je 0xa81b3
movq 0xd0(%rsp), %rax
imulq %rsi, %rax
movq 0xa0(%rsp), %rcx
imulq %rcx, %rax
movq 0x3a0(%rsp), %r11
movq %rsi, 0x30(%rsp)
imulq %rsi, %r11
imulq 0x370(%rsp), %r11
addq 0x48(%rsp), %rax
movq %rax, 0x18(%rsp)
addq 0x38(%rsp), %r11
imulq 0x58(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r13d, %r13d
cmpq 0x60(%rsp), %r13
je 0xa8116
movq 0x28(%rsp), %r10
imulq %r13, %r10
addq 0x18(%rsp), %r10
xorl %edx, %edx
cmpq %r9, %rdx
je 0xa8111
movq %rdx, %rax
imulq %rbp, %rax
leaq (%r10,%rax,8), %rdi
xorl %ecx, %ecx
cmpl %r15d, %ecx
je 0xa810c
movl %r14d, %eax
subl $0x1, %eax
jb 0xa8105
movq (%rdi), %rsi
movq %rsi, (%r11)
addq $0x8, %r11
addq %rbx, %rdi
jmp 0xa80f1
addq %r8, %rdi
incl %ecx
jmp 0xa80e9
incq %rdx
jmp 0xa80d7
incq %r13
jmp 0xa80c0
movq 0x30(%rsp), %rsi
incq %rsi
jmp 0xa8068
movl 0x148(%rsp), %eax
movq %rax, 0x80(%rsp)
movslq %r13d, %rsi
leaq 0x360(%rsp), %rdi
leaq 0x1c0(%rsp), %rdx
callq 0x73bbe
movl 0x18(%rsp), %ecx
imull 0x30(%rsp), %ecx
movl %ebp, %eax
movq 0x28(%rsp), %rdx
imull %edx, %eax
subl %eax, %ecx
movl %ecx, 0x18(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x60(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x360(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
cmpl %r11d, %ecx
je 0xa8210
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xa81a8
movl %esi, (%rax,%r9,4)
incq %r9
addl %ebp, %esi
jmp 0xa8198
addl 0x18(%rsp), %esi
incl %ecx
addl %r10d, %edx
jmp 0xa8185
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x50(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0xb38fd
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
movq 0x120(%rsp), %rcx
imulq 0x150(%rsp), %rcx
movq %rcx, 0x70(%rsp)
shll $0x3, %ebx
xorl %edx, %edx
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %r12d, %r12d
cmovlel %edx, %r12d
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movq %rcx, 0x28(%rsp)
movq 0x110(%rsp), %rcx
movq %rcx, 0x78(%rsp)
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x80(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x80(%rsp)
xorl %ecx, %ecx
leaq (,%r13,8), %r8
cmpq 0x80(%rsp), %rcx
je 0xa907a
movq 0x70(%rsp), %rdx
imulq %rcx, %rdx
addq 0x78(%rsp), %rdx
movq %rdx, 0x18(%rsp)
xorl %esi, %esi
movq %rcx, 0x38(%rsp)
cmpq %r15, %rsi
je 0xa83e8
movq %rcx, %rdx
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %rdi
imulq %rdx, %rdi
imulq 0x28(%rcx), %rdi
addq 0x18(%rcx), %rdi
movq %rdi, 0x58(%rsp)
movq %rsi, %rcx
imulq 0x50(%rsp), %rcx
movq %rcx, 0x48(%rsp)
xorl %edi, %edi
movq %rsi, 0x30(%rsp)
cmpq 0x28(%rsp), %rdi
je 0xa83cd
movl %ebx, %edx
movq %rdi, 0x60(%rsp)
imull %edi, %edx
movslq 0xbc(%rsp), %rsi
imulq 0x48(%rsp), %rsi
movq 0xa0(%rsp), %rdi
movq 0xd0(%rsp), %rbp
imulq %rdi, %rbp
imulq %rdi, %rsi
addq 0x90(%rsp), %rsi
movslq %edx, %rdx
addq %rsi, %rdx
xorl %r10d, %r10d
xorl %esi, %esi
movq 0x58(%rsp), %r11
cmpq %r12, %r10
je 0xa83b3
movq %rbp, %rdi
imulq %r10, %rdi
addq %rdx, %rdi
xorl %r9d, %r9d
cmpq %r9, %r13
je 0xa83ab
movslq (%rax,%r9,4), %rcx
movq (%rdi,%rcx,8), %xmm0
pxor %xmm1, %xmm1
pcmpgtb %xmm0, %xmm1
punpcklbw %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
movq (%r11,%r9,8), %xmm1
pxor %xmm2, %xmm2
pcmpgtb %xmm1, %xmm2
punpcklbw %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movdqa %xmm1, %xmm2
pmullw %xmm0, %xmm2
pmulhw %xmm0, %xmm1
movdqa %xmm2, %xmm0
punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
punpckhwd %xmm1, %xmm2 # xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
paddd %xmm0, %xmm2
pshufd $0xee, %xmm2, %xmm0 # xmm0 = xmm2[2,3,2,3]
paddd %xmm2, %xmm0
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %ecx
addl %ecx, %esi
incq %r9
jmp 0xa8346
addq %r8, %r11
incq %r10
jmp 0xa8334
movq 0x18(%rsp), %rcx
movq 0x60(%rsp), %rdi
movl %esi, (%rcx,%rdi,4)
incq %rdi
movq 0x30(%rsp), %rsi
jmp 0xa82e1
movq 0x18(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x18(%rsp)
incq %rsi
movq 0x38(%rsp), %rcx
jmp 0xa82a5
incq %rcx
jmp 0xa827d
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa8590
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa8590
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xa8590
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xa8590
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r12d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x13c(%rsp), %ebp
movl 0x140(%rsp), %r13d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
xorl %eax, %eax
testl %r13d, %r13d
cmovlel %eax, %r13d
movslq %ebx, %rcx
testl %r12d, %r12d
cmovlel %eax, %r12d
cmpq %r12, %rax
je 0xa9457
movq 0xd0(%rsp), %rsi
imulq %rax, %rsi
imulq 0xa0(%rsp), %rsi
addq 0x90(%rsp), %rsi
movq 0x3a0(%rsp), %rdi
imulq %rax, %rdi
imulq 0x370(%rsp), %rdi
addq 0x360(%rsp), %rdi
xorl %r8d, %r8d
cmpl %r13d, %r8d
je 0xa8588
leaq (%rsi,%rcx), %r9
xorl %r10d, %r10d
movq %rsi, %r11
leal 0x3(%r10), %ebx
cmpl %ebp, %ebx
jge 0xa855c
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
movb 0x4(%r11), %bl
movb %bl, 0x2(%rdi,%r10)
movb 0x6(%r11), %bl
movb %bl, 0x3(%rdi,%r10)
addq $0x8, %r11
addq $0x4, %r10
addq $0x8, %r9
jmp 0xa8508
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
addq $0x4, %r11
addq $0x2, %r10
addq $0x4, %r9
leal 0x1(%r10), %ebx
cmpl %ebp, %ebx
jl 0xa8540
jmp 0xa8575
movb (%rsi,%r10,2), %r11b
movb %r11b, (%rdi,%r10)
addq $0x2, %r9
incq %r10
cmpl %ebp, %r10d
jl 0xa8566
incl %r8d
addq %r10, %rdi
movq %r9, %rsi
jmp 0xa84f5
incq %rax
jmp 0xa84af
cmpb $0x1, 0x1c(%r14)
jne 0xa8b2f
cmpl $0x3, 0x18(%rsp)
jne 0xa8b2f
cmpb $0x0, 0x37(%r14)
je 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xa8b2f
movl 0x70(%rsp), %ecx
imull 0x100(%rsp), %ecx
cmpl $0x10, %ecx
jl 0xa8b2f
movq 0x20(%rsp), %rcx
cmpl $0x10, 0xd0(%rcx,%rax)
jl 0xa8b2f
movslq 0xc8(%rsp), %rdi
movl 0x13c(%rsp), %eax
movl 0x140(%rsp), %ecx
movslq 0x148(%rsp), %rdx
movq %rdx, 0x108(%rsp)
movq 0x90(%rsp), %rsi
movq 0x98(%rsp), %rdx
movq %rsi, 0x360(%rsp)
movq %rdx, 0x368(%rsp)
movq 0xa0(%rsp), %rsi
movq %rsi, 0x370(%rsp)
movl 0xa8(%rsp), %esi
movl %esi, 0x378(%rsp)
movq 0xb0(%rsp), %rsi
movq %rsi, 0x380(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, 0x388(%rsp)
movq %rdi, 0x60(%rsp)
movl %edi, 0x398(%rsp)
movl 0xbc(%rsp), %esi
movl 0xc0(%rsp), %edi
movq 0xd0(%rsp), %r8
movq %r8, 0x3a0(%rsp)
testq %rdx, %rdx
je 0xa86fc
lock
incl (%rdx)
movl 0xbc(%rsp), %esi
movl 0xc0(%rsp), %edi
incl %eax
pushq $0x2
popq %r8
cltd
idivl %r8d
movl %eax, %r9d
incl %ecx
movl %ecx, %eax
cltd
idivl %r8d
movq %rax, %rbx
leal (%rax,%rax), %edx
movq %r9, 0x328(%rsp)
leal 0x2(%r9,%r9), %r14d
movq 0x40(%rsp), %rcx
movdqu (%rcx), %xmm0
movdqu 0x10(%rcx), %xmm1
movdqu 0x20(%rcx), %xmm2
movdqu 0x30(%rcx), %xmm3
leaq 0x290(%rsp), %rax
movdqa %xmm3, 0x30(%rax)
movdqa %xmm2, 0x20(%rax)
movdqa %xmm1, 0x10(%rax)
movdqa %xmm0, (%rax)
movq 0x10(%rcx), %rcx
movq %rcx, 0x8(%rax)
movl %edx, 0x348(%rsp)
movl %edx, %ecx
subl %edi, %ecx
addl $0x2, %ecx
movl %r14d, %r9d
subl %esi, %r9d
movq %rax, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x360(%rsp), %rsi
pxor %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6466c
leaq 0x1c0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebx, %edx
imull 0x328(%rsp), %edx
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x10
popq %rsi
pushq $0x2
popq %r8
movq %rdx, 0x50(%rsp)
movq 0x60(%rsp), %rcx
callq 0x63810
leaq 0x380(%rsp), %rsi
movq 0x328(%rsp), %rax
leal (%rax,%rax), %ecx
movl %ecx, 0x344(%rsp)
movq -0x10(%rsi), %rcx
imulq 0x20(%rsi), %rcx
movq %rcx, 0x28(%rsp)
leaq 0x1e0(%rsp), %rdx
movq -0x10(%rdx), %rcx
imulq 0x20(%rdx), %rcx
movq %rcx, 0x30(%rsp)
movq -0x20(%rsi), %rcx
movq %rcx, 0x48(%rsp)
movslq %r14d, %rsi
addl %r14d, %r14d
xorl %edi, %edi
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movl %ecx, 0x58(%rsp)
movq -0x20(%rdx), %rax
movq %rax, 0x38(%rsp)
testl %ebx, %ebx
cmovlel %edi, %ebx
movq %rbx, 0x80(%rsp)
movq 0x60(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x18(%rsp)
cmpq 0x18(%rsp), %rdi
je 0xa9fd5
movq 0x28(%rsp), %r10
imulq %rdi, %r10
addq 0x48(%rsp), %r10
movq 0x30(%rsp), %r11
imulq %rdi, %r11
addq 0x38(%rsp), %r11
xorl %ebx, %ebx
cmpq 0x80(%rsp), %rbx
je 0xa8b27
movl %r14d, %edx
imull %ebx, %edx
movslq %edx, %r12
addq %r10, %r12
leaq (%r12,%rsi), %r13
leaq (%rsi,%r13), %r15
leaq (%r15,%rsi), %rdx
xorl %r8d, %r8d
cmpl 0x58(%rsp), %r8d
je 0xa8b1f
xorl %ebp, %ebp
cmpq $0x4, %rbp
je 0xa890c
movsbl (%r12,%rbp), %eax
movw %ax, 0x170(%rsp,%rbp,2)
movsbl (%r13,%rbp), %eax
movw %ax, 0x230(%rsp,%rbp,2)
movsbl (%r15,%rbp), %eax
movw %ax, 0x6b0(%rsp,%rbp,2)
movsbl (%rdx,%rbp), %eax
movw %ax, 0x670(%rsp,%rbp,2)
incq %rbp
jmp 0xa88cd
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xa8965
movzwl 0x170(%rsp,%rbp), %eax
movzwl 0x6b0(%rsp,%rbp), %ecx
subl %ecx, %eax
movw %ax, 0x5e0(%rsp,%rbp)
movzwl 0x230(%rsp,%rbp), %eax
leal (%rax,%rcx), %r9d
movw %r9w, 0x350(%rsp,%rbp)
subl %eax, %ecx
movw %cx, 0x310(%rsp,%rbp)
movzwl 0x670(%rsp,%rbp), %ecx
subl %eax, %ecx
movw %cx, 0x300(%rsp,%rbp)
addq $0x2, %rbp
jmp 0xa890e
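# Stage the butterfly results column-major in scratch slots at
# 0x210..0x22e(%rsp) -- effectively a 4x4 word transpose before the second
# butterfly pass below.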
movzwl 0x5e0(%rsp), %eax
movzwl 0x5e2(%rsp), %ebp
movw %ax, 0x228(%rsp)
movw %bp, 0x220(%rsp)
movzwl 0x5e4(%rsp), %eax
movw %ax, 0x218(%rsp)
movzwl 0x5e6(%rsp), %eax
movw %ax, 0x210(%rsp)
movzwl 0x350(%rsp), %eax
movzwl 0x352(%rsp), %ebp
movw %ax, 0x22a(%rsp)
movw %bp, 0x222(%rsp)
movzwl 0x354(%rsp), %eax
movw %ax, 0x21a(%rsp)
movzwl 0x356(%rsp), %eax
movw %ax, 0x212(%rsp)
movzwl 0x310(%rsp), %eax
movzwl 0x312(%rsp), %ebp
movw %ax, 0x22c(%rsp)
movw %bp, 0x224(%rsp)
movzwl 0x314(%rsp), %eax
movzwl 0x316(%rsp), %ebp
movw %ax, 0x21c(%rsp)
movw %bp, 0x214(%rsp)
movzwl 0x300(%rsp), %eax
movzwl 0x302(%rsp), %ebp
movw %ax, 0x22e(%rsp)
movw %bp, 0x226(%rsp)
movzwl 0x304(%rsp), %eax
movw %ax, 0x21e(%rsp)
movzwl 0x306(%rsp), %eax
movw %ax, 0x216(%rsp)
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xa8abe
movzwl 0x228(%rsp,%rbp), %eax
movzwl 0x218(%rsp,%rbp), %ecx
subl %ecx, %eax
movw %ax, 0x170(%rsp,%rbp)
movzwl 0x220(%rsp,%rbp), %eax
leal (%rax,%rcx), %r9d
movw %r9w, 0x230(%rsp,%rbp)
subl %eax, %ecx
movw %cx, 0x6b0(%rsp,%rbp)
movzwl 0x210(%rsp,%rbp), %ecx
subl %eax, %ecx
movw %cx, 0x670(%rsp,%rbp)
addq $0x2, %rbp
jmp 0xa8a67
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xa8b03
movzwl 0x170(%rsp,%rbp), %eax
movw %ax, (%r11,%rbp)
movzwl 0x230(%rsp,%rbp), %eax
movw %ax, 0x8(%r11,%rbp)
movzwl 0x6b0(%rsp,%rbp), %eax
movw %ax, 0x10(%r11,%rbp)
movzwl 0x670(%rsp,%rbp), %eax
movw %ax, 0x18(%r11,%rbp)
addq $0x2, %rbp
jmp 0xa8ac0
addq $0x2, %r12
addq $0x2, %r13
addq $0x2, %r15
addq $0x2, %rdx
addq $0x20, %r11
incl %r8d
jmp 0xa88c0
incq %rbx
jmp 0xa8897
incq %rdi
jmp 0xa886e
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r13d
movslq 0xdc(%rcx,%rax), %rdx
movq %rdx, 0x28(%rsp)
movslq 0xe0(%rcx,%rax), %rdx
movq %rdx, 0x70(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x80(%rsp)
movl 0xbc(%rsp), %r15d
movl 0xc8(%rsp), %r12d
movq %r14, %rax
movslq 0x13c(%rsp), %r14
movl 0x140(%rsp), %ecx
movq %r13, 0x48(%rsp)
imull 0x18(%rsp), %r13d
cmpb $0x1, 0x1d(%rax)
jne 0xa8dca
movq %rcx, %rbp
movl %ecx, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x78(%rsp)
movl %r15d, %r8d
imull 0x80(%rsp), %r8d
movl %r14d, %eax
imull %ebx, %eax
subl %eax, %r8d
leal (%rbx,%rbx), %eax
cltq
leal (%rbx,%rbx,2), %ecx
movslq %ecx, %rcx
xorl %r9d, %r9d
movq %rbp, %r15
testl %r15d, %r15d
cmovlel %r9d, %r15d
leal (,%rbx,4), %edx
movq 0x18(%rsp), %rsi
testl %esi, %esi
cmovlel %r9d, %esi
movq %rsi, 0x18(%rsp)
movslq %edx, %rdi
movq 0x48(%rsp), %rdx
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x48(%rsp)
movslq %r8d, %r8
testl %r12d, %r12d
cmovlel %r9d, %r12d
cmpq %r12, %r9
je 0xa8e67
movq 0xd0(%rsp), %rsi
imulq %r9, %rsi
movq 0xa0(%rsp), %rdx
imulq %rdx, %rsi
addq 0x90(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x3a0(%rsp), %r10
movq %r9, 0x80(%rsp)
imulq %r9, %r10
imulq 0x370(%rsp), %r10
addq 0x360(%rsp), %r10
movslq 0xbc(%rsp), %rsi
imulq %rdx, %rsi
imulq 0x70(%rsp), %rsi
movq %rsi, 0x50(%rsp)
xorl %edx, %edx
cmpq 0x48(%rsp), %rdx
je 0xa8dba
movq 0x50(%rsp), %rsi
imulq %rdx, %rsi
addq 0x38(%rsp), %rsi
movq %rsi, 0x58(%rsp)
xorl %esi, %esi
movq %rdx, 0x30(%rsp)
cmpq 0x18(%rsp), %rsi
je 0xa8db2
movq %rsi, 0x60(%rsp)
movq %rsi, %rdx
imulq 0x28(%rsp), %rdx
addq 0x58(%rsp), %rdx
xorl %r9d, %r9d
cmpl %r15d, %r9d
je 0xa8da0
xorl %r13d, %r13d
xorl %r11d, %r11d
movq %rdx, %rsi
leal 0x3(%r11), %ebp
cmpl %r14d, %ebp
jge 0xa8d71
movb (%rsi), %bpl
movb %bpl, (%r10,%r11)
movb (%rsi,%rbx), %bpl
movb %bpl, 0x1(%r10,%r11)
movb (%rsi,%rax), %bpl
movb %bpl, 0x2(%r10,%r11)
movb (%rsi,%rcx), %bpl
movb %bpl, 0x3(%r10,%r11)
addq %rdi, %rsi
addq $0x4, %r11
addq %rdi, %r13
jmp 0xa8d20
movb (%rsi), %bpl
movb %bpl, (%r10,%r11)
movb (%rsi,%rbx), %bpl
movb %bpl, 0x1(%r10,%r11)
addq %rax, %rsi
addq $0x2, %r11
addq %rax, %r13
leal 0x1(%r11), %ebp
cmpl %r14d, %ebp
jl 0xa8d57
jmp 0xa8d8a
movb (%rdx,%r13), %sil
movb %sil, (%r10,%r11)
incq %r11
addq %rbx, %r13
cmpl %r14d, %r11d
jl 0xa8d7c
addq %r8, %rdx
addq %r13, %rdx
incl %r9d
addq %r11, %r10
jmp 0xa8d0e
movq 0x60(%rsp), %rsi
incq %rsi
movq 0x30(%rsp), %rdx
jmp 0xa8ced
incq %rdx
jmp 0xa8cc8
movq 0x80(%rsp), %r9
incq %r9
jmp 0xa8c5c
movq %rcx, 0x38(%rsp)
movl 0x148(%rsp), %eax
movq %rax, 0x78(%rsp)
movslq %r13d, %rbp
leaq 0x360(%rsp), %rdi
leaq 0x1c0(%rsp), %rdx
movq %rbp, %rsi
callq 0x73bbe
imull 0x70(%rsp), %r15d
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rdx
imull %edx, %eax
subl %eax, %r15d
movl %r15d, 0x60(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x48(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x360(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
movq 0x28(%rsp), %r15
cmpl %r11d, %esi
je 0xa8ec4
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xa8e5c
movl %ecx, (%rax,%r9,4)
incq %r9
addl %r15d, %ecx
jmp 0xa8e4b
addl 0x60(%rsp), %ecx
incl %esi
addl %r10d, %edx
jmp 0xa8e38
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x78(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0xb40f2
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
movq 0x120(%rsp), %rcx
imulq 0x150(%rsp), %rcx
movq %rcx, 0x100(%rsp)
movl %r12d, %ecx
imull %r13d, %ecx
movl %ecx, 0x88(%rsp)
xorl %edx, %edx
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %r12d, %r12d
cmovlel %edx, %r12d
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movq %rcx, 0x28(%rsp)
movq 0x38(%rsp), %r15
testl %r15d, %r15d
cmovlel %edx, %r15d
movq %r15, 0x38(%rsp)
movq 0x110(%rsp), %rcx
movq %rcx, 0xe0(%rsp)
movq 0x78(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x78(%rsp)
xorl %ecx, %ecx
cmpq 0x78(%rsp), %rcx
je 0xa907a
movq 0x100(%rsp), %rdx
imulq %rcx, %rdx
addq 0xe0(%rsp), %rdx
movq %rdx, 0x18(%rsp)
movq %rcx, %rdx
movl 0x88(%rsp), %ecx
movq %rdx, 0x70(%rsp)
imull %edx, %ecx
movslq %ecx, %rcx
movq %rcx, 0x50(%rsp)
xorl %ecx, %ecx
cmpq 0x38(%rsp), %rcx
je 0xa906d
movq %rcx, %rdx
movq 0x20(%rsp), %rcx
movq 0x18(%rcx), %rcx
addq 0x50(%rsp), %rcx
movq %rcx, 0x30(%rsp)
movq %rdx, 0x48(%rsp)
imulq 0x80(%rsp), %rdx
movq %rdx, 0x58(%rsp)
xorl %r8d, %r8d
cmpq 0x28(%rsp), %r8
je 0xa9052
movslq 0xbc(%rsp), %rcx
imulq 0x58(%rsp), %rcx
movq 0xa0(%rsp), %rdx
movq 0xd0(%rsp), %r9
imulq %rdx, %r9
imulq %rdx, %rcx
addq 0x90(%rsp), %rcx
movq %r8, 0x60(%rsp)
imulq %rbx, %r8
addq %rcx, %r8
xorl %edx, %edx
xorl %edi, %edi
movq 0x30(%rsp), %rcx
cmpq %r12, %rdx
je 0xa903c
movq %r9, %rsi
imulq %rdx, %rsi
addq %r8, %rsi
xorl %r11d, %r11d
cmpq %r11, %r13
je 0xa9034
movslq (%rax,%r11,4), %r10
movsbl (%rsi,%r10), %r10d
movsbl (%rcx,%r11), %r15d
imull %r10d, %r15d
addl %r15d, %edi
incq %r11
jmp 0xa9015
addq %rbp, %rcx
incq %rdx
jmp 0xa9003
movq 0x18(%rsp), %rcx
movq 0x60(%rsp), %r8
movl %edi, (%rcx,%r8,4)
incq %r8
jmp 0xa8fb5
movq 0x18(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x18(%rsp)
movq 0x48(%rsp), %rcx
incq %rcx
jmp 0xa8f7e
movq 0x70(%rsp), %rcx
incq %rcx
jmp 0xa8f3e
leaq 0x360(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %rsi
leaq 0x190(%rsi), %rdx
movq (%rsi), %rax
movq -0x18(%rax), %rcx
addq %rsi, %rcx
cmpl $0x65, 0x34c(%rsp)
jl 0xa90ed
leaq 0x280(%rcx), %rax
leaq 0x1a8(%rcx), %r8
movl 0x10c(%rcx), %r9d
addq $0x110, %rcx # imm = 0x110
xorl %ebp, %ebp
movq 0x40(%rsp), %rsi
movq %rsi, 0x8(%rsp)
movq %rcx, (%rsp)
leaq 0x110(%rsp), %rdi
movq 0x338(%rsp), %rsi
movq %rax, %rcx
callq 0x657e8
jmp 0xa9131
addq $0x1a8, %rcx # imm = 0x1A8
leaq 0x110(%rsp), %rdi
movq 0x338(%rsp), %rsi
movq 0x40(%rsp), %r8
callq 0x654c7
movq 0x20(%rsp), %rax
movq 0x8(%rax), %rdi
xorl %ebp, %ebp
testq %rdi, %rdi
je 0xa9131
movq (%rdi), %rax
movq 0x338(%rsp), %rsi
movq 0x40(%rsp), %rdx
callq *0x48(%rax)
movq 0x118(%rsp), %rax
testq %rax, %rax
je 0xa9168
lock
decl (%rax)
jne 0xa9168
movq 0x110(%rsp), %rsi
movq 0x130(%rsp), %rdi
testq %rdi, %rdi
je 0xa9160
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9168
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xa919f
lock
decl (%rax)
jne 0xa919f
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xa9197
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa919f
movq %rsi, %rdi
callq 0x5f3e0
movq 0x630(%rsp), %rax
testq %rax, %rax
je 0xa91d6
lock
decl (%rax)
jne 0xa91d6
movq 0x628(%rsp), %rsi
movq 0x648(%rsp), %rdi
testq %rdi, %rdi
je 0xa91ce
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa91d6
movq %rsi, %rdi
callq 0x5f3e0
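# Function epilogue: return value (from %ebp) in %eax, pop the 0x6f8-byte
# frame, restore callee-saved registers.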
movl %ebp, %eax
addq $0x6f8, %rsp # imm = 0x6F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xae99b
movq 0x368(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa6e2f
lock
decl (%rax)
jne 0xa6e2f
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa924d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa6e2f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xa6e2f
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xaeb6d
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x30(%rsp), %rdx
callq 0xb31de
movq 0x20(%rsp), %r14
movq 0x368(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa7bfe
lock
decl (%rax)
jne 0xa7bfe
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa944a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa7bfe
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x18(%rsp), %rdx
callq 0xb31de
movq 0x20(%rsp), %r14
movq 0x368(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa7bfe
lock
decl (%rax)
jne 0xa7bfe
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa944a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa7bfe
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq 0x30(%rsp), %rdx
callq 0xb31de
movq 0x20(%rsp), %r14
movq 0x368(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa7bfe
lock
decl (%rax)
jne 0xa7bfe
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa944a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa7bfe
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xaea84
movq 0x368(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
movq 0x88(%rsp), %r15
je 0xa7bfe
lock
decl (%rax)
jne 0xa7bfe
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xa944a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa7bfe
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xa7bfe
leaq 0x360(%rsp), %rdi
leaq 0x110(%rsp), %rsi
movq %r14, %rcx
callq 0xaec56
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
movl 0xc8(%rsp), %esi
movl 0xa8(%rsp), %edx
movq 0x13c(%rsp), %xmm0
movl 0x148(%rsp), %eax
movl %eax, 0x78(%rsp)
movq 0x90(%rsp), %rcx
movq 0x98(%rsp), %rax
movq %rcx, 0x1c0(%rsp)
movq %rax, 0x1c8(%rsp)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x1d0(%rsp)
movq %rdx, 0xe0(%rsp)
movl %edx, 0x1d8(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x1e0(%rsp)
movups 0xb8(%rsp), %xmm1
movups %xmm1, 0x1e8(%rsp)
movl %esi, 0xf0(%rsp)
movl %esi, 0x1f8(%rsp)
movq 0xd0(%rsp), %rcx
movq %rcx, 0x200(%rsp)
testq %rax, %rax
je 0xa9554
lock
incl (%rax)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
paddd 0x3486d6(%rip), %xmm0 # 0x3f1c40
movdqa %xmm0, %xmm2
psrad $0x1f, %xmm2
psrld $0x1e, %xmm2
paddd %xmm0, %xmm2
movdqa 0x34879c(%rip), %xmm1 # 0x3f1d20
movdqa %xmm2, 0x60(%rsp)
pand %xmm2, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
movd %xmm0, %ecx
movdqa %xmm1, 0x2f0(%rsp)
movd %xmm1, %esi
movq %rsi, 0x208(%rsp)
leal 0x2(%rsi), %ebx
movq %rcx, 0x168(%rsp)
addl $0x2, %ecx
subl %edx, %ecx
movl %ebx, %r9d
subl %eax, %r9d
movq 0x40(%rsp), %r15
movq %r15, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x1c0(%rsp), %rsi
pxor %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6466c
movdqa 0x60(%rsp), %xmm0
psrad $0x2, %xmm0
leaq 0x170(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movd %xmm0, %r13d
imull $0x6, %r13d, %eax
pshufd $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1]
movd %xmm0, %ecx
movl %eax, 0x48(%rsp)
movq %rcx, 0x58(%rsp)
imull %ecx, %eax
movl %eax, 0xf8(%rsp)
cltd
pushq $0x6
popq %rcx
idivl %ecx
movq 0xe0(%rsp), %r9
leal (%r9,%r9), %r8d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r15), %rcx
movq %rax, %r15
movq %rcx, (%rsp)
pushq $0x24
popq %rdx
movl %r15d, %esi
movl 0xf0(%rsp), %r14d
movl %r14d, %ecx
movq %r8, 0x280(%rsp)
callq 0x628f2
shll $0x3, %ebx
movslq %ebx, %rcx
leal (,%r15,8), %eax
movslq %eax, %r12
movl %r15d, %eax
shll $0x4, %eax
movslq %eax, %rbp
imull $0x18, %r15d, %eax
movslq %eax, %rbx
movl %r15d, %eax
shll $0x5, %eax
movslq %eax, %rsi
imull $0x28, %r15d, %eax
movslq %eax, %rdi
movq %r15, 0x28(%rsp)
imull $0x30, %r15d, %eax
xorl %r8d, %r8d
testl %r13d, %r13d
movl $0x0, %edx
movl %r13d, 0x160(%rsp)
cmovgl %r13d, %edx
movq %rdx, 0x60(%rsp)
movslq %eax, %r9
movq 0x58(%rsp), %rax
testl %eax, %eax
cmovlel %r8d, %eax
testl %r14d, %r14d
movl $0x0, %r13d
cmovgl %r14d, %r13d
addq %r9, %r9
pushq $-0x60
popq %r15
pushq $0x50
popq %r11
movdqa 0x34862f(%rip), %xmm0 # 0x3f1d30
movq %rax, %r14
movq %rax, 0x58(%rsp)
cmpq %r13, %r8
je 0xa9a1c
movq %r8, %rdx
movslq 0x1ec(%rsp), %rax
movq 0x1c0(%rsp), %r8
movq %r8, 0x30(%rsp)
movq 0x1d0(%rsp), %r8
movq %r8, 0x18(%rsp)
movq 0x1b0(%rsp), %r10
movq 0x200(%rsp), %r8
imulq 0x18(%rsp), %r8
imulq 0x180(%rsp), %r10
imulq %rdx, %r10
addq 0x170(%rsp), %r10
movq %r10, 0x50(%rsp)
imulq 0x18(%rsp), %rax
shlq $0x2, %rax
movq %rax, 0x38(%rsp)
movq %rdx, 0x80(%rsp)
imulq %rdx, %r8
movq 0x30(%rsp), %rax
addq %r8, %rax
addq $0x20, %rax
movq %rax, 0x18(%rsp)
xorl %edx, %edx
cmpq %r14, %rdx
je 0xa9a0c
movl 0x48(%rsp), %eax
movq %rdx, 0x30(%rsp)
imull %edx, %eax
cltd
pushq $0x6
popq %r8
idivl %r8d
cltq
shlq $0x4, %rax
addq 0x50(%rsp), %rax
movq 0x18(%rsp), %r8
xorl %r14d, %r14d
cmpq 0x60(%rsp), %r14
je 0xa99eb
movq %r8, %rdx
movq %r15, %r10
testq %r10, %r10
je 0xa98f0
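# Sign-extend three 16-byte rows to words and apply a fixed add/sub/shift
# butterfly network (psllw $0x2 = *4, pmullw by a constant vector),
# producing six transformed rows in scratch -- a small fixed linear
# transform over the tile (Winograd-style transforms look like this;
# an assumption, not confirmed by the source).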
movdqu -0x20(%rdx), %xmm2
movdqu -0x10(%rdx), %xmm1
movdqu (%rdx), %xmm3
pxor %xmm4, %xmm4
pcmpgtb %xmm2, %xmm4
pxor %xmm5, %xmm5
pcmpgtb %xmm1, %xmm5
pxor %xmm7, %xmm7
pcmpgtb %xmm3, %xmm7
movdqa %xmm2, %xmm6
punpcklbw %xmm4, %xmm6 # xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
punpckhbw %xmm4, %xmm2 # xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
movdqa %xmm1, %xmm8
punpcklbw %xmm5, %xmm8 # xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
punpckhbw %xmm5, %xmm1 # xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
movdqa %xmm3, %xmm4
punpcklbw %xmm7, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
punpckhbw %xmm7, %xmm3 # xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
psllw $0x2, %xmm6
paddw %xmm4, %xmm6
movdqa %xmm4, %xmm5
movdqa %xmm8, %xmm9
movdqa %xmm4, %xmm10
movdqa %xmm2, %xmm7
psubw %xmm8, %xmm7
psubw %xmm8, %xmm4
pmullw %xmm0, %xmm8
paddw %xmm6, %xmm8
paddw %xmm1, %xmm5
paddw %xmm2, %xmm9
psllw $0x2, %xmm9
psubw %xmm9, %xmm5
psubw %xmm1, %xmm10
psllw $0x2, %xmm7
paddw %xmm10, %xmm7
movdqa %xmm2, %xmm6
psubw %xmm1, %xmm6
paddw %xmm6, %xmm6
movdqa %xmm4, %xmm9
psubw %xmm6, %xmm9
paddw %xmm4, %xmm6
psllw $0x2, %xmm2
paddw %xmm3, %xmm2
pmullw %xmm0, %xmm1
paddw %xmm2, %xmm1
movdqa %xmm8, 0x3c0(%rsp,%r10)
movdqa %xmm5, 0x420(%rsp,%r10)
movdqa %xmm7, 0x480(%rsp,%r10)
movdqa %xmm9, 0x4e0(%rsp,%r10)
movdqa %xmm6, 0x540(%rsp,%r10)
movdqa %xmm1, 0x5a0(%rsp,%r10)
addq $0x10, %r10
addq %rcx, %rdx
jmp 0xa97d6
movq %rax, %rdx
movq %r11, %r10
cmpq $0x290, %r10 # imm = 0x290
je 0xa99db
movdqa 0x310(%rsp,%r10), %xmm1
movdqa 0x320(%rsp,%r10), %xmm2
movdqa 0x330(%rsp,%r10), %xmm6
movdqa 0x350(%rsp,%r10), %xmm4
psllw $0x2, %xmm1
paddw %xmm4, %xmm1
movdqa %xmm4, %xmm3
movdqa %xmm6, %xmm7
movdqa %xmm4, %xmm8
movdqa %xmm2, %xmm5
psubw %xmm6, %xmm5
psubw %xmm6, %xmm4
pmullw %xmm0, %xmm6
paddw %xmm6, %xmm1
movdqa 0x340(%rsp,%r10), %xmm6
paddw %xmm6, %xmm3
paddw %xmm2, %xmm7
psllw $0x2, %xmm7
psubw %xmm7, %xmm3
psubw %xmm6, %xmm8
psllw $0x2, %xmm5
paddw %xmm8, %xmm5
movdqa %xmm2, %xmm7
psubw %xmm6, %xmm7
paddw %xmm7, %xmm7
movdqa %xmm4, %xmm8
psubw %xmm7, %xmm8
paddw %xmm4, %xmm7
psllw $0x2, %xmm2
pmullw %xmm0, %xmm6
paddw %xmm2, %xmm6
paddw 0x360(%rsp,%r10), %xmm6
movdqu %xmm1, (%rdx)
movdqu %xmm3, (%rdx,%r12,2)
movdqu %xmm5, (%rdx,%rbp,2)
movdqu %xmm8, (%rdx,%rbx,2)
movdqu %xmm7, (%rdx,%rsi,2)
movdqu %xmm6, (%rdx,%rdi,2)
addq $0x60, %r10
addq %r9, %rdx
jmp 0xa98f6
incq %r14
addq $0x20, %r8
addq $0x10, %rax
jmp 0xa97c5
movq 0x30(%rsp), %rdx
incq %rdx
movq 0x18(%rsp), %rax
addq 0x38(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x58(%rsp), %r14
jmp 0xa9795
movq 0x80(%rsp), %r8
incq %r8
jmp 0xa9709
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xaa01b
lock
decl (%rax)
jne 0xaa01b
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xaa013
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa01b
movl 0xc8(%rsp), %esi
movl 0xa8(%rsp), %edx
movq 0x13c(%rsp), %xmm0
movslq 0x148(%rsp), %rax
movq %rax, 0xe8(%rsp)
movq 0x90(%rsp), %rcx
movq 0x98(%rsp), %rax
movq %rcx, 0x1c0(%rsp)
movq %rax, 0x1c8(%rsp)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x1d0(%rsp)
movq %rdx, 0x50(%rsp)
movl %edx, 0x1d8(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x1e0(%rsp)
movups 0xb8(%rsp), %xmm1
movups %xmm1, 0x1e8(%rsp)
movl %esi, 0x80(%rsp)
movl %esi, 0x1f8(%rsp)
movq 0xd0(%rsp), %rcx
movq %rcx, 0x200(%rsp)
testq %rax, %rax
je 0xa9b03
lock
incl (%rax)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
paddd 0x348127(%rip), %xmm0 # 0x3f1c40
movdqa %xmm0, %xmm2
psrad $0x1f, %xmm2
psrld $0x1e, %xmm2
paddd %xmm0, %xmm2
movdqa 0x3481ed(%rip), %xmm1 # 0x3f1d20
movdqa %xmm2, 0x60(%rsp)
pand %xmm2, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
movd %xmm0, %esi
movdqa %xmm1, 0x280(%rsp)
movd %xmm1, %ecx
movq %rcx, 0xf0(%rsp)
leal 0x2(%rcx), %r12d
movq %rsi, 0xf8(%rsp)
leal 0x2(%rsi), %ecx
subl %edx, %ecx
movl %r12d, %r9d
subl %eax, %r9d
movq 0x40(%rsp), %r14
movq %r14, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x1c0(%rsp), %rsi
pxor %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6466c
movdqa 0x60(%rsp), %xmm0
psrad $0x2, %xmm0
leaq 0x170(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movd %xmm0, %r15d
imull $0x6, %r15d, %eax
pshufd $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1]
movd %xmm0, %ebx
movl %eax, 0x88(%rsp)
imull %ebx, %eax
movl %eax, 0x78(%rsp)
cltd
pushq $0x6
popq %rcx
idivl %ecx
movl %eax, %r13d
movq 0x50(%rsp), %r9
leal (%r9,%r9), %r8d
pxor %xmm0, %xmm0
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rdx
movl %r13d, %esi
movl 0x80(%rsp), %r14d
movl %r14d, %ecx
movq %r8, 0x70(%rsp)
callq 0x628f2
pushq $-0x60
popq %rbp
pushq $0x50
popq %rsi
shll $0x3, %r12d
movslq %r12d, %rdi
leal (,%r13,8), %eax
movslq %eax, %r8
movl %r13d, %eax
shll $0x4, %eax
movslq %eax, %r9
imull $0x18, %r13d, %eax
movslq %eax, %r10
movl %r13d, %eax
shll $0x5, %eax
movslq %eax, %r11
imull $0x28, %r13d, %eax
movslq %eax, %r12
movq %r13, 0x60(%rsp)
imull $0x30, %r13d, %eax
movslq %eax, %r13
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq %r15, 0x18(%rsp)
testl %ebx, %ebx
cmovlel %ecx, %ebx
testl %r14d, %r14d
movl %r14d, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
addq %r13, %r13
movdqa 0x34809c(%rip), %xmm0 # 0x3f1d30
movq %rbx, 0x100(%rsp)
cmpq %r14, %rcx
je 0xa9f97
movq %rcx, %rbx
movslq 0x1ec(%rsp), %r15
movq 0x1c0(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x1d0(%rsp), %rcx
movq 0x1b0(%rsp), %rax
movq 0x200(%rsp), %rdx
imulq %rcx, %rdx
imulq 0x180(%rsp), %rax
imulq %rbx, %rax
addq 0x170(%rsp), %rax
movq %rax, 0x48(%rsp)
imulq %rcx, %r15
shlq $0x2, %r15
movq %r15, 0x58(%rsp)
movq %rbx, 0x38(%rsp)
imulq %rbx, %rdx
movq 0x28(%rsp), %rax
leaq (%rax,%rdx), %r15
addq $0x20, %r15
xorl %ecx, %ecx
movq 0x100(%rsp), %rbx
cmpq %rbx, %rcx
je 0xa9f8a
movl 0x88(%rsp), %eax
movq %rcx, 0x30(%rsp)
imull %ecx, %eax
cltd
pushq $0x6
popq %rcx
idivl %ecx
cltq
shlq $0x4, %rax
addq 0x48(%rsp), %rax
movq %r15, 0x28(%rsp)
movq %r15, %rbx
xorl %r15d, %r15d
cmpq 0x18(%rsp), %r15
je 0xa9f73
movq %rbx, %rdx
movq %rbp, %rcx
testq %rcx, %rcx
je 0xa9e7b
movdqu -0x20(%rdx), %xmm2
movdqu -0x10(%rdx), %xmm1
movdqu (%rdx), %xmm3
pxor %xmm4, %xmm4
pcmpgtb %xmm2, %xmm4
pxor %xmm5, %xmm5
pcmpgtb %xmm1, %xmm5
pxor %xmm7, %xmm7
pcmpgtb %xmm3, %xmm7
movdqa %xmm2, %xmm6
punpcklbw %xmm4, %xmm6 # xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
punpckhbw %xmm4, %xmm2 # xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
movdqa %xmm1, %xmm8
punpcklbw %xmm5, %xmm8 # xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
punpckhbw %xmm5, %xmm1 # xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
movdqa %xmm3, %xmm4
punpcklbw %xmm7, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
punpckhbw %xmm7, %xmm3 # xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
psllw $0x2, %xmm6
paddw %xmm4, %xmm6
movdqa %xmm4, %xmm5
movdqa %xmm8, %xmm9
movdqa %xmm4, %xmm10
movdqa %xmm2, %xmm7
psubw %xmm8, %xmm7
psubw %xmm8, %xmm4
pmullw %xmm0, %xmm8
paddw %xmm6, %xmm8
paddw %xmm1, %xmm5
paddw %xmm2, %xmm9
psllw $0x2, %xmm9
psubw %xmm9, %xmm5
psubw %xmm1, %xmm10
psllw $0x2, %xmm7
paddw %xmm10, %xmm7
movdqa %xmm2, %xmm6
psubw %xmm1, %xmm6
paddw %xmm6, %xmm6
movdqa %xmm4, %xmm9
psubw %xmm6, %xmm9
paddw %xmm4, %xmm6
psllw $0x2, %xmm2
paddw %xmm3, %xmm2
pmullw %xmm0, %xmm1
paddw %xmm2, %xmm1
movdqa %xmm8, 0x3c0(%rsp,%rcx)
movdqa %xmm5, 0x420(%rsp,%rcx)
movdqa %xmm7, 0x480(%rsp,%rcx)
movdqa %xmm9, 0x4e0(%rsp,%rcx)
movdqa %xmm6, 0x540(%rsp,%rcx)
movdqa %xmm1, 0x5a0(%rsp,%rcx)
addq $0x10, %rcx
addq %rdi, %rdx
jmp 0xa9d65
movq %rax, %rcx
movq %rsi, %rdx
cmpq $0x290, %rdx # imm = 0x290
je 0xa9f63
movdqa 0x310(%rsp,%rdx), %xmm1
movdqa 0x320(%rsp,%rdx), %xmm2
movdqa 0x330(%rsp,%rdx), %xmm6
movdqa 0x350(%rsp,%rdx), %xmm4
psllw $0x2, %xmm1
paddw %xmm4, %xmm1
movdqa %xmm4, %xmm3
movdqa %xmm6, %xmm7
movdqa %xmm4, %xmm8
movdqa %xmm2, %xmm5
psubw %xmm6, %xmm5
psubw %xmm6, %xmm4
pmullw %xmm0, %xmm6
paddw %xmm6, %xmm1
movdqa 0x340(%rsp,%rdx), %xmm6
paddw %xmm6, %xmm3
paddw %xmm2, %xmm7
psllw $0x2, %xmm7
psubw %xmm7, %xmm3
psubw %xmm6, %xmm8
psllw $0x2, %xmm5
paddw %xmm8, %xmm5
movdqa %xmm2, %xmm7
psubw %xmm6, %xmm7
paddw %xmm7, %xmm7
movdqa %xmm4, %xmm8
psubw %xmm7, %xmm8
paddw %xmm4, %xmm7
psllw $0x2, %xmm2
pmullw %xmm0, %xmm6
paddw %xmm2, %xmm6
paddw 0x360(%rsp,%rdx), %xmm6
movdqu %xmm1, (%rcx)
movdqu %xmm3, (%rcx,%r8,2)
movdqu %xmm5, (%rcx,%r9,2)
movdqu %xmm8, (%rcx,%r10,2)
movdqu %xmm7, (%rcx,%r11,2)
movdqu %xmm6, (%rcx,%r12,2)
addq $0x60, %rdx
addq %r13, %rcx
jmp 0xa9e81
incq %r15
addq $0x20, %rbx
addq $0x10, %rax
jmp 0xa9d54
movq 0x30(%rsp), %rcx
incq %rcx
movq 0x28(%rsp), %r15
addq 0x58(%rsp), %r15
jmp 0xa9d18
movq 0x38(%rsp), %rcx
incq %rcx
jmp 0xa9c9c
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xaa744
lock
decl (%rax)
jne 0xaa744
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xaa73c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa744
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xab130
lock
decl (%rax)
jne 0xab130
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xab128
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xab130
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x200(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqu %xmm0, 0x1cc(%rsp)
andq $0x0, 0x270(%rsp)
leaq 0x1e0(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
movdqa %xmm0, 0x230(%rsp)
movdqu %xmm0, 0x23c(%rsp)
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movdqa %xmm0, 0x250(%rsp)
movdqu %xmm0, 0x25c(%rsp)
movq 0x28(%rsp), %r14
movl %r14d, %eax
shrl %eax
xorl %r8d, %r8d
cmpl $0xc, 0xf8(%rsp)
setge %cl
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0x2c(%rdi)
cmovgel %eax, %r8d
movl 0xf0(%rsp), %esi
shll %cl, %esi
movdqa %xmm0, 0x20(%rdi)
movl %r14d, %edx
subl %r8d, %edx
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rcx
movq 0x280(%rsp), %r8
movq 0xe0(%rsp), %r9
callq 0x628f2
movslq %r14d, %rax
movq %rax, 0x18(%rsp)
xorl %eax, %eax
cmpq $0x24, %rax
je 0xaa1fd
movslq 0x38c(%rsp), %rcx
movq 0x3a0(%rsp), %rdx
imulq %rax, %rdx
movq 0x370(%rsp), %rsi
imulq %rsi, %rdx
addq 0x360(%rsp), %rdx
imulq %rsi, %rcx
movq %rax, %rsi
imulq 0x18(%rsp), %rsi
xorl %edi, %edi
movq %rdi, %r8
orq $0x1, %r8
cmpq 0x18(%rsp), %r8
jge 0xaa1ee
movq %rdi, %r8
shrq %r8
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x170(%rsp), %r9
movl %r13d, %r10d
subl $0x1, %r10d
jb 0xaa196
movdqu (%r9), %xmm0
movdqu 0x10(%r9), %xmm1
movdqu %xmm0, (%r8)
movdqu %xmm1, 0x10(%r8)
movq 0x1b0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x20, %r8
jmp 0xaa165
addq $0x2, %rdi
jmp 0xaa133
movl %edi, %r9d
shrl %r9d
movl %edi, %r8d
andl $0x1, %r8d
addl %r9d, %r8d
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x170(%rsp), %r9
movl %r13d, %r10d
subl $0x1, %r10d
jb 0xaa1eb
movdqu (%r9), %xmm0
movdqu %xmm0, (%r8)
movq 0x1b0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x10, %r8
jmp 0xaa1c6
incq %rdi
cmpq 0x18(%rsp), %rdi
jl 0xaa19c
incq %rax
jmp 0xaa0f2
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xaa234
lock
decl (%rax)
jne 0xaa234
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xaa22c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa234
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1b0(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqu %xmm0, 0x17c(%rsp)
leaq 0x190(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x230(%rsp), %rdi
pushq $0x24
popq %rdx
pushq $0x10
popq %r8
pushq $0x4
popq %r9
movq 0x28(%rsp), %rsi
movl 0x78(%rsp), %ecx
callq 0x628f2
xorl %edx, %edx
movl 0x78(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x50(%rsp)
movq 0x20(%rsp), %r14
cmpq 0x50(%rsp), %rdx
je 0xaa5c0
movq 0x270(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x240(%rsp), %rcx
addq 0x230(%rsp), %rcx
movslq 0x11c(%r14), %rax
movq 0x100(%r14), %rsi
imulq %rsi, %rax
movq %rax, 0x38(%rsp)
imulq 0x130(%r14), %rsi
movq %rdx, 0x80(%rsp)
imulq %rdx, %rsi
addq 0xf0(%r14), %rsi
xorl %edi, %edi
cmpq $0x24, %rdi
je 0xaa5b0
movslq 0x38c(%rsp), %r8
movq 0x360(%rsp), %r9
movq 0x370(%rsp), %rax
movq 0x3a0(%rsp), %rdx
movq %rax, %r10
imulq %r8, %r10
movq %rdx, %r11
imulq %rax, %r11
movq %rdi, 0x30(%rsp)
imulq %rdi, %r11
addq %r9, %r11
xorl %r15d, %r15d
movq %r15, %rdi
orq $0x1, %rdi
cmpq 0x18(%rsp), %rdi
jge 0xaa4bf
pxor %xmm4, %xmm4
xorl %edi, %edi
movl %r13d, %r14d
pxor %xmm1, %xmm1
pxor %xmm5, %xmm5
pxor %xmm0, %xmm0
pxor %xmm6, %xmm6
pxor %xmm3, %xmm3
pxor %xmm7, %xmm7
pxor %xmm2, %xmm2
subl $0x1, %r14d
jb 0xaa420
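# Inner product over word lanes: pmaddwd multiplies adjacent word pairs and
# sums them, so each paddd folds eight 16-bit multiplies into four dword
# partial sums; eight accumulators run in parallel.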
movdqu (%r11,%rdi), %xmm9
movdqu 0x10(%r11,%rdi), %xmm8
movdqu (%rsi,%rdi,2), %xmm10
movdqu 0x10(%rsi,%rdi,2), %xmm11
movdqu 0x20(%rsi,%rdi,2), %xmm12
movdqu 0x30(%rsi,%rdi,2), %xmm13
movdqa %xmm9, %xmm14
pmaddwd %xmm10, %xmm14
paddd %xmm14, %xmm2
movdqa %xmm9, %xmm14
pmaddwd %xmm11, %xmm14
paddd %xmm14, %xmm7
movdqa %xmm9, %xmm14
pmaddwd %xmm12, %xmm14
paddd %xmm14, %xmm3
pmaddwd %xmm13, %xmm9
paddd %xmm9, %xmm6
pmaddwd %xmm8, %xmm10
paddd %xmm10, %xmm0
pmaddwd %xmm8, %xmm11
paddd %xmm11, %xmm5
pmaddwd %xmm8, %xmm12
paddd %xmm12, %xmm1
pmaddwd %xmm13, %xmm8
paddd %xmm8, %xmm4
addq $0x20, %rdi
jmp 0xaa386
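# Horizontal reduction: interleave the eight accumulators with punpck*dq /
# punpck*qdq so matching lanes align, then paddd collapses them into the
# two 16-byte results stored below.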
movdqa %xmm2, %xmm8
punpckldq %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
movdqa %xmm3, %xmm9
punpckldq %xmm6, %xmm9 # xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
punpckhdq %xmm7, %xmm2 # xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
punpckhdq %xmm6, %xmm3 # xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
movdqa %xmm0, %xmm6
punpckldq %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
movdqa %xmm1, %xmm7
punpckldq %xmm4, %xmm7 # xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
punpckhdq %xmm5, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
punpckhdq %xmm4, %xmm1 # xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
movdqa %xmm8, %xmm4
punpcklqdq %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[0]
punpckhqdq %xmm9, %xmm8 # xmm8 = xmm8[1],xmm9[1]
paddd %xmm4, %xmm8
movdqa %xmm2, %xmm4
punpcklqdq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0]
punpckhqdq %xmm3, %xmm2 # xmm2 = xmm2[1],xmm3[1]
paddd %xmm4, %xmm2
paddd %xmm8, %xmm2
movdqa %xmm6, %xmm3
punpcklqdq %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0]
punpckhqdq %xmm7, %xmm6 # xmm6 = xmm6[1],xmm7[1]
paddd %xmm3, %xmm6
movdqa %xmm0, %xmm3
punpcklqdq %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm3, %xmm0
paddd %xmm6, %xmm0
movdqu %xmm2, (%rcx)
movdqu %xmm0, 0x10(%rcx)
addq $0x20, %rcx
addq $0x2, %r15
addq %r10, %r11
movq 0x20(%rsp), %r14
jmp 0xaa34f
imulq 0x30(%rsp), %rdx
cmpl 0x28(%rsp), %r15d
jge 0xaa59e
movl %r15d, %r10d
shrl %r10d
movl %r15d, %edi
andl $0x1, %edi
addl %r10d, %edi
imulq %r8, %rdi
addq %rdx, %rdi
imulq %rax, %rdi
addq %r9, %rdi
pxor %xmm2, %xmm2
xorl %r10d, %r10d
movl %r13d, %r11d
pxor %xmm1, %xmm1
pxor %xmm3, %xmm3
pxor %xmm0, %xmm0
subl $0x1, %r11d
jb 0xaa552
movdqu (%rdi,%r10), %xmm4
movdqu (%rsi,%r10,4), %xmm5
movdqu 0x10(%rsi,%r10,4), %xmm6
movdqu 0x20(%rsi,%r10,4), %xmm7
movdqu 0x30(%rsi,%r10,4), %xmm8
pmaddwd %xmm4, %xmm5
paddd %xmm5, %xmm0
pmaddwd %xmm4, %xmm6
paddd %xmm6, %xmm3
pmaddwd %xmm4, %xmm7
paddd %xmm7, %xmm1
pmaddwd %xmm4, %xmm8
paddd %xmm8, %xmm2
addq $0x10, %r10
jmp 0xaa503
movdqa %xmm0, %xmm4
punpckldq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
movdqa %xmm1, %xmm5
punpckldq %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
punpckhdq %xmm3, %xmm0 # xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
punpckhdq %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
movdqa %xmm4, %xmm2
punpcklqdq %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
punpckhqdq %xmm5, %xmm4 # xmm4 = xmm4[1],xmm5[1]
paddd %xmm2, %xmm4
movdqa %xmm0, %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm2, %xmm0
paddd %xmm4, %xmm0
movdqu %xmm0, (%rcx)
addq $0x10, %rcx
incl %r15d
jmp 0xaa4c5
movq 0x30(%rsp), %rdi
incq %rdi
addq 0x38(%rsp), %rsi
jmp 0xaa308
movq 0x80(%rsp), %rdx
incq %rdx
jmp 0xaa2ac
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xaa5f7
lock
decl (%rax)
jne 0xaa5f7
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xaa5ef
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa5f7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xaa62e
lock
decl (%rax)
jne 0xaa62e
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xaa626
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa62e
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1b0(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqu %xmm0, 0x17c(%rsp)
leaq 0x190(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
andq $0x0, 0x2d0(%rsp)
movdqa %xmm0, 0x290(%rsp)
movdqu %xmm0, 0x29c(%rsp)
movdqa %xmm0, 0x2b0(%rsp)
movdqu %xmm0, 0x2bc(%rsp)
movq 0x13c(%rsp), %xmm0
movdqa 0x2f0(%rsp), %xmm1
pcmpeqd %xmm0, %xmm1
pshufd $0x50, %xmm1, %xmm0 # xmm0 = xmm1[0,0,1,1]
movmskpd %xmm0, %eax
cmpl $0x3, %eax
jne 0xaa701
movq 0x118(%rsp), %rax
testq %rax, %rax
je 0xabef8
lock
incl (%rax)
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xabef8
lock
decl (%rax)
jne 0xabef8
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xabef0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xabef8
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x290(%rsp), %rdi
pushq $0x10
popq %r8
pushq $0x4
popq %r9
movq 0x208(%rsp), %rsi
movq 0x168(%rsp), %rdx
movl 0x78(%rsp), %ecx
callq 0x628f2
jmp 0xabf66
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x200(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqu %xmm0, 0x1cc(%rsp)
andq $0x0, 0x270(%rsp)
leaq 0x1e0(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
movdqa %xmm0, 0x230(%rsp)
movdqu %xmm0, 0x23c(%rsp)
leaq 0x360(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movdqa %xmm0, 0x250(%rsp)
movdqu %xmm0, 0x25c(%rsp)
movq 0x60(%rsp), %rdx
movl %edx, %eax
shrl %eax
xorl %r8d, %r8d
cmpl $0xc, 0x78(%rsp)
setge %cl
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0x2c(%rdi)
cmovgel %eax, %r8d
movl 0x80(%rsp), %esi
shll %cl, %esi
movdqa %xmm0, 0x20(%rdi)
subl %r8d, %edx
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rcx
movq 0x70(%rsp), %r8
movq 0x50(%rsp), %r9
callq 0x628f2
movslq 0x60(%rsp), %r13
xorl %eax, %eax
cmpq $0x24, %rax
je 0xaa910
movslq 0x38c(%rsp), %rcx
movq 0x3a0(%rsp), %rdx
imulq %rax, %rdx
movq 0x370(%rsp), %rsi
imulq %rsi, %rdx
addq 0x360(%rsp), %rdx
imulq %rsi, %rcx
movq %rax, %rsi
imulq %r13, %rsi
xorl %edi, %edi
movq %rdi, %r8
orq $0x1, %r8
cmpq %r13, %r8
jge 0xaa903
movq %rdi, %r8
shrq %r8
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x170(%rsp), %r9
movl %r14d, %r10d
subl $0x1, %r10d
jb 0xaa8ab
movdqu (%r9), %xmm0
movdqu 0x10(%r9), %xmm1
movdqu %xmm0, (%r8)
movdqu %xmm1, 0x10(%r8)
movq 0x1b0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x20, %r8
jmp 0xaa87a
addq $0x2, %rdi
jmp 0xaa84a
movl %edi, %r9d
shrl %r9d
movl %edi, %r8d
andl $0x1, %r8d
addl %r9d, %r8d
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x170(%rsp), %r9
movl %r14d, %r10d
subl $0x1, %r10d
jb 0xaa900
movdqu (%r9), %xmm0
movdqu %xmm0, (%r8)
movq 0x1b0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x10, %r8
jmp 0xaa8db
incq %rdi
cmpq %r13, %rdi
jl 0xaa8b1
incq %rax
jmp 0xaa80b
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xaa947
lock
decl (%rax)
jne 0xaa947
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xaa93f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaa947
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1b0(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqu %xmm0, 0x17c(%rsp)
leaq 0x190(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x230(%rsp), %rdi
pushq $0x24
popq %rdx
pushq $0x4
popq %r8
pushq $0x1
popq %r9
movq 0x60(%rsp), %rsi
movq 0xe8(%rsp), %rcx
callq 0x628f2
movq 0xe8(%rsp), %rax
movl %eax, %ecx
sarl $0x2, %ecx
movq 0x230(%rsp), %rax
movq %rax, 0x50(%rsp)
movq 0x240(%rsp), %rax
imulq 0x270(%rsp), %rax
movq %rax, 0x80(%rsp)
movq 0x20(%rsp), %rax
movq 0xf0(%rax), %r9
movq 0x100(%rax), %rdx
movq 0x360(%rsp), %rsi
movq %rsi, 0x78(%rsp)
movq 0x370(%rsp), %rsi
movq 0x3a0(%rsp), %rdi
movq %rsi, 0x28(%rsp)
imulq %rsi, %rdi
movq %rdi, 0x30(%rsp)
xorl %esi, %esi
testl %ecx, %ecx
cmovlel %esi, %ecx
movq %rcx, 0x38(%rsp)
movq 0x130(%rax), %rax
movq %rdx, 0x70(%rsp)
imulq %rdx, %rax
movq %rax, 0xe0(%rsp)
cmpq 0x38(%rsp), %rsi
je 0xaada4
leaq (,%rsi,4), %r8
movq 0x80(%rsp), %rdx
imulq %rdx, %r8
movq 0x50(%rsp), %rcx
addq %rcx, %r8
leaq 0x1(,%rsi,4), %rdi
imulq %rdx, %rdi
addq %rcx, %rdi
leaq 0x2(,%rsi,4), %r15
imulq %rdx, %r15
addq %rcx, %r15
movq %rsi, 0x48(%rsp)
leaq 0x3(,%rsi,4), %rax
imulq %rdx, %rax
addq %rcx, %rax
movq 0x20(%rsp), %rcx
movslq 0x11c(%rcx), %rsi
imulq 0x70(%rsp), %rsi
movq 0x78(%rsp), %rcx
movq %r9, 0x58(%rsp)
movq %r9, %rdx
xorl %r11d, %r11d
cmpq $0x24, %r11
je 0xaad8a
movslq 0x38c(%rsp), %r12
imulq 0x28(%rsp), %r12
movq %rcx, %rbx
xorl %r9d, %r9d
movq %r9, %r10
orq $0x1, %r10
cmpq %r13, %r10
jge 0xaad6f
pxor %xmm4, %xmm4
xorl %ebp, %ebp
movl %r14d, %r10d
pxor %xmm2, %xmm2
pxor %xmm5, %xmm5
pxor %xmm0, %xmm0
pxor %xmm6, %xmm6
pxor %xmm3, %xmm3
pxor %xmm7, %xmm7
pxor %xmm1, %xmm1
subl $0x1, %r10d
jb 0xaaba6
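# unrolled int8 dot-product kernel: each pmaddwd multiplies adjacent signed 16-bit
# pairs and sums them into 32-bit lanes; eight xmm accumulators keep independent
# output/pixel partial sums live across the loop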
movdqu (%rbx,%rbp), %xmm9
movdqu 0x10(%rbx,%rbp), %xmm8
movdqu (%rdx,%rbp,2), %xmm10
movdqu 0x10(%rdx,%rbp,2), %xmm11
movdqu 0x20(%rdx,%rbp,2), %xmm12
movdqu 0x30(%rdx,%rbp,2), %xmm13
movdqa %xmm9, %xmm14
pmaddwd %xmm10, %xmm14
paddd %xmm14, %xmm1
movdqa %xmm9, %xmm14
pmaddwd %xmm11, %xmm14
paddd %xmm14, %xmm7
movdqa %xmm9, %xmm14
pmaddwd %xmm12, %xmm14
paddd %xmm14, %xmm3
pmaddwd %xmm13, %xmm9
paddd %xmm9, %xmm6
pmaddwd %xmm8, %xmm10
paddd %xmm10, %xmm0
pmaddwd %xmm8, %xmm11
paddd %xmm11, %xmm5
pmaddwd %xmm8, %xmm12
paddd %xmm12, %xmm2
pmaddwd %xmm13, %xmm8
paddd %xmm8, %xmm4
addq $0x20, %rbp
jmp 0xaab0c
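# loop exit: the punpckldq/punpckhdq and punpcklqdq/punpckhqdq sequence below
# transposes the eight accumulators, and paddd folds them into two vectors of
# final 32-bit sums before the movd stores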
movdqa %xmm1, %xmm8
punpckldq %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
movdqa %xmm3, %xmm9
punpckldq %xmm6, %xmm9 # xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
punpckhdq %xmm7, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
punpckhdq %xmm6, %xmm3 # xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
movdqa %xmm0, %xmm6
punpckldq %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
movdqa %xmm2, %xmm7
punpckldq %xmm4, %xmm7 # xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
punpckhdq %xmm5, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
punpckhdq %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
movdqa %xmm8, %xmm4
punpcklqdq %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[0]
punpckhqdq %xmm9, %xmm8 # xmm8 = xmm8[1],xmm9[1]
paddd %xmm4, %xmm8
movdqa %xmm1, %xmm4
punpcklqdq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0]
punpckhqdq %xmm3, %xmm1 # xmm1 = xmm1[1],xmm3[1]
paddd %xmm4, %xmm1
paddd %xmm8, %xmm1
movdqa %xmm6, %xmm3
punpcklqdq %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0]
punpckhqdq %xmm7, %xmm6 # xmm6 = xmm6[1],xmm7[1]
paddd %xmm3, %xmm6
movdqa %xmm0, %xmm3
punpcklqdq %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0]
punpckhqdq %xmm2, %xmm0 # xmm0 = xmm0[1],xmm2[1]
paddd %xmm3, %xmm0
paddd %xmm6, %xmm0
movd %xmm1, (%r8)
pshufd $0x55, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
movd %xmm2, (%rdi)
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
movd %xmm2, (%r15)
pshufd $0xff, %xmm1, %xmm1 # xmm1 = xmm1[3,3,3,3]
movd %xmm1, (%rax)
movd %xmm0, 0x4(%r8)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, 0x4(%rdi)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, 0x4(%r15)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, 0x4(%rax)
addq $0x8, %r8
addq $0x8, %rdi
addq $0x8, %r15
addq $0x8, %rax
addq $0x2, %r9
addq %r12, %rbx
jmp 0xaaad7
movl %r9d, %ebx
shrl %ebx
movl %r9d, %r10d
andl $0x1, %r10d
addl %ebx, %r10d
imulq %r12, %r10
addq %rcx, %r10
pxor %xmm0, %xmm0
movl %r14d, %ebx
xorl %ebp, %ebp
pxor %xmm2, %xmm2
pxor %xmm1, %xmm1
pxor %xmm3, %xmm3
subl $0x1, %ebx
jb 0xaacff
movdqu (%r10,%rbp), %xmm4
movdqu (%rdx,%rbp,4), %xmm5
movdqu 0x10(%rdx,%rbp,4), %xmm6
movdqu 0x20(%rdx,%rbp,4), %xmm7
movdqu 0x30(%rdx,%rbp,4), %xmm8
pmaddwd %xmm4, %xmm5
paddd %xmm5, %xmm0
pmaddwd %xmm4, %xmm6
paddd %xmm6, %xmm2
pmaddwd %xmm4, %xmm7
paddd %xmm7, %xmm1
pmaddwd %xmm4, %xmm8
paddd %xmm8, %xmm3
addq $0x10, %rbp
jmp 0xaacb4
movdqa %xmm0, %xmm4
punpckldq %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
movdqa %xmm1, %xmm5
punpckldq %xmm3, %xmm5 # xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
punpckhdq %xmm2, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
punpckhdq %xmm3, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
movdqa %xmm4, %xmm2
punpcklqdq %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
punpckhqdq %xmm5, %xmm4 # xmm4 = xmm4[1],xmm5[1]
paddd %xmm2, %xmm4
movdqa %xmm0, %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm2, %xmm0
paddd %xmm4, %xmm0
movd %xmm0, (%r8)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, (%rdi)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, (%r15)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, (%rax)
addq $0x4, %r8
addq $0x4, %rdi
addq $0x4, %r15
addq $0x4, %rax
incl %r9d
cmpl 0x60(%rsp), %r9d
jl 0xaac89
incq %r11
addq %rsi, %rdx
addq 0x30(%rsp), %rcx
jmp 0xaaab9
movq 0x48(%rsp), %rsi
incq %rsi
movq 0x58(%rsp), %r9
addq 0xe0(%rsp), %r9
jmp 0xaaa3e
movq 0xe8(%rsp), %rcx
andq $-0x4, %rcx
movq 0x230(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x240(%rsp), %rax
imulq 0x270(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0x20(%rsp), %rax
movq 0xf0(%rax), %rdx
movq %rdx, 0x58(%rsp)
movq 0x100(%rax), %rdx
movq 0x130(%rax), %rax
movq %rdx, 0x48(%rsp)
imulq %rdx, %rax
movq %rax, 0x38(%rsp)
movq 0x360(%rsp), %rax
movq %rax, 0x50(%rsp)
movq 0x370(%rsp), %rbx
movq 0x3a0(%rsp), %r15
imulq %rbx, %r15
cmpq 0xe8(%rsp), %rcx
jge 0xaaf90
movl %ecx, %eax
cltd
pushq $0x4
popq %rsi
idivl %esi
movq 0x30(%rsp), %rdi
imulq %rcx, %rdi
addq 0x28(%rsp), %rdi
addl %eax, %edx
movq 0x20(%rsp), %rax
movslq 0x11c(%rax), %rax
movslq %edx, %rdx
imulq 0x38(%rsp), %rdx
addq 0x58(%rsp), %rdx
imulq 0x48(%rsp), %rax
movq 0x50(%rsp), %r11
xorl %r8d, %r8d
cmpq $0x24, %r8
je 0xaaf88
movslq 0x38c(%rsp), %rsi
imulq %rbx, %rsi
movq %r11, %rbp
xorl %r9d, %r9d
movq %r9, %r10
orq $0x1, %r10
cmpq %r13, %r10
jge 0xaaf73
pxor %xmm0, %xmm0
movl %r14d, %r12d
xorl %r10d, %r10d
pxor %xmm1, %xmm1
subl $0x1, %r12d
jb 0xaaed9
movdqu (%rbp,%r10,2), %xmm2
movdqu 0x10(%rbp,%r10,2), %xmm3
movdqu (%rdx,%r10), %xmm4
pmaddwd %xmm4, %xmm2
paddd %xmm2, %xmm0
pmaddwd %xmm4, %xmm3
paddd %xmm3, %xmm1
addq $0x10, %r10
jmp 0xaaea9
pshufd $0xee, %xmm0, %xmm2 # xmm2 = xmm0[2,3,2,3]
paddd %xmm0, %xmm2
pshufd $0x55, %xmm2, %xmm0 # xmm0 = xmm2[1,1,1,1]
pshufd $0xee, %xmm1, %xmm3 # xmm3 = xmm1[2,3,2,3]
paddd %xmm1, %xmm3
pshufd $0x55, %xmm3, %xmm1 # xmm1 = xmm3[1,1,1,1]
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
punpckldq %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
paddd %xmm0, %xmm2
movq %xmm2, (%rdi)
addq $0x8, %rdi
addq $0x2, %r9
addq %rsi, %rbp
jmp 0xaae8b
movl %r9d, %r10d
shrl %r10d
movl %r9d, %ebp
andl $0x1, %ebp
addl %r10d, %ebp
imulq %rsi, %rbp
addq %r11, %rbp
pxor %xmm0, %xmm0
movl %r14d, %r10d
xorl %r12d, %r12d
subl $0x1, %r10d
jb 0xaaf56
movdqu (%rbp,%r12), %xmm1
movdqu (%rdx,%r12), %xmm2
pmaddwd %xmm1, %xmm2
paddd %xmm2, %xmm0
addq $0x10, %r12
jmp 0xaaf35
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, (%rdi)
addq $0x4, %rdi
incl %r9d
cmpl 0x60(%rsp), %r9d
jl 0xaaf15
incq %r8
addq %r15, %r11
addq %rax, %rdx
jmp 0xaae6f
incq %rcx
jmp 0xaae21
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xaafc7
lock
decl (%rax)
jne 0xaafc7
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xaafbf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaafc7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xaaffe
lock
decl (%rax)
jne 0xaaffe
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xaaff6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xaaffe
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1b0(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqu %xmm0, 0x17c(%rsp)
leaq 0x190(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
andq $0x0, 0x2d0(%rsp)
movdqa %xmm0, 0x290(%rsp)
movdqu %xmm0, 0x29c(%rsp)
movdqa %xmm0, 0x2b0(%rsp)
movdqu %xmm0, 0x2bc(%rsp)
movq 0x13c(%rsp), %xmm0
movdqa 0x280(%rsp), %xmm1
pcmpeqd %xmm0, %xmm1
pshufd $0x50, %xmm1, %xmm0 # xmm0 = xmm1[0,0,1,1]
movmskpd %xmm0, %eax
cmpl $0x3, %eax
jne 0xab0d1
movq 0x118(%rsp), %rax
testq %rax, %rax
je 0xac3d1
lock
incl (%rax)
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xac3d1
lock
decl (%rax)
jne 0xac3d1
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xac3c9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac3d1
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x290(%rsp), %rdi
pushq $0x4
popq %r8
pushq $0x1
popq %r9
movq 0xf0(%rsp), %rsi
movq 0xf8(%rsp), %rdx
movq 0xe8(%rsp), %rcx
callq 0x628f2
movq 0x290(%rsp), %r8
movq 0x2a0(%rsp), %r9
movq 0x2d0(%rsp), %r11
jmp 0xac44f
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x3a0(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x360(%rsp)
movdqu %xmm0, 0x36c(%rsp)
leaq 0x380(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
leaq 0x170(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x10
popq %rsi
pushq $0x4
popq %r8
movq 0x50(%rsp), %rdx
movq 0x108(%rsp), %rcx
callq 0x63810
movq 0x108(%rsp), %rax
movl %eax, %ecx
sarl $0x2, %ecx
movq 0x180(%rsp), %rax
movq 0x1b0(%rsp), %rdx
movq %rax, 0x5c0(%rsp)
imulq %rax, %rdx
movq 0x20(%rsp), %r14
movq 0xb8(%r14), %rsi
movq 0xe8(%r14), %rdi
imulq %rsi, %rdi
xorl %r8d, %r8d
movq 0x50(%rsp), %rax
testl %eax, %eax
cmovlel %r8d, %eax
movq %rax, 0x50(%rsp)
movq 0x170(%rsp), %rax
testl %ecx, %ecx
cmovlel %r8d, %ecx
movq %rcx, 0x5c8(%rsp)
movq 0xa8(%r14), %rcx
leaq (,%rdi,4), %r9
movq %r9, 0x5b0(%rsp)
movq %rsi, 0x5b8(%rsp)
leaq (,%rsi,4), %rsi
movq %rsi, 0x5a8(%rsp)
leaq (%rdi,%rdi,2), %rsi
addq %rcx, %rsi
movq %rsi, 0xf8(%rsp)
leaq (%rcx,%rdi,2), %rsi
movq %rsi, 0x2f0(%rsp)
leaq (%rdx,%rdx,2), %rsi
addq %rax, %rsi
leaq (,%rdx,4), %r9
movq %r9, 0x5a0(%rsp)
leaq (%rax,%rdx,2), %r9
movq %r9, 0x320(%rsp)
movq %rax, 0x2e0(%rsp)
addq %rax, %rdx
movq %rsi, %rax
movq %rdx, 0xe8(%rsp)
movq %rcx, 0x280(%rsp)
addq %rdi, %rcx
movq %rcx, 0x160(%rsp)
pxor %xmm0, %xmm0
cmpq 0x5c8(%rsp), %r8
je 0xab747
movslq 0x19c(%rsp), %rcx
movslq 0xd4(%r14), %rdx
imulq 0x5c0(%rsp), %rcx
movq %rcx, 0x208(%rsp)
movq 0x5b8(%rsp), %rsi
imulq %rdx, %rsi
imulq 0x5a8(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movq 0x2e0(%rsp), %rdx
movq 0xe8(%rsp), %r9
movq 0x320(%rsp), %r10
movq %rax, 0x330(%rsp)
movq %rax, %r11
xorl %ebx, %ebx
movq %r8, 0x168(%rsp)
movq %rsi, 0x2e8(%rsp)
cmpq 0x50(%rsp), %rbx
je 0xab6ec
movq %r11, 0x88(%rsp)
movq %r10, 0x100(%rsp)
movq %r9, 0x78(%rsp)
movq %rdx, 0x70(%rsp)
movdqa %xmm0, 0x260(%rsp)
movdqa %xmm0, 0x250(%rsp)
movdqa %xmm0, 0x240(%rsp)
movdqa %xmm0, 0x230(%rsp)
movdqa %xmm0, 0x6e0(%rsp)
movdqa %xmm0, 0x6d0(%rsp)
movdqa %xmm0, 0x6c0(%rsp)
movdqa %xmm0, 0x6b0(%rsp)
movdqa %xmm0, 0x6a0(%rsp)
movdqa %xmm0, 0x690(%rsp)
movdqa %xmm0, 0x680(%rsp)
movdqa %xmm0, 0x670(%rsp)
movdqa %xmm0, 0x610(%rsp)
movdqa %xmm0, 0x600(%rsp)
movdqa %xmm0, 0x5f0(%rsp)
movdqa %xmm0, 0x5e0(%rsp)
movslq 0x1ec(%rsp), %r10
movq 0x200(%rsp), %r15
movq 0x1c0(%rsp), %rax
movq 0x1d0(%rsp), %rcx
movq %rcx, %rdx
imulq %r15, %rdx
movq %rbx, 0xe0(%rsp)
imulq %rbx, %r10
leaq (%r15,%r15,2), %r11
addq %r10, %r11
imulq %rcx, %r11
addq %rax, %r11
movq %rdx, 0xf0(%rsp)
leaq (,%rdx,4), %rdx
movq %rdx, 0x48(%rsp)
leaq (%r10,%r15,2), %r12
imulq %rcx, %r12
addq %rax, %r12
addq %r10, %r15
imulq %rcx, %r15
addq %rax, %r15
imulq %rcx, %r10
addq %rax, %r10
movq 0x160(%rsp), %rbp
movq 0x2f0(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0xf8(%rsp), %r9
movq 0x280(%rsp), %r13
xorl %ecx, %ecx
movq %rcx, 0x18(%rsp)
movq %rcx, %rax
orq $0x3, %rax
cmpq 0x60(%rsp), %rax
jge 0xab5e8
movq %rbp, 0x28(%rsp)
xorl %ebp, %ebp
cmpq $0x20, %rbp
je 0xab5ac
movswl (%r10,%rbp), %r14d
movswl (%r13,%rbp), %ebx
imull %r14d, %ebx
addl 0x230(%rsp,%rbp,2), %ebx
movswl (%r15,%rbp), %eax
movswl 0x20(%r13,%rbp), %esi
imull %eax, %esi
movswl (%r12,%rbp), %edx
movswl 0x40(%r13,%rbp), %r8d
imull %edx, %r8d
addl %esi, %r8d
movswl (%r11,%rbp), %esi
movswl 0x60(%r13,%rbp), %ecx
imull %esi, %ecx
addl %r8d, %ecx
movq %r13, %r8
addq %rbp, %r8
addl %ebx, %ecx
movl %ecx, 0x230(%rsp,%rbp,2)
movswl 0x20(%rdi,%r8), %ecx
imull %eax, %ecx
movswl 0x40(%rdi,%r8), %ebx
imull %edx, %ebx
addl %ecx, %ebx
movswl 0x60(%rdi,%r8), %ecx
imull %esi, %ecx
addl %ebx, %ecx
leaq (%r8,%rdi), %rbx
movswl (%rdi,%r8), %r8d
imull %r14d, %r8d
addl 0x6b0(%rsp,%rbp,2), %r8d
addl %r8d, %ecx
movl %ecx, 0x6b0(%rsp,%rbp,2)
movswl 0x20(%rdi,%rbx), %ecx
imull %eax, %ecx
movswl 0x40(%rdi,%rbx), %r8d
imull %edx, %r8d
addl %ecx, %r8d
movswl 0x60(%rdi,%rbx), %ecx
imull %esi, %ecx
addl %r8d, %ecx
movswl (%rdi,%rbx), %r8d
imull %r14d, %r8d
addl 0x670(%rsp,%rbp,2), %r8d
addl %r8d, %ecx
leaq (%rbx,%rdi), %r8
movl %ecx, 0x670(%rsp,%rbp,2)
movswl (%rdi,%r8), %ecx
imull %r14d, %ecx
movswl 0x20(%rdi,%r8), %ebx
imull %eax, %ebx
movswl 0x40(%rdi,%r8), %eax
imull %edx, %eax
addl %ebx, %eax
movswl 0x60(%rdi,%r8), %edx
imull %esi, %edx
addl 0x5e0(%rsp,%rbp,2), %ecx
addl %eax, %edx
addl %ecx, %edx
movl %edx, 0x5e0(%rsp,%rbp,2)
addq $0x2, %rbp
jmp 0xab48e
movq 0x18(%rsp), %rcx
addq $0x4, %rcx
movq 0x48(%rsp), %rdx
addq %rdx, %r11
addq %rdx, %r12
addq %rdx, %r15
movq 0x38(%rsp), %rax
addq %rax, %r13
addq %rdx, %r10
addq %rax, %r9
addq %rax, 0x30(%rsp)
movq 0x28(%rsp), %rbp
addq %rax, %rbp
movq 0x20(%rsp), %r14
jmp 0xab470
movq 0x2e8(%rsp), %rsi
movq 0x88(%rsp), %r11
movq 0xe0(%rsp), %rbx
movq 0xf0(%rsp), %r8
movq 0x30(%rsp), %r15
movq 0x18(%rsp), %r12
cmpq 0x60(%rsp), %r12
jge 0xab67d
xorl %eax, %eax
cmpq $0x20, %rax
je 0xab669
movswl (%r10,%rax), %ecx
movswl (%r13,%rax), %edx
imull %ecx, %edx
addl %edx, 0x230(%rsp,%rax,2)
movswl (%rbp,%rax), %edx
imull %ecx, %edx
addl %edx, 0x6b0(%rsp,%rax,2)
movswl (%r15,%rax), %edx
imull %ecx, %edx
addl %edx, 0x670(%rsp,%rax,2)
movswl (%r9,%rax), %edx
imull %ecx, %edx
addl %edx, 0x5e0(%rsp,%rax,2)
addq $0x2, %rax
jmp 0xab61b
incq %r12
addq %rsi, %r9
addq %rsi, %r15
addq %rsi, %rbp
addq %rsi, %r13
addq %r8, %r10
jmp 0xab612
xorl %eax, %eax
movq 0x168(%rsp), %r8
movq 0x70(%rsp), %rdx
movq 0x78(%rsp), %r9
movq 0x100(%rsp), %r10
cmpq $0x40, %rax
je 0xab6d0
movl 0x230(%rsp,%rax), %ecx
movl %ecx, (%rdx,%rax)
movl 0x6b0(%rsp,%rax), %ecx
movl %ecx, (%r9,%rax)
movl 0x670(%rsp,%rax), %ecx
movl %ecx, (%r10,%rax)
movl 0x5e0(%rsp,%rax), %ecx
movl %ecx, (%r11,%rax)
addq $0x4, %rax
jmp 0xab699
incq %rbx
movq 0x208(%rsp), %rax
addq %rax, %r11
addq %rax, %r10
addq %rax, %r9
addq %rax, %rdx
jmp 0xab322
incq %r8
movq 0x5b0(%rsp), %rax
addq %rax, 0x280(%rsp)
addq %rax, 0xf8(%rsp)
addq %rax, 0x2f0(%rsp)
addq %rax, 0x160(%rsp)
movq 0x330(%rsp), %rax
movq 0x5a0(%rsp), %rcx
addq %rcx, %rax
addq %rcx, 0x320(%rsp)
addq %rcx, 0xe8(%rsp)
addq %rcx, 0x2e0(%rsp)
jmp 0xab2a5
movq 0x108(%rsp), %rax
andq $-0x4, %rax
movq 0x180(%rsp), %rcx
movq 0xb8(%r14), %rdx
movq 0xe8(%r14), %rsi
imulq %rdx, %rsi
movq %rsi, 0x160(%rsp)
imulq %rax, %rsi
addq 0xa8(%r14), %rsi
movq %rsi, 0x38(%rsp)
leaq (%rdx,%rdx,2), %rsi
movq %rsi, 0x168(%rsp)
leaq (,%rdx,4), %rsi
movq %rsi, 0x208(%rsp)
movq %rdx, 0x2f0(%rsp)
addq %rdx, %rdx
movq %rdx, 0x2e8(%rsp)
movq 0x1b0(%rsp), %rdx
movq %rcx, 0xf8(%rsp)
imulq %rcx, %rdx
movq %rdx, 0xe8(%rsp)
imulq %rax, %rdx
addq 0x170(%rsp), %rdx
movq %rdx, 0x280(%rsp)
pxor %xmm0, %xmm0
cmpq 0x108(%rsp), %rax
jge 0xaba7f
movslq 0x19c(%rsp), %rdx
movslq 0xd4(%r14), %rsi
movq 0x2f0(%rsp), %r9
imulq %rsi, %r9
movq 0x168(%rsp), %rdi
imulq %rsi, %rdi
movq 0x38(%rsp), %rcx
addq %rcx, %rdi
movq %rdi, 0xe0(%rsp)
movq 0x208(%rsp), %rdi
imulq %rsi, %rdi
movq %rdi, 0x18(%rsp)
imulq 0x2e8(%rsp), %rsi
addq %rcx, %rsi
movq %rsi, 0x100(%rsp)
addq %r9, %rcx
movq %rcx, 0xf0(%rsp)
imulq 0xf8(%rsp), %rdx
movq %rdx, 0x78(%rsp)
movq 0x280(%rsp), %rdx
xorl %edi, %edi
movq %rax, 0x70(%rsp)
movq %r9, 0x88(%rsp)
cmpq 0x50(%rsp), %rdi
je 0xaba4d
movq %rdx, 0x28(%rsp)
movdqa %xmm0, 0x260(%rsp)
movdqa %xmm0, 0x250(%rsp)
movdqa %xmm0, 0x240(%rsp)
movdqa %xmm0, 0x230(%rsp)
movslq 0x1ec(%rsp), %rsi
movq 0x200(%rsp), %r11
movq 0x1c0(%rsp), %rax
movq 0x1d0(%rsp), %rcx
movq %rcx, %rdx
imulq %r11, %rdx
movq %rdi, 0x30(%rsp)
imulq %rdi, %rsi
leaq (%r11,%r11,2), %r8
addq %rsi, %r8
imulq %rcx, %r8
addq %rax, %r8
movq %rdx, 0x48(%rsp)
leaq (,%rdx,4), %rbp
leaq (%rsi,%r11,2), %rbx
imulq %rcx, %rbx
addq %rax, %rbx
addq %rsi, %r11
imulq %rcx, %r11
addq %rax, %r11
imulq %rcx, %rsi
addq %rax, %rsi
movq 0x38(%rsp), %rdi
movq 0xf0(%rsp), %r10
movq 0x100(%rsp), %r15
movq 0xe0(%rsp), %r13
xorl %eax, %eax
movq %rax, %rcx
orq $0x3, %rcx
cmpq 0x60(%rsp), %rcx
jge 0xab9d9
xorl %ecx, %ecx
cmpq $0x20, %rcx
je 0xab9ae
movswl (%rsi,%rcx), %r12d
movswl (%rdi,%rcx), %r14d
imull %r12d, %r14d
addl 0x230(%rsp,%rcx,2), %r14d
movswl (%r11,%rcx), %r12d
movswl (%r10,%rcx), %edx
imull %r12d, %edx
movswl (%rbx,%rcx), %r12d
movswl (%r15,%rcx), %r9d
imull %r12d, %r9d
addl %edx, %r9d
movswl (%r8,%rcx), %edx
movswl (%r13,%rcx), %r12d
imull %edx, %r12d
addl %r9d, %r12d
addl %r14d, %r12d
movl %r12d, 0x230(%rsp,%rcx,2)
addq $0x2, %rcx
jmp 0xab950
addq $0x4, %rax
movq 0x18(%rsp), %rcx
addq %rcx, %r13
addq %rbp, %r8
addq %rcx, %r15
addq %rbp, %rbx
addq %rcx, %r10
addq %rbp, %r11
addq %rcx, %rdi
addq %rbp, %rsi
movq 0x20(%rsp), %r14
jmp 0xab93c
movq 0x88(%rsp), %r9
movq 0x48(%rsp), %r10
cmpq 0x60(%rsp), %rax
jge 0xaba1a
xorl %ecx, %ecx
cmpq $0x10, %rcx
je 0xaba0f
movswl (%rsi,%rcx,2), %edx
movswl (%rdi,%rcx,2), %r8d
imull %edx, %r8d
addl %r8d, 0x230(%rsp,%rcx,4)
incq %rcx
jmp 0xab9ef
incq %rax
addq %r9, %rdi
addq %r10, %rsi
jmp 0xab9e6
xorl %eax, %eax
movq 0x28(%rsp), %rdx
movq 0x30(%rsp), %rdi
cmpq $0x10, %rax
je 0xaba3b
movl 0x230(%rsp,%rax,4), %ecx
movl %ecx, (%rdx,%rax,4)
incq %rax
jmp 0xaba26
incq %rdi
addq 0x78(%rsp), %rdx
movq 0x70(%rsp), %rax
jmp 0xab882
incq %rax
movq 0x38(%rsp), %rcx
addq 0x160(%rsp), %rcx
movq %rcx, 0x38(%rsp)
movq 0x280(%rsp), %rcx
addq 0xe8(%rsp), %rcx
movq %rcx, 0x280(%rsp)
jmp 0xab7e8
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xabab6
lock
decl (%rax)
jne 0xabab6
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xabaae
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xabab6
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x200(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqu %xmm0, 0x1cc(%rsp)
leaq 0x1e0(%rsp), %rax
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
leaq 0x230(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, 0x20(%rdi)
movdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x4
popq %r8
movl 0x344(%rsp), %esi
movl 0x348(%rsp), %edx
movq 0x108(%rsp), %rcx
callq 0x63810
movq 0x170(%rsp), %rcx
movq 0x180(%rsp), %rdx
movq 0x230(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0x240(%rsp), %rax
movq 0x270(%rsp), %rsi
movq %rax, 0x48(%rsp)
imulq %rax, %rsi
movq %rsi, 0x38(%rsp)
movslq 0x328(%rsp), %rsi
xorl %edi, %edi
movq 0x108(%rsp), %rax
testl %eax, %eax
cmovlel %edi, %eax
movq %rax, 0x108(%rsp)
movl 0x58(%rsp), %r10d
addq $0x30, %rcx
movq 0x1b0(%rsp), %rax
imulq %rdx, %rax
movq %rax, 0x50(%rsp)
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
movq %rsi, 0x58(%rsp)
cmpq 0x108(%rsp), %rdi
je 0xabddc
movslq 0x19c(%rsp), %rbx
movslq 0x25c(%rsp), %r14
movq 0x38(%rsp), %r15
movq %rdi, 0x18(%rsp)
imulq %rdi, %r15
addq 0x30(%rsp), %r15
imulq 0x48(%rsp), %r14
movq 0x28(%rsp), %r12
imulq %rbx, %r12
imulq 0x58(%rsp), %rbx
movq %rcx, 0x60(%rsp)
movq %rcx, %rax
xorl %ebp, %ebp
cmpq 0x80(%rsp), %rbp
je 0xabdc5
leaq (,%rbp,2), %rdx
imulq %r14, %rdx
addq %r15, %rdx
leaq 0x1(,%rbp,2), %rsi
imulq %r14, %rsi
addq %r15, %rsi
movq %rax, %r13
xorl %edi, %edi
cmpq %r10, %rdi
je 0xabdba
xorl %r8d, %r8d
cmpq $0x10, %r8
je 0xabc7d
movl -0x30(%r13,%r8), %r11d
movl %r11d, 0x6b0(%rsp,%r8)
movl -0x20(%r13,%r8), %r11d
movl %r11d, 0x670(%rsp,%r8)
movl -0x10(%r13,%r8), %r11d
movl %r11d, 0x5e0(%rsp,%r8)
movl (%r13,%r8), %r11d
movl %r11d, 0x350(%rsp,%r8)
addq $0x4, %r8
jmp 0xabc3d
xorl %r8d, %r8d
cmpq $0x10, %r8
je 0xabcc5
movl 0x670(%rsp,%r8), %ecx
movl 0x6b0(%rsp,%r8), %r11d
addl %ecx, %r11d
movl 0x5e0(%rsp,%r8), %r9d
addl %r9d, %r11d
movl %r11d, 0x310(%rsp,%r8)
subl %r9d, %ecx
addl 0x350(%rsp,%r8), %ecx
movl %ecx, 0x300(%rsp,%r8)
addq $0x4, %r8
jmp 0xabc80
movl 0x310(%rsp), %r8d
movl 0x314(%rsp), %r11d
movl %r8d, 0x228(%rsp)
movl 0x300(%rsp), %r8d
movl 0x304(%rsp), %ecx
movl %r8d, 0x22c(%rsp)
movl %r11d, 0x220(%rsp)
movl %ecx, 0x224(%rsp)
movl 0x318(%rsp), %ecx
movl %ecx, 0x218(%rsp)
movl 0x308(%rsp), %ecx
movl %ecx, 0x21c(%rsp)
movl 0x31c(%rsp), %ecx
movl %ecx, 0x210(%rsp)
movl 0x30c(%rsp), %ecx
movl %ecx, 0x214(%rsp)
xorl %r8d, %r8d
cmpq $0x8, %r8
je 0xabd83
movl 0x220(%rsp,%r8), %ecx
movl 0x228(%rsp,%r8), %r9d
addl %ecx, %r9d
movl 0x218(%rsp,%r8), %r11d
addl %r11d, %r9d
movl %r9d, 0x5d8(%rsp,%r8)
subl %r11d, %ecx
addl 0x210(%rsp,%r8), %ecx
movl %ecx, 0x5d0(%rsp,%r8)
addq $0x4, %r8
jmp 0xabd3e
movq 0x5d8(%rsp), %xmm0
psrad $0x2, %xmm0
movq %xmm0, (%rdx)
movq 0x5d0(%rsp), %xmm0
psrad $0x2, %xmm0
movq %xmm0, (%rsi)
addq $0x8, %rdx
addq $0x8, %rsi
incq %rdi
addq %r12, %r13
jmp 0xabc31
incq %rbp
addq %rbx, %rax
jmp 0xabc00
movq 0x18(%rsp), %rdi
incq %rdi
movq 0x60(%rsp), %rcx
addq 0x50(%rsp), %rcx
jmp 0xabbb0
leaq 0x230(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x110(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xabe44
lock
decl (%rax)
jne 0xabe44
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
je 0xabe3c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xabe44
movq %rsi, %rdi
callq 0x5f3e0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xabe7b
lock
decl (%rax)
jne 0xabe7b
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xabe73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xabe7b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xabeb2
lock
decl (%rax)
jne 0xabeb2
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xabeaa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xabeb2
movq %rsi, %rdi
callq 0x5f3e0
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x110(%rsp), %xmm0
movaps %xmm0, 0x290(%rsp)
movq 0x120(%rsp), %rax
movq %rax, 0x2a0(%rsp)
movl 0x128(%rsp), %eax
movl %eax, 0x2a8(%rsp)
movq 0x130(%rsp), %rax
movq %rax, 0x2b0(%rsp)
movupd 0x138(%rsp), %xmm0
movupd %xmm0, 0x2b8(%rsp)
movl 0x148(%rsp), %eax
movl %eax, 0x2c8(%rsp)
movq 0x150(%rsp), %rax
movq %rax, 0x2d0(%rsp)
movq 0x28(%rsp), %rdx
leal (,%rdx,4), %eax
movslq %eax, %rcx
imull $0xc, %edx, %eax
movslq %eax, %rsi
imull $0x14, %edx, %eax
movslq %eax, %rdi
movl 0x160(%rsp), %eax
shll $0x4, %eax
movslq %eax, %r8
shlq $0x2, %rbx
shlq $0x2, %r8
xorl %r9d, %r9d
pushq $-0x50
popq %r10
pushq $0x50
popq %r13
movaps 0x345d98(%rip), %xmm0 # 0x3f1d40
movq 0x58(%rsp), %r11
cmpq 0x50(%rsp), %r9
je 0xac2ac
movq 0x270(%rsp), %rdx
movslq 0x2bc(%rsp), %r15
imulq 0x240(%rsp), %rdx
movq 0x290(%rsp), %rax
imulq %r9, %rdx
addq 0x230(%rsp), %rdx
movq %rdx, 0x28(%rsp)
movq 0x2a0(%rsp), %rdx
movq %r9, %r14
movq 0x2d0(%rsp), %r9
imulq %rdx, %r9
imulq %rdx, %r15
shlq $0x2, %r15
movq %r15, 0x30(%rsp)
movq %r14, 0x38(%rsp)
imulq %r14, %r9
leaq (%rax,%r9), %r15
addq $0x30, %r15
xorl %r9d, %r9d
cmpq %r11, %r9
je 0xac29f
movl 0x48(%rsp), %eax
imull %r9d, %eax
cltd
pushq $0x6
popq %r11
idivl %r11d
movslq %eax, %r14
shlq $0x4, %r14
addq 0x28(%rsp), %r14
movq %r15, 0x18(%rsp)
movq %r15, %rdx
xorl %r15d, %r15d
cmpq 0x60(%rsp), %r15
je 0xac288
movq %r14, %rax
movq %r10, %r11
testq %r11, %r11
je 0xac105
movdqu (%rax), %xmm1
movdqu (%rax,%rcx,4), %xmm3
movdqu (%rax,%r12,4), %xmm4
movdqu (%rax,%rsi,4), %xmm2
movdqu (%rax,%rbp,4), %xmm5
movdqu (%rax,%rdi,4), %xmm6
movdqa %xmm4, %xmm7
paddd %xmm3, %xmm7
psubd %xmm4, %xmm3
movdqa %xmm5, %xmm4
paddd %xmm2, %xmm4
psubd %xmm5, %xmm2
paddd %xmm7, %xmm1
paddd %xmm4, %xmm1
movdqa %xmm2, %xmm5
paddd %xmm2, %xmm5
paddd %xmm3, %xmm5
pslld $0x2, %xmm4
paddd %xmm7, %xmm4
pslld $0x2, %xmm6
paddd %xmm3, %xmm6
pslld $0x3, %xmm2
paddd %xmm6, %xmm2
movdqa %xmm1, 0x3b0(%rsp,%r11)
movdqa %xmm5, 0x410(%rsp,%r11)
movdqa %xmm4, 0x470(%rsp,%r11)
movdqa %xmm2, 0x4d0(%rsp,%r11)
addq $0x10, %r11
addq %rbx, %rax
jmp 0xac063
movdqu (%rax,%rdi,4), %xmm5
pslld $0x4, %xmm5
movdqu (%rax,%rcx,4), %xmm6
movdqu (%rax,%r12,4), %xmm2
movdqa %xmm6, %xmm1
psubd %xmm2, %xmm1
pslld $0x2, %xmm1
paddd %xmm1, %xmm5
movdqu (%rax,%rsi,4), %xmm7
movdqu (%rax,%rbp,4), %xmm8
movdqa %xmm7, %xmm4
psubd %xmm8, %xmm4
movdqa %xmm4, %xmm3
pslld $0x5, %xmm3
paddd %xmm5, %xmm3
paddd %xmm7, %xmm8
paddd %xmm6, %xmm2
movdqu (%rax), %xmm5
paddd %xmm2, %xmm5
paddd %xmm8, %xmm5
pslld $0x4, %xmm8
pslld $0x2, %xmm2
paddd %xmm8, %xmm2
pslld $0x3, %xmm4
paddd %xmm1, %xmm4
pslld $0x2, %xmm5
movdqa %xmm5, 0x3b0(%rsp)
movdqa %xmm4, 0x410(%rsp)
movdqa %xmm2, 0x470(%rsp)
movdqa %xmm3, 0x4d0(%rsp)
movq %rdx, %rax
movq %r13, %r11
cmpq $0x1d0, %r11 # imm = 0x1D0
je 0xac278
movdqa 0x320(%rsp,%r11), %xmm2
movdqa 0x330(%rsp,%r11), %xmm3
movdqa 0x340(%rsp,%r11), %xmm1
movdqa 0x350(%rsp,%r11), %xmm4
movdqa %xmm3, %xmm5
paddd %xmm2, %xmm5
psubd %xmm3, %xmm2
movdqa %xmm4, %xmm3
paddd %xmm1, %xmm3
psubd %xmm4, %xmm1
movdqa 0x310(%rsp,%r11), %xmm4
paddd %xmm5, %xmm4
paddd %xmm3, %xmm4
movdqa %xmm1, %xmm6
paddd %xmm1, %xmm6
paddd %xmm2, %xmm6
pslld $0x2, %xmm3
paddd %xmm5, %xmm3
paddd 0x360(%rsp,%r11), %xmm2
pslld $0x3, %xmm1
paddd %xmm2, %xmm1
cvtdq2ps %xmm4, %xmm2
mulps %xmm0, %xmm2
cvttps2dq %xmm2, %xmm2
cvtdq2ps %xmm6, %xmm4
mulps %xmm0, %xmm4
cvttps2dq %xmm4, %xmm4
cvtdq2ps %xmm3, %xmm3
mulps %xmm0, %xmm3
cvttps2dq %xmm3, %xmm3
cvtdq2ps %xmm1, %xmm1
mulps %xmm0, %xmm1
cvttps2dq %xmm1, %xmm1
movdqu %xmm2, -0x30(%rax)
movdqu %xmm4, -0x20(%rax)
movdqu %xmm3, -0x10(%rax)
movdqu %xmm1, (%rax)
addq $0x60, %r11
addq %r8, %rax
jmp 0xac1aa
incq %r15
addq $0x10, %r14
addq $0x40, %rdx
jmp 0xac052
incq %r9
movq 0x18(%rsp), %r15
addq 0x30(%rsp), %r15
movq 0x58(%rsp), %r11
jmp 0xac022
movq 0x38(%rsp), %r9
incq %r9
jmp 0xabfad
leaq 0x290(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x110(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xac314
lock
decl (%rax)
jne 0xac314
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xac30c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac314
movq %rsi, %rdi
callq 0x5f3e0
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xac34b
lock
decl (%rax)
jne 0xac34b
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
je 0xac343
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac34b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xac382
lock
decl (%rax)
jne 0xac382
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xac37a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac382
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xa6e1e
lock
decl (%rax)
jne 0xa6e1e
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xac3bc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa6e1e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xa6e1e
movq %rsi, %rdi
callq 0x5f3e0
movq 0x110(%rsp), %r8
movq 0x118(%rsp), %rax
movq %r8, 0x290(%rsp)
movq %rax, 0x298(%rsp)
movq 0x120(%rsp), %r9
movq %r9, 0x2a0(%rsp)
movl 0x128(%rsp), %eax
movl %eax, 0x2a8(%rsp)
movq 0x130(%rsp), %rax
movq %rax, 0x2b0(%rsp)
movupd 0x138(%rsp), %xmm0
movupd %xmm0, 0x2b8(%rsp)
movl 0x148(%rsp), %eax
movl %eax, 0x2c8(%rsp)
movq 0x150(%rsp), %r11
movq %r11, 0x2d0(%rsp)
movq 0x230(%rsp), %rbx
movq 0x240(%rsp), %r14
movq 0x60(%rsp), %rdi
leal (%rdi,%rdi), %eax
cltq
leal (%rdi,%rdi,2), %ecx
movslq %ecx, %rcx
leal (,%rdi,4), %edx
movslq %edx, %rdx
leal (%rdi,%rdi,4), %esi
movslq %esi, %rsi
imull $0x6, %edi, %edi
movslq %edi, %r10
xorl %r15d, %r15d
movq 0xe8(%rsp), %rdi
testl %edi, %edi
cmovlel %r15d, %edi
movq %rdi, 0xe8(%rsp)
movslq 0xf0(%rsp), %rbp
imulq 0x270(%rsp), %r14
movq %r14, 0x330(%rsp)
shlq $0x2, %r10
leaq (%rbx,%r13,4), %rdi
movq %rdi, 0x280(%rsp)
leaq (%rbx,%rax,4), %rax
movq %rax, 0xf8(%rsp)
leaq (%rbx,%rcx,4), %rax
movq %rax, 0x2f0(%rsp)
leaq (%rbx,%rdx,4), %rax
movq %rax, 0x160(%rsp)
xorl %edx, %edx
movq %rbx, 0xf0(%rsp)
leaq (%rbx,%rsi,4), %rax
movq %rax, 0x168(%rsp)
addq $0xc, %r8
movq %r8, 0x2e0(%rsp)
imulq %r9, %r11
shlq $0x2, %r9
movq %r9, 0x320(%rsp)
shlq $0x2, %rbp
movl $0x240, %r9d # imm = 0x240
movq 0x18(%rsp), %rsi
movq 0x100(%rsp), %rax
movq %r11, 0x108(%rsp)
movq %rbp, 0x70(%rsp)
cmpq 0xe8(%rsp), %rdx
je 0xac84d
movslq 0x2bc(%rsp), %rcx
imulq 0x320(%rsp), %rcx
movq %rcx, 0x2e8(%rsp)
movq 0x2e0(%rsp), %rcx
movq %rcx, 0x78(%rsp)
xorl %ecx, %ecx
movq %rdx, 0x208(%rsp)
cmpq %rax, %rcx
je 0xac805
movl 0x88(%rsp), %eax
movq %rcx, 0xe0(%rsp)
imull %ecx, %eax
cltd
pushq $0x6
popq %rcx
idivl %ecx
movslq %eax, %r13
movq 0x78(%rsp), %r8
movq 0x168(%rsp), %rdx
movq 0x160(%rsp), %rdi
movq 0x2f0(%rsp), %r12
movq 0xf8(%rsp), %rax
movq 0x280(%rsp), %r15
movq 0xf0(%rsp), %rcx
xorl %r11d, %r11d
cmpq %rsi, %r11
je 0xac7cb
movq %r11, 0x80(%rsp)
movq %r8, 0x60(%rsp)
movq %rdx, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movq %r12, 0x58(%rsp)
movq %rax, 0x48(%rsp)
movq %rax, %rbp
movq %r15, 0x38(%rsp)
movq %rcx, 0x50(%rsp)
movq %rcx, %rax
pushq $-0x5
popq %rsi
testq %rsi, %rsi
je 0xac68a
movl (%r15,%r13,4), %r11d
movl (%rbp,%r13,4), %r8d
leal (%r8,%r11), %ebx
subl %r8d, %r11d
movl (%r12,%r13,4), %r8d
movl (%rdi,%r13,4), %ecx
leal (%rcx,%r8), %r14d
subl %ecx, %r8d
leal (%rbx,%r14,4), %ecx
addl (%rax,%r13,4), %ebx
addl %r14d, %ebx
movl %ebx, 0x374(%rsp,%rsi,4)
leal (%r11,%r8,2), %ebx
movl %ebx, 0x38c(%rsp,%rsi,4)
movl %ecx, 0x3a4(%rsp,%rsi,4)
movl (%rdx,%r13,4), %ecx
leal (%r11,%r8,8), %r8d
leal (%r8,%rcx,4), %ecx
movl %ecx, 0x3bc(%rsp,%rsi,4)
incq %rsi
addq %r10, %rax
addq %r10, %r15
addq %r10, %rbp
addq %r10, %r12
addq %r10, %rdi
addq %r10, %rdx
jmp 0xac618
movl (%r12,%r13,4), %ecx
movl (%rdi,%r13,4), %edi
movl %ecx, %esi
subl %edi, %esi
movl %esi, %r8d
shll $0x5, %r8d
movl (%r15,%r13,4), %ebx
movl (%rbp,%r13,4), %r11d
movl %ebx, %r14d
subl %r11d, %r14d
leal (%r8,%r14,4), %r8d
movl (%rdx,%r13,4), %edx
shll $0x4, %edx
addl %r8d, %edx
addl %ecx, %edi
movl %edi, %ecx
shll $0x4, %ecx
addl %ebx, %r11d
leal (%rcx,%r11,4), %ecx
shll $0x3, %esi
addl %edi, %r11d
addl (%rax,%r13,4), %r11d
leal (%rsi,%r14,4), %eax
shll $0x2, %r11d
movl %r11d, 0x374(%rsp)
movl %eax, 0x38c(%rsp)
movl %ecx, 0x3a4(%rsp)
movl %edx, 0x3bc(%rsp)
movq 0x60(%rsp), %r8
movq %r8, %rsi
pushq $0x14
popq %rdi
movq 0x70(%rsp), %rbp
cmpq $0x74, %rdi
je 0xac77c
movl 0x350(%rsp,%rdi), %r15d
movl 0x354(%rsp,%rdi), %eax
leal (%rax,%r15), %r11d
subl %eax, %r15d
movl 0x358(%rsp,%rdi), %r12d
movl 0x35c(%rsp,%rdi), %eax
leal (%rax,%r12), %ecx
subl %eax, %r12d
movl 0x34c(%rsp,%rdi), %eax
addl %r11d, %eax
addl %ecx, %eax
cltd
idivl %r9d
movl %eax, -0xc(%rsi)
leal (%r15,%r12,2), %eax
cltd
idivl %r9d
movl %eax, -0x8(%rsi)
leal (%r11,%rcx,4), %eax
cltd
idivl %r9d
movl %eax, -0x4(%rsi)
leal (%r15,%r12,8), %eax
addl 0x360(%rsp,%rdi), %eax
cltd
idivl %r9d
movl %eax, (%rsi)
addq $0x18, %rdi
addq %rbp, %rsi
jmp 0xac707
movq 0x80(%rsp), %r11
incq %r11
movq 0x50(%rsp), %rcx
addq $0x4, %rcx
movq 0x38(%rsp), %r15
addq $0x4, %r15
movq 0x48(%rsp), %rax
addq $0x4, %rax
movq 0x58(%rsp), %r12
addq $0x4, %r12
movq 0x30(%rsp), %rdi
addq $0x4, %rdi
movq 0x28(%rsp), %rdx
addq $0x4, %rdx
addq $0x10, %r8
movq 0x18(%rsp), %rsi
jmp 0xac5db
movq 0xe0(%rsp), %rcx
incq %rcx
movq 0x78(%rsp), %rax
addq 0x2e8(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x100(%rsp), %rax
movq 0x108(%rsp), %r11
movq 0x208(%rsp), %rdx
jmp 0xac57f
incq %rdx
movq 0x330(%rsp), %rcx
addq %rcx, 0xf0(%rsp)
addq %rcx, 0x280(%rsp)
addq %rcx, 0xf8(%rsp)
addq %rcx, 0x2f0(%rsp)
addq %rcx, 0x160(%rsp)
addq %rcx, 0x168(%rsp)
addq %r11, 0x2e0(%rsp)
jmp 0xac541
leaq 0x290(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x110(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xac8b5
lock
decl (%rax)
jne 0xac8b5
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xac8ad
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac8b5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xac8ec
lock
decl (%rax)
jne 0xac8ec
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
je 0xac8e4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac8ec
movq %rsi, %rdi
callq 0x5f3e0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xac923
lock
decl (%rax)
jne 0xac923
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0xac91b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xac923
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xa9087
lock
decl (%rax)
jne 0xa9087
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xac95d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xa9087
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xa9087
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xac9eb
movq %rax, %rbx
jmp 0xacbea
jmp 0xad174
jmp 0xacbb0
movq %rax, %rbx
jmp 0xacce4
jmp 0xad174
jmp 0xaccaa
jmp 0xac9f0
jmp 0xaca00
jmp 0xaca10
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
jmp 0xaca64
movq %rax, %rbx
jmp 0xaca9b
movq %rax, %rbx
jmp 0xacad2
movq %rax, %rbx
jmp 0xacc21
movq %rax, %rbx
jmp 0xacc58
movq %rax, %rbx
jmp 0xacd1b
movq %rax, %rbx
jmp 0xacd52
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xaca64
lock
decl (%rax)
jne 0xaca64
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
jne 0xaca5e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xaca64
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xaca9b
lock
decl (%rax)
jne 0xaca9b
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
jne 0xaca95
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xaca9b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xacad2
lock
decl (%rax)
jne 0xacad2
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
jne 0xacacc
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacad2
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xacbea
lock
decl (%rax)
jne 0xacbea
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xacbda
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xacbea
jmp 0xad174
movq %rax, %rbx
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xacce4
lock
decl (%rax)
jne 0xacce4
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xaccd4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xacce4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xacbea
lock
decl (%rax)
jne 0xacbea
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
jne 0xacbe4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacbea
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xacc21
lock
decl (%rax)
jne 0xacc21
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
jne 0xacc1b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacc21
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xacc58
lock
decl (%rax)
jne 0xacc58
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
jne 0xacc52
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacc58
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xacce4
lock
decl (%rax)
jne 0xacce4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
jne 0xaccde
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacce4
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x238(%rsp), %rax
testq %rax, %rax
je 0xacd1b
lock
decl (%rax)
jne 0xacd1b
movq 0x230(%rsp), %rsi
movq 0x250(%rsp), %rdi
testq %rdi, %rdi
jne 0xacd15
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacd1b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0xacd52
lock
decl (%rax)
jne 0xacd52
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
jne 0xacd4c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xacd52
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
je 0xad08b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
movq %rax, %rbx
movq 0x368(%rsp), %rax
testq %rax, %rax
je 0xad0b4
lock
decl (%rax)
jne 0xad0b4
movq 0x360(%rsp), %rsi
movq 0x380(%rsp), %rdi
testq %rdi, %rdi
jne 0xad095
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xad0b4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0b4
jmp 0xad174
jmp 0xad174
jmp 0xad174
jmp 0xad174
movq %rax, %rbx
movq 0x118(%rsp), %rax
testq %rax, %rax
je 0xad0fc
lock
decl (%rax)
jne 0xad0fc
movq 0x110(%rsp), %rsi
movq 0x130(%rsp), %rdi
testq %rdi, %rdi
jne 0xad0e5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xad0fc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xad0fc
jmp 0xad174
jmp 0xad0f9
movq %rax, %rbx
jmp 0xad133
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xad133
lock
decl (%rax)
jne 0xad133
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0xad12d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xad133
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x630(%rsp), %rax
testq %rax, %rax
je 0xad16a
lock
decl (%rax)
jne 0xad16a
movq 0x628(%rsp), %rsi
movq 0x648(%rsp), %rdi
testq %rdi, %rdi
jne 0xad164
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xad16a
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0xad174
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj/ncnn/src/layer/x86/convolution_x86.cpp
|
ncnn::im2col_sgemm_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
im2col_sgemm_pack8to1_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
im2col_sgemm_pack8to1_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
im2col_sgemm_pack8to1_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
im2col_sgemm_pack8to1_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#endif
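// Runtime dispatch: when this translation unit is built without VNNI/AVX2/XOP,
// probe the CPU once and forward to a sibling object compiled with the stronger
// ISA, so a single binary still runs the best kernel the machine supports.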
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
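// tmp holds the re-packed image: column tiles of width 4 (AVX2 builds only),
// then 2, then 1 each occupy their own channel, so the GEMM inner loops below
// can stream contiguous 8-element int8 groups per input channel.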
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
int64_t* tmpptr = tmp.channel(i / 4);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m256i _v = _mm256_loadu_si256((const __m256i*)img0);
_mm256_storeu_si256((__m256i*)tmpptr, _v);
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
int64_t* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m128i _v = _mm_loadu_si128((const __m128i*)img0);
_mm_storeu_si128((__m128i*)tmpptr, _v);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
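// After the permute, walking a tmp channel yields maxk * inch consecutive
// 8-byte pixel groups for each 4-, 2- or 1-wide column tile.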
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
__m256i _sum04_15 = _mm256_setzero_si256();
__m256i _sum14_05 = _mm256_setzero_si256();
__m256i _sum06_17 = _mm256_setzero_si256();
__m256i _sum16_07 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
_sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
_sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
_sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
_sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_madd_epi16(_val23_16, _w01_16));
_sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_madd_epi16(_val32_16, _w01_16));
_sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_madd_epi16(_val23_16, _w23_16));
_sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_madd_epi16(_val32_16, _w23_16));
#endif
tmpptr += 32;
kptr0 += 32;
}
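// Each _mm256_madd_epi16 above multiplies adjacent int16 pairs and adds them
// horizontally, so one madd + add per register folds 16 int8 products into
// 8 int32 lanes; the permuted _val10/_val32 copies cover the crossed
// pixel/output pairings that a single madd cannot reach.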
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
_tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
_tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
_tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
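// The two 4x8 transposes regroup the interleaved accumulator lanes so the
// adds below finish the K-dimension reduction in vector form; the final
// permute then puts the results in pixel-major order for the stores.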
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
_sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
_sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);
int sum[16];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
_mm256_storeu_si256((__m256i*)(sum + 8), _sum04_15);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0[2] = sum[8];
outptr1[2] = sum[9];
outptr2[2] = sum[10];
outptr3[2] = sum[11];
outptr0[3] = sum[12];
outptr1[3] = sum[13];
outptr2[3] = sum[14];
outptr3[3] = sum[15];
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
_sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
_sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
_sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
_sum00 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum00);
_sum01 = _mm_add_epi32(_mm_madd_epi16(_val0, _w1), _sum01);
_sum02 = _mm_add_epi32(_mm_madd_epi16(_val0, _w2), _sum02);
_sum03 = _mm_add_epi32(_mm_madd_epi16(_val0, _w3), _sum03);
_sum10 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum10);
_sum11 = _mm_add_epi32(_mm_madd_epi16(_val1, _w1), _sum11);
_sum12 = _mm_add_epi32(_mm_madd_epi16(_val1, _w2), _sum12);
_sum13 = _mm_add_epi32(_mm_madd_epi16(_val1, _w3), _sum13);
#endif
#endif
tmpptr += 16;
kptr0 += 32;
}
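// The SSE2 path widens int8 to int16 by hand: _mm_cmpgt_epi8(0, v) yields
// 0xFF for negative bytes, so unpacking the value against that mask performs
// sign extension without SSE4.1's _mm_cvtepi8_epi16.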
#if __AVX2__
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
int sum[8];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
#endif
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
_val = _mm_cvtepi8_epi16(_val);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);
#if __AVXVNNI__ || __AVX512VNNI__
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_madd_epi16(_valval, _w01_16));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_madd_epi16(_valval, _w23_16));
#endif
#else
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val = _mm_cvtepi8_epi16(_val);
#else
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
_sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
_sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val, _w0), _sum0);
_sum1 = _mm_add_epi32(_mm_madd_epi16(_val, _w1), _sum1);
_sum2 = _mm_add_epi32(_mm_madd_epi16(_val, _w2), _sum2);
_sum3 = _mm_add_epi32(_mm_madd_epi16(_val, _w3), _sum3);
#endif
#endif
tmpptr += 8;
kptr0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__m256i _sum01 = _mm256_setzero_si256();
__m256i _sum23 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
_w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));
#if __AVXVNNI__ || __AVX512VNNI__
_sum01 = _mm256_dpwssd_epi32(_sum01, _val01_16, _w01_16);
_sum23 = _mm256_dpwssd_epi32(_sum23, _val23_16, _w01_16);
#else
_sum01 = _mm256_add_epi32(_sum01, _mm256_madd_epi16(_val01_16, _w01_16));
_sum23 = _mm256_add_epi32(_sum23, _mm256_madd_epi16(_val23_16, _w01_16));
#endif
tmpptr += 32;
kptr0 += 8;
}
__m128i _sum0 = _mm256_extracti128_si256(_sum01, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum01, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum23, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum23, 1);
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum1);
outptr0[2] = _mm_reduce_add_epi32(_sum2);
outptr0[3] = _mm_reduce_add_epi32(_sum3);
outptr0 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum01 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
_w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));
#if __AVXVNNI__ || __AVX512VNNI__
_sum01 = _mm256_dpwssd_epi32(_sum01, _val01_16, _w01_16);
#else
_sum01 = _mm256_add_epi32(_sum01, _mm256_madd_epi16(_val01_16, _w01_16));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
#if __XOP__
_sum0 = _mm_maddd_epi16(_val0, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val1, _w0, _sum1);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum0);
_sum1 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum1);
#endif
#endif
tmpptr += 16;
kptr0 += 8;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum01, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum01, 1);
#endif
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum1);
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__m128i _sum0 = _mm_setzero_si128();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
#if __XOP__
_sum0 = _mm_maddd_epi16(_val0, _w0, _sum0);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum0);
#endif
tmpptr += 8;
kptr0 += 8;
}
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0 += 1;
}
}
}
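Where SSE4.1 is unavailable, the loops above widen signed 8-bit lanes with a compare-and-unpack idiom instead of _mm_cvtepi8_epi16. A minimal standalone sketch of that idiom, with a helper name of our own choosing:

#include <emmintrin.h> // SSE2 only

// Widen the low eight int8 lanes of v to eight int16 lanes.
static inline __m128i widen_s8_to_s16_lo(__m128i v)
{
    // 0xff in each byte lane where v is negative, 0x00 elsewhere
    __m128i _sign = _mm_cmpgt_epi8(_mm_setzero_si128(), v);
    // interleaving each data byte with its sign byte produces the
    // little-endian int16 sign extension
    return _mm_unpacklo_epi8(v, _sign);
}

The same trick is applied to both activations (_val*) and weights (_w*) whenever the #else branches of the __SSE4_1__ guards are taken.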
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rcx, %r13
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r12
callq 0x732db
testl %eax, %eax
je 0xb3945
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x138bf0
movq %rbx, 0x10(%rsp)
movslq 0x2c(%r12), %rbp
movl 0x30(%r12), %ebx
movl 0x38(%r12), %r15d
movq %r14, 0x28(%rsp)
movslq 0x38(%r14), %rax
movq %rax, 0x20(%rsp)
leaq 0x30(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebp, %ecx
shrl %ecx
movl %ebp, %eax
andl $0x1, %eax
addl %ecx, %eax
cmpq $0x2, %rbp
setge %cl
pxor %xmm0, %xmm0
movdqu %xmm0, 0xc(%rdi)
movdqa %xmm0, (%rdi)
movdqu %xmm0, 0x2c(%rdi)
movdqa %xmm0, 0x20(%rdi)
cmovll %ebp, %eax
movl %ebx, %esi
shll %cl, %esi
movq 0x10(%r13), %rcx
movq %rcx, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r15d, %edx
movl %eax, %ecx
callq 0x628f2
movl %ebp, %eax
sarl %eax
xorl %ecx, %ecx
testl %ebx, %ebx
movl %ebx, %edx
movl $0x0, %ebx
movl %edx, 0x18(%rsp)
cmovgl %edx, %ebx
testl %r15d, %r15d
movl $0x0, %edi
cmovgl %r15d, %edi
testl %eax, %eax
cmovlel %ecx, %eax
leaq (,%rbp,8), %r8
xorl %edx, %edx
cmpq %rax, %rdx
je 0xb3a50
movq 0x70(%rsp), %r9
imulq %rdx, %r9
imulq 0x40(%rsp), %r9
addq 0x30(%rsp), %r9
xorl %r10d, %r10d
cmpq %rdi, %r10
je 0xb3a47
movq 0x40(%r12), %r11
movq (%r12), %rsi
imulq 0x10(%r12), %r11
addq %rcx, %rsi
imulq %r10, %r11
addq %rsi, %r11
movl %ebx, %r14d
subl $0x1, %r14d
jb 0xb3a42
movdqu (%r11), %xmm0
movdqu %xmm0, (%r9)
addq $0x10, %r9
addq %r8, %r11
jmp 0xb3a29
incq %r10
jmp 0xb3a08
incq %rdx
addq $0x10, %rcx
jmp 0xb39ec
movl %ebp, %eax
andl $-0x2, %eax
movq 0x30(%rsp), %r9
movslq %eax, %rcx
leaq (,%rcx,8), %r10
cmpq %rbp, %rcx
jge 0xb3acc
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x70(%rsp), %rax
imulq 0x40(%rsp), %rax
addq %r9, %rax
movq (%r12), %rdx
addq %r10, %rdx
xorl %r14d, %r14d
cmpq %rdi, %r14
je 0xb3ac3
movq 0x40(%r12), %r13
imulq 0x10(%r12), %r13
imulq %r14, %r13
addq %rdx, %r13
movl %ebx, %esi
subl $0x1, %esi
jb 0xb3abe
movq (%r13), %r11
movq %r11, (%rax)
addq $0x8, %rax
addq %r8, %r13
jmp 0xb3aa9
incq %r14
jmp 0xb3a90
incq %rcx
addq $0x8, %r10
jmp 0xb3a65
movq 0x20(%rsp), %rax
movl %eax, %ebx
sarl $0x2, %ebx
movq 0x28(%rsp), %rax
movq 0x40(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0x80(%rsp)
imull 0x18(%rsp), %r15d
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
movq (%rax), %rax
movq %rax, 0x18(%rsp)
testl %ebx, %ebx
cmovlel %edx, %ebx
cmpq %rbx, %rdx
je 0xb3ebc
leaq (,%rdx,4), %rdi
movq 0x80(%rsp), %rax
imulq %rax, %rdi
movq 0x18(%rsp), %rcx
addq %rcx, %rdi
leaq 0x1(,%rdx,4), %r8
imulq %rax, %r8
addq %rcx, %r8
leaq 0x2(,%rdx,4), %r9
imulq %rax, %r9
addq %rcx, %r9
leaq 0x3(,%rdx,4), %r10
imulq %rax, %r10
addq %rcx, %r10
movq 0x10(%rsp), %rax
movq 0x40(%rax), %r12
imulq 0x10(%rax), %r12
movq 0x30(%rsp), %r13
imulq %rdx, %r12
addq (%rax), %r12
movq 0x70(%rsp), %r14
imulq 0x40(%rsp), %r14
xorl %r11d, %r11d
movq %r11, %rax
orq $0x1, %rax
cmpq %rbp, %rax
jge 0xb3d6c
pxor %xmm4, %xmm4
xorl %eax, %eax
movl %r15d, %ecx
pxor %xmm2, %xmm2
pxor %xmm5, %xmm5
pxor %xmm0, %xmm0
pxor %xmm6, %xmm6
pxor %xmm3, %xmm3
pxor %xmm7, %xmm7
pxor %xmm1, %xmm1
subl $0x1, %ecx
jb 0xb3c87
movdqu (%r13,%rax), %xmm8
pxor %xmm9, %xmm9
pcmpgtb %xmm8, %xmm9
movdqa %xmm8, %xmm11
punpcklbw %xmm9, %xmm11 # xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3],xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
punpckhbw %xmm9, %xmm8 # xmm8 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
movdqu (%r12,%rax,2), %xmm10
movdqu 0x10(%r12,%rax,2), %xmm9
pxor %xmm12, %xmm12
pcmpgtb %xmm10, %xmm12
pxor %xmm13, %xmm13
pcmpgtb %xmm9, %xmm13
movdqa %xmm10, %xmm14
punpcklbw %xmm12, %xmm14 # xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
punpckhbw %xmm12, %xmm10 # xmm10 = xmm10[8],xmm12[8],xmm10[9],xmm12[9],xmm10[10],xmm12[10],xmm10[11],xmm12[11],xmm10[12],xmm12[12],xmm10[13],xmm12[13],xmm10[14],xmm12[14],xmm10[15],xmm12[15]
movdqa %xmm9, %xmm12
punpcklbw %xmm13, %xmm12 # xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
punpckhbw %xmm13, %xmm9 # xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
movdqa %xmm11, %xmm13
pmaddwd %xmm14, %xmm13
paddd %xmm13, %xmm1
movdqa %xmm11, %xmm13
pmaddwd %xmm10, %xmm13
paddd %xmm13, %xmm7
movdqa %xmm11, %xmm13
pmaddwd %xmm12, %xmm13
paddd %xmm13, %xmm3
pmaddwd %xmm9, %xmm11
paddd %xmm11, %xmm6
pmaddwd %xmm8, %xmm14
paddd %xmm14, %xmm0
pmaddwd %xmm8, %xmm10
paddd %xmm10, %xmm5
pmaddwd %xmm8, %xmm12
paddd %xmm12, %xmm2
pmaddwd %xmm8, %xmm9
paddd %xmm9, %xmm4
addq $0x10, %rax
jmp 0xb3bb7
movdqa %xmm1, %xmm8
punpckldq %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
movdqa %xmm3, %xmm9
punpckldq %xmm6, %xmm9 # xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
punpckhdq %xmm7, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
punpckhdq %xmm6, %xmm3 # xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
movdqa %xmm0, %xmm6
punpckldq %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
movdqa %xmm2, %xmm7
punpckldq %xmm4, %xmm7 # xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
punpckhdq %xmm5, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
punpckhdq %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
movdqa %xmm8, %xmm4
punpcklqdq %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[0]
punpckhqdq %xmm9, %xmm8 # xmm8 = xmm8[1],xmm9[1]
paddd %xmm4, %xmm8
movdqa %xmm1, %xmm4
punpcklqdq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0]
punpckhqdq %xmm3, %xmm1 # xmm1 = xmm1[1],xmm3[1]
paddd %xmm4, %xmm1
paddd %xmm8, %xmm1
movdqa %xmm6, %xmm3
punpcklqdq %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0]
punpckhqdq %xmm7, %xmm6 # xmm6 = xmm6[1],xmm7[1]
paddd %xmm3, %xmm6
movdqa %xmm0, %xmm3
punpcklqdq %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0]
punpckhqdq %xmm2, %xmm0 # xmm0 = xmm0[1],xmm2[1]
paddd %xmm3, %xmm0
paddd %xmm6, %xmm0
movd %xmm1, (%rdi)
pshufd $0x55, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
movd %xmm2, (%r8)
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
movd %xmm2, (%r9)
pshufd $0xff, %xmm1, %xmm1 # xmm1 = xmm1[3,3,3,3]
movd %xmm1, (%r10)
movd %xmm0, 0x4(%rdi)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, 0x4(%r8)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, 0x4(%r9)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, 0x4(%r10)
addq $0x8, %rdi
addq $0x8, %r8
addq $0x8, %r9
addq $0x8, %r10
addq $0x2, %r11
addq %r14, %r13
jmp 0xb3b82
movq 0x30(%rsp), %r14
movq 0x70(%rsp), %r12
movq 0x10(%rsp), %rax
movq 0x40(%rax), %rsi
imulq %rdx, %rsi
imulq 0x10(%rax), %rsi
addq (%rax), %rsi
imulq 0x40(%rsp), %r12
cmpl %ebp, %r11d
jge 0xb3eb4
movl %r11d, %ecx
shrl %ecx
movl %r11d, %eax
andl $0x1, %eax
addl %ecx, %eax
imulq %r12, %rax
addq %r14, %rax
pxor %xmm0, %xmm0
xorl %ecx, %ecx
movq %rsi, %r13
pxor %xmm2, %xmm2
pxor %xmm1, %xmm1
pxor %xmm3, %xmm3
cmpl %ecx, %r15d
je 0xb3e3e
movq (%rax,%rcx,8), %xmm4
pxor %xmm5, %xmm5
pcmpgtb %xmm4, %xmm5
punpcklbw %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
movdqu (%r13), %xmm5
movdqu 0x10(%r13), %xmm6
pxor %xmm7, %xmm7
pcmpgtb %xmm5, %xmm7
pxor %xmm8, %xmm8
pcmpgtb %xmm6, %xmm8
movdqa %xmm5, %xmm9
punpcklbw %xmm7, %xmm9 # xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
punpckhbw %xmm7, %xmm5 # xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
movdqa %xmm6, %xmm7
punpcklbw %xmm8, %xmm7 # xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
punpckhbw %xmm8, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
pmaddwd %xmm4, %xmm9
paddd %xmm9, %xmm0
pmaddwd %xmm4, %xmm5
paddd %xmm5, %xmm2
pmaddwd %xmm4, %xmm7
paddd %xmm7, %xmm1
pmaddwd %xmm4, %xmm6
paddd %xmm6, %xmm3
addq $0x20, %r13
incq %rcx
jmp 0xb3dc3
movdqa %xmm0, %xmm4
punpckldq %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
movdqa %xmm1, %xmm5
punpckldq %xmm3, %xmm5 # xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
punpckhdq %xmm2, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
punpckhdq %xmm3, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
movdqa %xmm4, %xmm2
punpcklqdq %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
punpckhqdq %xmm5, %xmm4 # xmm4 = xmm4[1],xmm5[1]
paddd %xmm2, %xmm4
movdqa %xmm0, %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm2, %xmm0
paddd %xmm4, %xmm0
movd %xmm0, (%rdi)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, (%r8)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, (%r9)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, (%r10)
addq $0x4, %rdi
addq $0x4, %r8
addq $0x4, %r9
addq $0x4, %r10
incl %r11d
jmp 0xb3d91
incq %rdx
jmp 0xb3b08
movq 0x20(%rsp), %rcx
andq $-0x4, %rcx
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq 0x40(%rax), %rdi
imulq 0x10(%rax), %rdi
movl %r15d, %r8d
pushq $0x4
popq %r9
cmpq 0x20(%rsp), %rcx
jge 0xb406f
movq 0x30(%rsp), %r11
movq 0x70(%rsp), %r14
imulq 0x40(%rsp), %r14
movl %ecx, %eax
cltd
idivl %r9d
movq %rdi, %r10
imulq %rcx, %r10
addl %eax, %edx
movslq %edx, %rax
movq 0x10(%rsp), %rdx
movq 0x40(%rdx), %r15
imulq %rax, %r15
imulq 0x10(%rdx), %r15
addq %rsi, %r10
addq (%rdx), %r15
xorl %edx, %edx
movq %rdx, %rbx
orq $0x1, %rbx
cmpq %rbp, %rbx
jge 0xb3fd0
movq %rdx, %r12
shrq %r12
imulq %r14, %r12
addq %r11, %r12
pxor %xmm0, %xmm0
xorl %r13d, %r13d
pxor %xmm1, %xmm1
cmpl %r13d, %r8d
je 0xb3f96
movdqu (%r12), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
movdqa %xmm2, %xmm4
punpcklbw %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
punpckhbw %xmm3, %xmm2 # xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
movq (%r15,%r13,8), %xmm3
pxor %xmm5, %xmm5
pcmpgtb %xmm3, %xmm5
punpcklbw %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
pmaddwd %xmm3, %xmm4
paddd %xmm4, %xmm0
pmaddwd %xmm2, %xmm3
paddd %xmm3, %xmm1
addq $0x10, %r12
incq %r13
jmp 0xb3f4c
pshufd $0xee, %xmm0, %xmm2 # xmm2 = xmm0[2,3,2,3]
paddd %xmm0, %xmm2
pshufd $0x55, %xmm2, %xmm0 # xmm0 = xmm2[1,1,1,1]
pshufd $0xee, %xmm1, %xmm3 # xmm3 = xmm1[2,3,2,3]
paddd %xmm1, %xmm3
pshufd $0x55, %xmm3, %xmm1 # xmm1 = xmm3[1,1,1,1]
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
punpckldq %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
paddd %xmm0, %xmm2
movq %xmm2, (%r10)
addq $0x8, %r10
addq $0x2, %rdx
jmp 0xb3f24
movq 0x30(%rsp), %r11
movq 0x70(%rsp), %r14
movq 0x10(%rsp), %rbx
imulq 0x40(%rbx), %rax
imulq 0x10(%rbx), %rax
addq (%rbx), %rax
imulq 0x40(%rsp), %r14
cmpl %ebp, %edx
jge 0xb4067
movl %edx, %ebx
shrl %ebx
movl %edx, %r15d
andl $0x1, %r15d
addl %ebx, %r15d
imulq %r14, %r15
addq %r11, %r15
pxor %xmm0, %xmm0
xorl %r12d, %r12d
cmpl %r12d, %r8d
je 0xb4048
movq (%r15,%r12,8), %xmm1
pxor %xmm2, %xmm2
pcmpgtb %xmm1, %xmm2
punpcklbw %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movq (%rax,%r12,8), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
punpcklbw %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
pmaddwd %xmm1, %xmm2
paddd %xmm2, %xmm0
incq %r12
jmp 0xb4012
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, (%r10)
addq $0x4, %r10
incl %edx
jmp 0xb3ff2
incq %rcx
jmp 0xb3edd
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xb409d
lock
decl (%rax)
jne 0xb409d
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0xb4095
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xb409d
movq %rsi, %rdi
callq 0x5f3e0
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xb40ea
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xb40e2
lock
decl (%rax)
jne 0xb40e2
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0xb40dc
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xb40e2
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1_int8.h
|
ncnn::im2col_sgemm_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void im2col_sgemm_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
im2col_sgemm_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
im2col_sgemm_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
im2col_sgemm_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
im2col_sgemm_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#endif
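// runtime dispatch above forwards to variants of this routine built with
// wider instruction sets; the code below is the generic path compiled here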
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __SSE2__
if (inch >= 4)
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
}
else
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
}
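// tmp is laid out as column tiles of width 4 (AVX2 only), 2 and 1; the loops
// below fill it, interleaving groups of 4 input channels when inch >= 4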
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
signed char* tmpptr = tmp.channel(i / 4);
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr[8] = img0[2];
tmpptr[9] = img1[2];
tmpptr[10] = img2[2];
tmpptr[11] = img3[2];
tmpptr[12] = img0[3];
tmpptr[13] = img1[3];
tmpptr[14] = img2[3];
tmpptr[15] = img3[3];
tmpptr += 16;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#else // __SSE2__
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
signed char* tmpptr = tmp.channel(i);
int q = 0;
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#endif // __SSE2__
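// compute output channels in groups of 4 first (SSE2 path), then the
// remaining channels one at a time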
int nn_outch = 0;
int remain_outch_start = 0;
#if __SSE2__
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
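// nn4 counts steps over interleaved groups of 4 input channels,
// nn1 covers the leftover inch % 4 channels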
__m256i _sum00_12 = _mm256_setzero_si256();
__m256i _sum20_32 = _mm256_setzero_si256();
if (nn4 > 0)
{
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
__m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
__m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
_sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
_sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_madd_epi16(_val10_16, _w01_16));
_sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_madd_epi16(_val23_16, _w01_16));
_sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_madd_epi16(_val32_16, _w01_16));
#endif
tmpptr += 16;
kptr0 += 16;
}
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
_sum20_32 = _mm256_permute4x64_epi64(_sum20_32, _MM_SHUFFLE(2, 1, 3, 0));
}
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
__m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
__m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);
int j = 0;
for (; j < nn1; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val0123 = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
_val0123 = _mm_unpacklo_epi8(_val0123, _extval0123);
#endif
__m128i _val01 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(1, 1, 0, 0));
_val01 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _val23 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(3, 3, 2, 2));
_val23 = _mm_shuffle_epi32(_val23, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
__m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
__m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
_sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
_sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));
tmpptr += 4;
kptr0 += 4;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum10);
_tmp1 = _mm_unpacklo_epi32(_sum20, _sum30);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum10);
_tmp3 = _mm_unpackhi_epi32(_sum20, _sum30);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum10 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum20 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum30 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)outptr1, _sum10);
_mm_storeu_si128((__m128i*)outptr2, _sum20);
_mm_storeu_si128((__m128i*)outptr3, _sum30);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __AVX2__
__m256i _sum00_12 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
#endif
if (nn4 > 0)
{
#if __AVX2__
__m256i _sum10_02 = _mm256_setzero_si256();
#else
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn4; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
_val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_madd_epi16(_val10_16, _w01_16));
#endif
#else
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val01 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
_val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
#else
_sum00 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum00);
_sum01 = _mm_add_epi32(_mm_madd_epi16(_val0, _w1), _sum01);
_sum10 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum10);
_sum11 = _mm_add_epi32(_mm_madd_epi16(_val1, _w1), _sum11);
#endif
#endif
tmpptr += 8;
kptr0 += 16;
}
#if __AVX2__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
#else
#if __SSSE3__
_sum00 = _mm_hadd_epi32(_sum00, _sum01);
_sum10 = _mm_hadd_epi32(_sum10, _sum11);
#else
__m128i _sum00_sh = _mm_shuffle_epi32(_sum00, 216);
__m128i _sum01_sh = _mm_shuffle_epi32(_sum01, 216);
__m128i _sum10_sh = _mm_shuffle_epi32(_sum10, 216);
__m128i _sum11_sh = _mm_shuffle_epi32(_sum11, 216);
_sum00 = _mm_unpacklo_epi64(_sum00_sh, _sum01_sh);
_sum01 = _mm_unpackhi_epi64(_sum00_sh, _sum01_sh);
_sum10 = _mm_unpacklo_epi64(_sum10_sh, _sum11_sh);
_sum11 = _mm_unpackhi_epi64(_sum10_sh, _sum11_sh);
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum10 = _mm_add_epi32(_sum10, _sum11);
#endif
#endif
}
#if __AVX2__
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99754
// gcc incorrectly puts the 32 bits at the tail with _mm_loadu_si32 :(
// 0 1 2 3 x x x x x x x x x x x x
// x x x x x x x x x x x x 0 1 2 3
// __m128i _w0123 = _mm_loadu_si32(kptr0);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 2;
kptr0 += 4;
}
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m128i _sum0 = _mm_setzero_si128();
if (nn4 > 0)
{
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
_val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 4;
kptr0 += 16;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
}
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set1_epi16(tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
tmpptr += 1;
kptr0 += 4;
}
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#endif // __SSE2__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __SSE2__
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
if (nn4 > 0)
{
__m256i _sum0_2 = _mm256_setzero_si256();
__m256i _sum1_3 = _mm256_setzero_si256();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
__m128i _w = _mm_cvtepi8_epi16(_w0123);
_w = _mm_unpacklo_epi64(_w, _w);
__m256i _ww = _mm256_inserti128_si256(_mm256_castsi128_si256(_w), _w, 1);
__m256i _sl0_1 = _mm256_mullo_epi16(_val01_16, _ww);
__m256i _sh0_1 = _mm256_mulhi_epi16(_val01_16, _ww);
_sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
_sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
tmpptr += 16;
kptr0 += 4;
}
__m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum1_3, 0);
__m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
__m128i _sum3 = _mm256_extracti128_si256(_sum1_3, 1);
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
sum2 = _mm_reduce_add_epi32(_sum2);
sum3 = _mm_reduce_add_epi32(_sum3);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
sum2 += val2 * w;
sum3 += val3 * w;
tmpptr += 4;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
if (nn4 > 0)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val);
__m128i _val01 = _mm_unpacklo_epi8(_val, _extval);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
_w = _mm_shuffle_epi32(_w, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl01 = _mm_mullo_epi16(_val01, _w);
__m128i _sh01 = _mm_mulhi_epi16(_val01, _w);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 8;
kptr0 += 4;
}
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
tmpptr += 2;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum = 0;
if (nn4 > 0)
{
__m128i _sum = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
__m128i _val = _mm_unpacklo_epi8(_val0123, _extval);
#endif
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
_sum = _mm_add_epi32(_sum, _mm_unpacklo_epi16(_sl, _sh));
tmpptr += 4;
kptr0 += 4;
}
sum = _mm_reduce_add_epi32(_sum);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#else // __SSE2__
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i);
const signed char* kptr0 = kernel.channel(p);
int nn1 = inch * maxk;
int sum = 0;
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#endif // __SSE2__
}
}
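_mm_reduce_add_epi32 is used above but defined elsewhere in ncnn. The assembly below reduces each accumulator with pshufd/paddd pairs (0xee then 0x55), which is consistent with a sketch like the following; the name and formulation here are ours:

#include <emmintrin.h>

// Horizontal sum of the four int32 lanes of v (plain SSE2).
static inline int reduce_add_epi32_sketch(__m128i v)
{
    // fold the high 64 bits onto the low 64 bits (pshufd 0xee)
    __m128i _hi64 = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 2, 3, 2));
    __m128i _sum64 = _mm_add_epi32(v, _hi64);
    // fold lane 1 onto lane 0 (pshufd 0x55)
    __m128i _hi32 = _mm_shuffle_epi32(_sum64, _MM_SHUFFLE(1, 1, 1, 1));
    __m128i _sum32 = _mm_add_epi32(_sum64, _hi32);
    return _mm_cvtsi128_si32(_sum32);
}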
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rcx, %r13
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
callq 0x732db
testl %eax, %eax
je 0xb413a
movq %r15, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x137be8
movq %rbx, 0x38(%rsp)
movl 0x2c(%r15), %ecx
movl 0x30(%r15), %eax
movq %rax, 0x10(%rsp)
movslq 0x38(%r15), %r12
movq %r14, 0x40(%rsp)
movslq 0x38(%r14), %rax
movq %rax, 0x48(%rsp)
andq $0x0, 0x90(%rsp)
pxor %xmm0, %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqu %xmm0, 0x5c(%rsp)
movdqa %xmm0, 0x70(%rsp)
movdqu %xmm0, 0x7c(%rsp)
cmpq $0x4, %r12
movl %ecx, 0x24(%rsp)
jl 0xb41b7
pushq $0x4
popq %r8
cmpl $0x2, %ecx
jl 0xb41d7
movq 0x10(%rsp), %rax
leal (%rax,%rax), %esi
movl %r12d, %eax
shrl $0x2, %eax
movl %r12d, %edx
andl $0x3, %edx
addl %eax, %edx
movl %ecx, %eax
shrl %eax
andl $0x1, %ecx
addl %eax, %ecx
jmp 0xb41ec
pushq $0x1
popq %r8
pushq $0x1
popq %r9
cmpl $0x2, %ecx
jl 0xb41f2
movq 0x10(%rsp), %rax
leal (%rax,%rax), %esi
movl %ecx, %eax
shrl %eax
andl $0x1, %ecx
addl %eax, %ecx
jmp 0xb41f9
movl %r12d, %eax
shrl $0x2, %eax
movl %r12d, %edx
andl $0x3, %edx
addl %eax, %edx
movq 0x10(%rsp), %rax
movl %eax, %esi
pushq $0x4
popq %r9
jmp 0xb41fc
movq 0x10(%rsp), %rax
movl %eax, %esi
movl %r12d, %edx
movq 0x10(%r13), %rax
movq %rax, (%rsp)
leaq 0x50(%rsp), %rdi
callq 0x628f2
movl 0x24(%rsp), %eax
movl %eax, %edi
sarl %edi
xorl %ecx, %ecx
movq 0x10(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movl %esi, 0xc(%rsp)
movslq %eax, %r8
testl %edi, %edi
cmovlel %ecx, %edi
movq %rdi, 0x28(%rsp)
xorl %eax, %eax
movq %r12, 0x18(%rsp)
cmpq 0x28(%rsp), %rax
je 0xb4365
movq 0x90(%rsp), %r11
movq %rax, 0x30(%rsp)
imulq %rax, %r11
imulq 0x60(%rsp), %r11
addq 0x50(%rsp), %r11
pushq $0x1
popq %r14
pushq $0x2
popq %r13
pushq $0x3
popq %rdx
xorl %ebx, %ebx
movq %rbx, %rax
orq $0x3, %rax
cmpq %r12, %rax
jge 0xb434f
movq 0x40(%r15), %rax
imulq 0x10(%r15), %rax
movq %rax, %r10
imulq %rdx, %r10
movq (%r15), %r9
addq %rcx, %r9
movq %rax, %rbp
imulq %r13, %rbp
movq %rax, %rdi
imulq %rbx, %rdi
imulq %r14, %rax
movl 0xc(%rsp), %esi
subl $0x1, %esi
jb 0xb4300
movb (%r9,%rdi), %r12b
movb %r12b, (%r11)
movb (%r9,%rax), %r12b
movb %r12b, 0x1(%r11)
movb (%r9,%rbp), %r12b
movb %r12b, 0x2(%r11)
movb (%r9,%r10), %r12b
movb %r12b, 0x3(%r11)
movb 0x1(%r9,%rdi), %r12b
movb %r12b, 0x4(%r11)
movb 0x1(%r9,%rax), %r12b
movb %r12b, 0x5(%r11)
movb 0x1(%r9,%rbp), %r12b
movb %r12b, 0x6(%r11)
movb 0x1(%r9,%r10), %r12b
movb %r12b, 0x7(%r11)
addq $0x8, %r11
addq %r8, %r9
jmp 0xb42af
addq $0x4, %rbx
addq $0x4, %rdx
addq $0x4, %r13
addq $0x4, %r14
movq 0x18(%rsp), %r12
jmp 0xb4273
movq 0x40(%r15), %rax
imulq 0x10(%r15), %rax
imulq %rbx, %rax
addq (%r15), %rax
movl 0xc(%rsp), %edx
subl $0x1, %edx
jb 0xb434c
movb (%rax,%rcx), %sil
movb %sil, (%r11)
movb 0x1(%rax,%rcx), %sil
movb %sil, 0x1(%r11)
addq $0x2, %r11
addq %r8, %rax
jmp 0xb432e
incq %rbx
cmpq %r12, %rbx
jl 0xb431a
movq 0x30(%rsp), %rax
incq %rax
addq $0x2, %rcx
jmp 0xb423f
movq %r8, %rcx
andq $-0x2, %rcx
cmpq %r8, %rcx
jge 0xb4462
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x90(%rsp), %rax
imulq 0x60(%rsp), %rax
addq 0x50(%rsp), %rax
pushq $0x1
popq %rbx
pushq $0x2
popq %r14
pushq $0x3
popq %r13
xorl %edx, %edx
movq %rdx, %rsi
orq $0x3, %rsi
cmpq %r12, %rsi
jge 0xb4455
movq 0x40(%r15), %rdi
imulq 0x10(%r15), %rdi
movq %rdi, %r10
imulq %r13, %r10
movq (%r15), %r9
addq %rcx, %r9
movq %rdi, %rsi
imulq %r14, %rsi
movq %rdi, %rbp
imulq %rbx, %rbp
imulq %rdx, %rdi
movl 0xc(%rsp), %r12d
subl $0x1, %r12d
jb 0xb440e
movb (%r9,%rdi), %r11b
movb %r11b, (%rax)
movb (%r9,%rbp), %r11b
movb %r11b, 0x1(%rax)
movb (%r9,%rsi), %r11b
movb %r11b, 0x2(%rax)
movb (%r9,%r10), %r11b
movb %r11b, 0x3(%rax)
addq $0x4, %rax
addq %r8, %r9
jmp 0xb43e0
addq $0x4, %rdx
addq $0x4, %r13
addq $0x4, %r14
addq $0x4, %rbx
movq 0x18(%rsp), %r12
jmp 0xb43a3
movq 0x40(%r15), %rsi
imulq %rdx, %rsi
imulq 0x10(%r15), %rsi
addq (%r15), %rsi
addq %rcx, %rsi
movl 0xc(%rsp), %edi
subl $0x1, %edi
jb 0xb4452
movb (%rsi), %r9b
movb %r9b, (%rax)
incq %rax
addq %r8, %rsi
jmp 0xb443f
incq %rdx
cmpq %r12, %rdx
jl 0xb4428
incq %rcx
jmp 0xb436c
movq 0x40(%rsp), %rdi
movq %r12, %rax
movq (%rdi), %rbp
movq 0x40(%rdi), %rcx
imulq 0x10(%rdi), %rcx
movq %rcx, 0x28(%rsp)
cltd
pushq $0x4
popq %rcx
idivl %ecx
movl %eax, %ecx
movl %edx, %esi
movq 0x48(%rsp), %rax
sarl $0x2, %eax
movq 0x10(%rsp), %rdx
imull %edx, %ecx
imull %edx, %esi
xorl %r9d, %r9d
testl %esi, %esi
cmovlel %r9d, %esi
testl %eax, %eax
cmovlel %r9d, %eax
movq %rax, 0x30(%rsp)
movl %ecx, 0xc(%rsp)
cmpq 0x30(%rsp), %r9
movq 0x38(%rsp), %rax
je 0xb48a7
leaq (,%r9,4), %r10
movq 0x28(%rsp), %rdx
imulq %rdx, %r10
addq %rbp, %r10
leaq 0x1(,%r9,4), %r11
imulq %rdx, %r11
addq %rbp, %r11
leaq 0x2(,%r9,4), %r15
imulq %rdx, %r15
addq %rbp, %r15
leaq 0x3(,%r9,4), %r12
imulq %rdx, %r12
addq %rbp, %r12
movq 0x90(%rsp), %r14
imulq 0x60(%rsp), %r14
movq 0x40(%rax), %rbx
movq %r9, 0x18(%rsp)
imulq %r9, %rbx
imulq 0x10(%rax), %rbx
movq 0x50(%rsp), %rdi
addq (%rax), %rbx
xorl %r9d, %r9d
movq %r9, %rax
orq $0x1, %rax
cmpq %r8, %rax
jge 0xb46d8
movq %r9, %r13
shrq %r13
imulq %r14, %r13
addq %rdi, %r13
testl %ecx, %ecx
jle 0xb4606
pxor %xmm2, %xmm2
pxor %xmm0, %xmm0
pxor %xmm3, %xmm3
pxor %xmm1, %xmm1
movq %rbx, %rax
subl $0x1, %ecx
jb 0xb45d0
movq (%r13), %xmm4
pxor %xmm5, %xmm5
pcmpgtb %xmm4, %xmm5
punpcklbw %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
pshufd $0x44, %xmm4, %xmm5 # xmm5 = xmm4[0,1,0,1]
pshufd $0xee, %xmm4, %xmm4 # xmm4 = xmm4[2,3,2,3]
movdqu (%rax), %xmm6
pxor %xmm7, %xmm7
pcmpgtb %xmm6, %xmm7
movdqa %xmm6, %xmm8
punpcklbw %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
punpckhbw %xmm7, %xmm6 # xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
movdqa %xmm5, %xmm7
pmaddwd %xmm8, %xmm7
paddd %xmm7, %xmm1
pmaddwd %xmm6, %xmm5
paddd %xmm5, %xmm0
pmaddwd %xmm4, %xmm8
paddd %xmm8, %xmm3
pmaddwd %xmm4, %xmm6
paddd %xmm6, %xmm2
addq $0x8, %r13
addq $0x10, %rax
jmp 0xb4564
pshufd $0xd8, %xmm1, %xmm1 # xmm1 = xmm1[0,2,1,3]
pshufd $0xd8, %xmm0, %xmm4 # xmm4 = xmm0[0,2,1,3]
pshufd $0xd8, %xmm3, %xmm0 # xmm0 = xmm3[0,2,1,3]
pshufd $0xd8, %xmm2, %xmm2 # xmm2 = xmm2[0,2,1,3]
movdqa %xmm1, %xmm3
punpcklqdq %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0]
punpckhqdq %xmm4, %xmm1 # xmm1 = xmm1[1],xmm4[1]
paddd %xmm3, %xmm1
movdqa %xmm0, %xmm3
punpcklqdq %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0]
punpckhqdq %xmm2, %xmm0 # xmm0 = xmm0[1],xmm2[1]
paddd %xmm3, %xmm0
jmp 0xb4611
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
movq %rbx, %rax
xorl %ecx, %ecx
cmpl %ecx, %esi
je 0xb466f
movzwl (%r13,%rcx,2), %edx
movd %edx, %xmm2
punpcklbw %xmm2, %xmm2 # xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0x50, %xmm2, %xmm2 # xmm2 = xmm2[0,0,1,1,4,5,6,7]
psraw $0x8, %xmm2
pshufd $0x50, %xmm2, %xmm2 # xmm2 = xmm2[0,0,1,1]
movq (%rax,%rcx,4), %xmm3
pxor %xmm4, %xmm4
pcmpgtb %xmm3, %xmm4
punpcklbw %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
pshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
movdqa %xmm2, %xmm4
pmullw %xmm3, %xmm4
pmulhw %xmm3, %xmm2
movdqa %xmm4, %xmm3
punpcklwd %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
paddd %xmm3, %xmm1
punpckhwd %xmm2, %xmm4 # xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
paddd %xmm4, %xmm0
incq %rcx
jmp 0xb4613
movd %xmm1, (%r10)
pshufd $0x55, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
movd %xmm2, (%r11)
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
movd %xmm2, (%r15)
pshufd $0xff, %xmm1, %xmm1 # xmm1 = xmm1[3,3,3,3]
movd %xmm1, (%r12)
movd %xmm0, 0x4(%r10)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, 0x4(%r11)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, 0x4(%r15)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, 0x4(%r12)
addq $0x8, %r10
addq $0x8, %r11
addq $0x8, %r15
addq $0x8, %r12
addq $0x2, %r9
movl 0xc(%rsp), %ecx
jmp 0xb452c
movq 0x50(%rsp), %rax
movq 0x90(%rsp), %r14
imulq 0x60(%rsp), %r14
movq 0x38(%rsp), %rcx
movq 0x40(%rcx), %rdi
imulq 0x18(%rsp), %rdi
imulq 0x10(%rcx), %rdi
addq (%rcx), %rdi
cmpl 0x24(%rsp), %r9d
jge 0xb4891
movl %r9d, %ecx
shrl %ecx
movl %r9d, %r13d
andl $0x1, %r13d
addl %ecx, %r13d
imulq %r14, %r13
addq %rax, %r13
movl 0xc(%rsp), %ecx
testl %ecx, %ecx
jle 0xb480a
pxor %xmm2, %xmm2
pxor %xmm1, %xmm1
pxor %xmm3, %xmm3
pxor %xmm0, %xmm0
movq %rdi, %rbx
subl $0x1, %ecx
jb 0xb47cc
movq (%r13), %xmm4
pxor %xmm5, %xmm5
pcmpgtb %xmm4, %xmm5
punpcklbw %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
movdqu (%rbx), %xmm5
pxor %xmm6, %xmm6
pcmpgtb %xmm5, %xmm6
movdqa %xmm5, %xmm7
punpcklbw %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
punpckhbw %xmm6, %xmm5 # xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
movdqa %xmm4, %xmm6
pmullw %xmm7, %xmm6
pmulhw %xmm4, %xmm7
movdqa %xmm4, %xmm8
pmullw %xmm5, %xmm8
pmulhw %xmm4, %xmm5
movdqa %xmm6, %xmm4
punpcklwd %xmm7, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
paddd %xmm4, %xmm0
punpckhwd %xmm7, %xmm6 # xmm6 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
paddd %xmm6, %xmm3
movdqa %xmm8, %xmm4
punpcklwd %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
paddd %xmm4, %xmm1
punpckhwd %xmm5, %xmm8 # xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
paddd %xmm8, %xmm2
addq $0x4, %r13
addq $0x10, %rbx
jmp 0xb4742
movdqa %xmm0, %xmm4
punpckldq %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
movdqa %xmm1, %xmm5
punpckldq %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
punpckhdq %xmm3, %xmm0 # xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
punpckhdq %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
movdqa %xmm4, %xmm2
punpcklqdq %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
punpckhqdq %xmm5, %xmm4 # xmm4 = xmm4[1],xmm5[1]
paddd %xmm2, %xmm4
movdqa %xmm0, %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
paddd %xmm2, %xmm0
paddd %xmm4, %xmm0
jmp 0xb4811
pxor %xmm0, %xmm0
movq %rdi, %rbx
xorl %ecx, %ecx
cmpl %ecx, %esi
je 0xb4855
movsbl (%r13,%rcx), %edx
movd %edx, %xmm1
pshuflw $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0,4,5,6,7]
pshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq (%rbx,%rcx,4), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
punpcklbw %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
movdqa %xmm1, %xmm3
pmullw %xmm2, %xmm3
pmulhw %xmm1, %xmm2
punpcklwd %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
paddd %xmm3, %xmm0
incq %rcx
jmp 0xb4813
movd %xmm0, (%r10)
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
movd %xmm1, (%r11)
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
movd %xmm1, (%r15)
pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3]
movd %xmm0, (%r12)
addq $0x4, %r10
addq $0x4, %r11
addq $0x4, %r15
addq $0x4, %r12
incl %r9d
jmp 0xb4702
movq 0x18(%rsp), %r9
incq %r9
movq 0x40(%rsp), %rdi
movl 0xc(%rsp), %ecx
jmp 0xb44b0
movq 0x48(%rsp), %r9
andq $-0x4, %r9
movq (%rdi), %rdx
movq %rdx, 0xa0(%rsp)
movq 0x40(%rdi), %rdx
imulq 0x10(%rdi), %rdx
movq %rdx, 0x98(%rsp)
movl %ecx, %r11d
leaq (,%r11,4), %rcx
movq %rcx, 0x28(%rsp)
cmpq 0x48(%rsp), %r9
jge 0xb4b50
movq 0x50(%rsp), %rcx
movq %rcx, 0x18(%rsp)
movq 0x90(%rsp), %rcx
imulq 0x60(%rsp), %rcx
movq %rcx, 0x30(%rsp)
movq %rax, %rcx
movl %r9d, %eax
cltd
pushq $0x4
popq %rdi
idivl %edi
movq 0x98(%rsp), %r12
movq %r9, 0x10(%rsp)
imulq %r9, %r12
addq 0xa0(%rsp), %r12
addl %eax, %edx
movslq %edx, %rax
movq 0x40(%rcx), %r9
imulq %rax, %r9
imulq 0x10(%rcx), %r9
addq (%rcx), %r9
movq 0x28(%rsp), %rcx
addq %r9, %rcx
movq %rcx, 0x40(%rsp)
xorl %edx, %edx
movq %rdx, %rcx
orq $0x1, %rcx
cmpq %r8, %rcx
jge 0xb4a53
movq %rdx, %rbx
shrq %rbx
imulq 0x30(%rsp), %rbx
addq 0x18(%rsp), %rbx
cmpl $0x0, 0xc(%rsp)
jle 0xb4a0c
pxor %xmm1, %xmm1
xorl %ecx, %ecx
pxor %xmm0, %xmm0
cmpl %ecx, %r11d
je 0xb49d8
movq (%rbx), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
punpcklbw %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
movq (%r9,%rcx,4), %xmm3
pxor %xmm4, %xmm4
pcmpgtb %xmm3, %xmm4
punpcklbw %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
pshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
movdqa %xmm3, %xmm4
pmullw %xmm2, %xmm4
pmulhw %xmm3, %xmm2
movdqa %xmm4, %xmm3
punpcklwd %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
paddd %xmm3, %xmm1
punpckhwd %xmm2, %xmm4 # xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
paddd %xmm4, %xmm0
addq $0x8, %rbx
incq %rcx
jmp 0xb4983
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
paddd %xmm1, %xmm2
pshufd $0x55, %xmm2, %xmm1 # xmm1 = xmm2[1,1,1,1]
paddd %xmm2, %xmm1
movd %xmm1, %ecx
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, %r15d
movq 0x40(%rsp), %r10
jmp 0xb4a14
xorl %ecx, %ecx
movq %r9, %r10
xorl %r15d, %r15d
xorl %edi, %edi
cmpl %edi, %esi
je 0xb4a3d
movsbl (%rbx,%rdi,2), %r14d
movsbl 0x1(%rbx,%rdi,2), %r13d
movsbl (%r10,%rdi), %ebp
imull %ebp, %r14d
addl %r14d, %ecx
imull %ebp, %r13d
addl %r13d, %r15d
incq %rdi
jmp 0xb4a16
movl %ecx, (%r12)
movl %r15d, 0x4(%r12)
addq $0x8, %r12
addq $0x2, %rdx
jmp 0xb494d
movq 0x90(%rsp), %r9
imulq 0x60(%rsp), %r9
movq 0x38(%rsp), %rcx
imulq 0x40(%rcx), %rax
imulq 0x10(%rcx), %rax
movq 0x50(%rsp), %r10
addq (%rcx), %rax
movq 0x28(%rsp), %rcx
leaq (%rax,%rcx), %rbx
cmpl 0x24(%rsp), %edx
jge 0xb4b3e
movl %edx, %ecx
shrl %ecx
movl %edx, %r14d
andl $0x1, %r14d
addl %ecx, %r14d
imulq %r9, %r14
addq %r10, %r14
cmpl $0x0, 0xc(%rsp)
jle 0xb4b0c
pxor %xmm0, %xmm0
xorl %ecx, %ecx
cmpl %ecx, %r11d
je 0xb4af1
movq (%r14), %xmm1
pxor %xmm2, %xmm2
pcmpgtb %xmm1, %xmm2
punpcklbw %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movq (%rax,%rcx,4), %xmm2
pxor %xmm3, %xmm3
pcmpgtb %xmm2, %xmm3
punpcklbw %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
movdqa %xmm2, %xmm3
pmullw %xmm1, %xmm3
pmulhw %xmm1, %xmm2
punpcklwd %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
paddd %xmm3, %xmm0
addq $0x4, %r14
incq %rcx
jmp 0xb4aad
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, %ecx
movq %rbx, %rdi
jmp 0xb4b11
xorl %ecx, %ecx
movq %rax, %rdi
xorl %r15d, %r15d
cmpl %r15d, %esi
je 0xb4b2f
movsbl (%r14,%r15), %ebp
movsbl (%rdi,%r15), %r13d
imull %ebp, %r13d
addl %r13d, %ecx
incq %r15
jmp 0xb4b14
movl %ecx, (%r12)
addq $0x4, %r12
incl %edx
jmp 0xb4a81
movq 0x10(%rsp), %r9
incq %r9
movq 0x38(%rsp), %rax
jmp 0xb48dc
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0xb4b7e
lock
decl (%rax)
jne 0xb4b7e
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0xb4b76
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xb4b7e
movq %rsi, %rdi
callq 0x5f3e0
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xb4bcb
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0xb4bc3
lock
decl (%rax)
jne 0xb4bc3
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0xb4bbd
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xb4bc3
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_int8.h
|
ncnn::convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
|
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;
    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
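    // reading of the layout note above (assumed, from the loop order below):
    // the innermost 8 values are consecutive input channels (8a), repeated
    // for 4 output channels (4b), so each (k, p-block) step emits
    // 4 * 8 = 32 int8 values -- hence the 32 * maxk row width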
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    if (outch >= 4)
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);
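    // note: outch / 4 + outch % 4 channels = one plane per 4-wide output
    // block plus one per leftover output channel, matching the two loops below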
    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
    }
    // TODO unroll 2
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}
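For reference, the same reordering can be written against a plain int8 array. A minimal standalone sketch (hypothetical helper, not ncnn API; assumes inch % 8 == 0 and the maxk-inch-outch source layout named above), covering only the 4-output-channel path:

static void interleave_8a4b(const signed char* kernel, signed char* dst,
                            int inch, int outch, int maxk)
{
    for (int q = 0; q + 3 < outch; q += 4)
        for (int p = 0; p + 7 < inch; p += 8)
            for (int k = 0; k < maxk; k++)
                for (int i = 0; i < 4; i++)
                    for (int j = 0; j < 8; j++)
                        // same element as channel(q + i).row(p + j)[k] above
                        *dst++ = kernel[((q + i) * inch + (p + j)) * maxk + k];
}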
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebx
movl %ecx, %ebp
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %rsi
imull %r9d, %ebx
movq %rsp, %rdi
movl %ebx, %edx
movl %r15d, %ecx
movl %ebp, %r8d
xorl %r9d, %r9d
callq 0x63020
pushq $0x8
popq %rcx
movl %r15d, %eax
cltd
idivl %ecx
cmpl $0x4, %ebp
jl 0xd71b1
movl %ebx, %esi
shll $0x5, %esi
movl %ebp, %ecx
shrl $0x2, %ecx
imull $-0x3, %ecx, %ecx
addl %ebp, %ecx
jmp 0xd71ba
leal (,%rbx,8), %esi
movl %ebp, %ecx
xorl %r12d, %r12d
pushq $0x1
popq %r8
movq %r14, %rdi
movl %eax, %edx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
cmovlel %r12d, %ebx
movslq %r15d, %rax
movslq %ebp, %rcx
movq %r12, %rdx
orq $0x3, %rdx
cmpq %rcx, %rdx
jge 0xd72ff
movq %r12, %rdx
shrq $0x2, %rdx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0xd7275
xorl %edi, %edi
cmpq %rbx, %rdi
je 0xd726f
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0xd726a
movq %r8, %r10
orq %r12, %r10
xorl %r9d, %r9d
cmpq $0x8, %r9
je 0xd7262
movq 0x40(%rsp), %r11
imulq %r10, %r11
movq 0x10(%rsp), %r15
imulq %r15, %r11
addq (%rsp), %r11
movslq 0x2c(%rsp), %r13
leaq (%rsi,%r9), %rbp
imulq %r13, %rbp
imulq %r15, %rbp
addq %r11, %rbp
movb (%rdi,%rbp), %r11b
movb %r11b, (%rdx,%r9)
incq %r9
jmp 0xd7225
incq %r8
addq %r9, %rdx
jmp 0xd7216
incq %rdi
jmp 0xd720e
addq $0x8, %rsi
jmp 0xd7200
addq $0x4, %r12
jmp 0xd71da
movl %r12d, %esi
shrl $0x2, %esi
movl %r12d, %edx
andl $0x3, %edx
addl %esi, %edx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0xd72fc
xorl %edi, %edi
cmpq %rbx, %rdi
je 0xd72f6
xorl %r8d, %r8d
cmpq $0x8, %r8
je 0xd72ee
movq 0x40(%rsp), %r9
imulq %r12, %r9
movq 0x10(%rsp), %r10
imulq %r10, %r9
addq (%rsp), %r9
movslq 0x2c(%rsp), %r11
leaq (%rsi,%r8), %r15
imulq %r11, %r15
imulq %r10, %r15
addq %r9, %r15
movb (%rdi,%r15), %r9b
movb %r9b, (%rdx,%r8)
incq %r8
jmp 0xd72b1
incq %rdi
addq %r8, %rdx
jmp 0xd72a9
addq $0x8, %rsi
jmp 0xd729b
incq %r12
cmpq %rcx, %r12
jl 0xd727e
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0xd7335
lock
decl (%rax)
jne 0xd7335
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0xd732d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xd7335
movq %rsi, %rdi
callq 0x5f3e0
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xd737e
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0xd7376
lock
decl (%rax)
jne 0xd7376
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0xd7370
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xd7376
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1_int8.h
|
ncnn::Convolution_x86_fma::create_pipeline(ncnn::Option const&)
|
int Convolution_x86_fma::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;
    activation = create_activation_layer(activation_type, activation_params, opt);
    nT = opt.num_threads;
#if NCNN_INT8
    if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
    {
        return create_pipeline_int8_x86(opt);
    }
#endif
    int kernel_size = kernel_w * kernel_h;
    int num_input = weight_data_size / kernel_size / num_output;
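    // e.g. a 3x3 conv with num_output = 64 and weight_data_size = 36864
    // weights gives num_input = 36864 / 9 / 64 = 64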
    if (!opt.use_packing_layout && kernel_w == kernel_h && dilation_w != 1 && dilation_h == dilation_w && stride_w == 1 && stride_h == 1)
    {
        convolution_dilation1 = ncnn::create_layer(ncnn::LayerType::Convolution);
        // set param
        ncnn::ParamDict pd;
        pd.set(0, num_output); // num_output
        pd.set(1, kernel_w);
        pd.set(11, kernel_h);
        pd.set(2, 1);
        pd.set(12, 1);
        pd.set(3, 1);  // stride_w
        pd.set(13, 1); // stride_h
        pd.set(4, 0);  // pad_w
        pd.set(14, 0); // pad_h
        pd.set(5, bias_term);
        pd.set(6, weight_data_size);
        convolution_dilation1->load_param(pd);
        // set weights
        if (bias_term)
        {
            ncnn::Mat weights[2];
            weights[0] = weight_data;
            weights[1] = bias_data;
            convolution_dilation1->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[1];
            weights[0] = weight_data;
            convolution_dilation1->load_model(ModelBinFromMatArray(weights));
        }
        convolution_dilation1->create_pipeline(opt);
        if (opt.lightmode)
        {
            weight_data.release();
        }
        return 0;
    }
    int elempack = 1;
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
#if __AVX512F__
        elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
        elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
        elempack = num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
    }
#endif // __SSE2__
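    // e.g. on the AVX build: num_input = 24 -> elempack = 8,
    // num_input = 12 -> 4, num_input = 6 -> 1 (largest divisor wins)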
    bool prefer_winograd = (opt.use_winograd23_convolution || opt.use_winograd43_convolution || opt.use_winograd63_convolution) && (num_input > 8 || num_output > 8);
    if (opt.use_winograd_convolution && prefer_winograd && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
    {
        if ((bottom_shapes.empty() || bottom_shapes[0].w == 0 || bottom_shapes[0].h == 0) && (top_shapes.empty() || top_shapes[0].w == 0 || top_shapes[0].h == 0))
        {
            // dynamic shape
            if ((opt.use_winograd63_convolution) && (num_input <= 32 && num_output <= 32))
                conv3x3s1_winograd63_transform_kernel(weight_data, weight_winograd63_data, num_input, num_output, opt);
            else if (opt.use_winograd43_convolution)
                conv3x3s1_winograd43_transform_kernel(weight_data, weight_winograd43_data, num_input, num_output, opt);
            else
                conv3x3s1_winograd23_transform_kernel(weight_data, weight_winograd23_data, num_input, num_output, opt);
        }
        else
        {
            int w;
            int h;
            if (top_shapes.empty() || top_shapes[0].w == 0 || top_shapes[0].h == 0)
            {
                w = bottom_shapes[0].w;
                h = bottom_shapes[0].h;
                // make padding
                if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0)
                {
                    w += pad_left + pad_right;
                    h += pad_top + pad_bottom;
                }
                else if ((pad_left == -233 && pad_right == -233 && pad_top == -233 && pad_bottom == -233)
                         || (pad_left == -234 && pad_right == -234 && pad_top == -234 && pad_bottom == -234))
                {
                    // tensorflow padding=SAME or onnx padding=SAME_UPPER/SAME_LOWER
                    w += 2;
                    h += 2;
                }
            }
            else
            {
                w = top_shapes[0].w + 2;
                h = top_shapes[0].h + 2;
            }
            bool prefer_winograd63 = test_prefer_winograd63(num_input, num_output, w, h);
            bool prefer_winograd23 = test_prefer_winograd23(num_input, num_output, w, h);
            bool prefer_winograd43 = !prefer_winograd63 && !prefer_winograd23;
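            // f43 is the default when neither heuristic fires; the blocks
            // below only re-route a preferred variant to a fallback whenever
            // its corresponding opt switch is disabled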
            if (prefer_winograd23 && !opt.use_winograd23_convolution)
            {
                // f23 fallback to f43
                prefer_winograd23 = false;
                prefer_winograd43 = true;
            }
            if (prefer_winograd63 && !opt.use_winograd63_convolution)
            {
                // f63 fallback to f43
                prefer_winograd63 = false;
                prefer_winograd43 = true;
            }
            if (prefer_winograd43 && !opt.use_winograd43_convolution)
            {
                // f43 fallback to f63 or f23
                prefer_winograd43 = false;
                if (opt.use_winograd63_convolution)
                {
                    prefer_winograd63 = true;
                }
                else
                {
                    prefer_winograd23 = true;
                }
            }
            if (prefer_winograd23)
            {
                conv3x3s1_winograd23_transform_kernel(weight_data, weight_winograd23_data, num_input, num_output, opt);
            }
            else if (prefer_winograd43)
            {
                conv3x3s1_winograd43_transform_kernel(weight_data, weight_winograd43_data, num_input, num_output, opt);
            }
            else if (prefer_winograd63)
            {
                conv3x3s1_winograd63_transform_kernel(weight_data, weight_winograd63_data, num_input, num_output, opt);
            }
            else
            {
                // should never reach here
            }
        }
        if (opt.lightmode)
        {
            weight_data.release();
        }
        return 0;
    }
    int l2_cache_size = get_cpu_level2_cache_size();
    bool prefer_sgemm = num_input * num_output * kernel_w * kernel_h * dilation_w * dilation_h * stride_w * stride_h * (int)sizeof(float) * 2 > l2_cache_size || (num_input > 16 || num_output > 16);
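    // e.g. num_input = num_output = 16, 3x3, unit dilation/stride:
    // 16 * 16 * 3 * 3 * 4 * 2 = 18432 bytes, and with both channel counts
    // <= 16 sgemm is preferred only when the L2 cache is smaller than that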
    if ((opt.use_sgemm_convolution && prefer_sgemm) || (kernel_w == 1 && kernel_h == 1))
    {
        const int maxk = kernel_w * kernel_h;
        gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
        ncnn::ParamDict pd;
        pd.set(2, 0);                   // transA
        pd.set(3, 0);                   // transB
        pd.set(4, 1);                   // constantA
        pd.set(5, 0);                   // constantB
        pd.set(6, 1);                   // constantC
        pd.set(7, num_output);          // M = outch
        pd.set(8, 0);                   // N = size
        pd.set(9, maxk * num_input);    // K = maxk*inch
        pd.set(10, bias_term ? 1 : -1); // constant_broadcast_type_C = (M)
        pd.set(11, 1);                  // output_N1M
        gemm->load_param(pd);
        // maxk-inch-outch to pa-maxk-inch/pa-outch
        Mat tmp;
        {
            Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
            tmp.create(maxk * num_input, num_output);
            for (int q = 0; q < num_output; q += 1)
            {
                float* g00 = tmp.row(q);
                for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
                {
                    for (int k = 0; k < maxk; k++)
                    {
                        for (int i = 0; i < elempack; i++)
                        {
                            const float* k00 = weight_data_r2.channel(q).row(p + i);
                            g00[0] = k00[k];
                            g00++;
                        }
                    }
                }
            }
        }
        if (bias_term)
        {
            ncnn::Mat weights[2];
            weights[0] = tmp;
            weights[1] = bias_data;
            gemm->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[1];
            weights[0] = tmp;
            gemm->load_model(ModelBinFromMatArray(weights));
        }
        gemm->create_pipeline(opt);
    }
    else
    {
        if ((elempack == 16 && out_elempack == 1 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 8 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 8 && out_elempack == 8 && kernel_w == 2 && kernel_h == 2 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
            || (elempack == 8 && out_elempack == 1 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 4 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 4 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2))
        {
            convolution_transform_kernel_packed_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
        }
        else
        {
            convolution_transform_kernel_packed(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h);
        }
    }
    if (opt.lightmode)
    {
        weight_data.release();
    }
    return 0;
}
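The Gemm parameters above encode the usual im2col lowering of a convolution. A minimal dimension check (hypothetical sizes, not taken from the source; N stays 0 in the param dict because it is only known from the runtime input):

// A (M x K) times B (K x N), A = the repacked tmp weights, B = im2col(input)
const int num_input = 64, num_output = 128, kernel_w = 3, kernel_h = 3;
const int maxk = kernel_w * kernel_h;
const int M = num_output;       // pd.set(7, num_output)
const int K = maxk * num_input; // pd.set(9, maxk * num_input) == 576
// N = outw * outh of the convolution output, resolved at inference time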
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x228, %rsp # imm = 0x228
movq (%rdi), %rax
movq -0x18(%rax), %rbx
cmpl $0x0, 0x158(%rdi,%rbx)
je 0xe34e0
xorl %eax, %eax
addq $0x228, %rsp # imm = 0x228
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, 0x30(%rsp)
movq %rdi, 0x8(%rsp)
movl 0x10c(%rdi,%rbx), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0xe3e34
leaq 0x30f3cd(%rip), %rax # 0x3f28d0
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0xe36b0
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0xe36b0
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd 0x4(%rax), %xmm0
leaq 0x90(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0xe36b0
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0xe36b0
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0xe36b0
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd 0x4(%rax), %xmm0
leaq 0x90(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x90(%rsp), %rdi
callq 0x71614
movq (%r15), %rax
movq %r15, %rdi
movq 0x30(%rsp), %rbx
movq %rbx, %rsi
callq *0x20(%rax)
movq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq %r15, 0x8(%rdi)
movl 0x4(%rbx), %ecx
movl %ecx, 0x10(%rdi)
movq -0x18(%rax), %rax
cmpb $0x1, 0x1e(%rbx)
jne 0xe3704
cmpq $0x1, 0x170(%rdi,%rax)
jne 0xe3704
movq 0x30(%rsp), %rsi
callq 0xe630c
jmp 0xe34cc
leaq (%rdi,%rax), %r12
movl 0xd0(%rdi,%rax), %ebp
movl 0xd4(%rdi,%rax), %ecx
movl 0xd8(%rdi,%rax), %esi
movl %esi, %r8d
imull %ecx, %r8d
movl 0x104(%rdi,%rax), %eax
cltd
idivl %r8d
cltd
idivl %ebp
movq %rax, 0x18(%rsp)
movq 0x30(%rsp), %rdi
movb 0x27(%rdi), %al
cmpl %esi, %ecx
setne %dl
orb %al, %dl
jne 0xe399c
movl 0xdc(%r12), %edx
cmpl $0x1, %edx
je 0xe399c
cmpl %edx, 0xe0(%r12)
jne 0xe399c
cmpl $0x1, 0xe4(%r12)
jne 0xe399c
cmpl $0x1, 0xe8(%r12)
jne 0xe399c
pushq $0x6
popq %rdi
callq 0x782bf
movq 0x8(%rsp), %rbx
movq %rax, 0x180(%rbx)
leaq 0x160(%rsp), %r15
movq %r15, %rdi
callq 0x71548
movq (%rbx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbx,%rax), %edx
movq %r15, %rdi
xorl %esi, %esi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd8(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0xb
popq %rsi
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x2
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xc
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x3
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xd
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0x100(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0x104(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rax
movq 0x180(%rax), %rdi
movq (%rdi), %rax
leaq 0x160(%rsp), %rsi
callq *0x10(%rax)
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0x100(%rdx,%rcx)
je 0xe47f9
leaq 0x90(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rcx)
vmovdqu %xmm0, 0xc(%rcx)
vmovdqa %xmm0, 0x20(%rcx)
vmovdqu %xmm0, 0x2c(%rcx)
andq $0x0, 0x88(%rcx)
vmovdqu %xmm0, 0x48(%rcx)
vmovdqu %xmm0, 0x54(%rcx)
vmovdqu %xmm0, 0x68(%rcx)
vmovdqu %xmm0, 0x74(%rcx)
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x160, %rax # imm = 0x160
cmpq %rax, %rcx
je 0xe5c9c
addq %rdx, %rbx
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0xe395e
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe5c27
lock
decl (%rax)
jne 0xe5c27
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe5c1f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe5c27
testb %al, %al
je 0xe39d4
pushq $0x8
popq %rax
xorl %edx, %edx
movq 0x18(%rsp), %r8
testb $0x3, %r8b
sete %dl
testb $0x7, %r8b
leal 0x1(%rdx,%rdx,2), %ebx
cmovel %eax, %ebx
xorl %edx, %edx
testb $0x3, %bpl
sete %dl
testb $0x7, %bpl
leal 0x1(%rdx,%rdx,2), %r8d
cmovel %eax, %r8d
jmp 0xe39db
pushq $0x1
popq %r8
movl %r8d, %ebx
movb 0x37(%rdi), %r15b
testb %r15b, %r15b
jne 0xe39f4
cmpb $0x0, 0x38(%rdi)
jne 0xe39f4
cmpb $0x1, 0x39(%rdi)
jne 0xe3e46
cmpl $0x9, 0x18(%rsp)
setge %dl
cmpl $0x9, %ebp
setge %al
orb %dl, %al
cmpl $0x3, %esi
jne 0xe3a83
cmpl $0x3, %ecx
jne 0xe3a83
cmpb $0x0, 0x1c(%rdi)
je 0xe3a83
testb %al, %al
je 0xe3a83
cmpl $0x1, 0xdc(%r12)
jne 0xe3a83
cmpl $0x1, 0xe0(%r12)
jne 0xe3a83
cmpl $0x1, 0xe4(%r12)
jne 0xe3a83
cmpl $0x1, 0xe8(%r12)
jne 0xe3a83
movq 0xa0(%r12), %rax
cmpq 0xa8(%r12), %rax
je 0xe44db
cmpl $0x0, 0x2c(%rax)
je 0xe44db
cmpl $0x0, 0x30(%rax)
je 0xe44db
movq 0xb8(%r12), %rcx
movq 0xc0(%r12), %rdx
jmp 0xe4508
movq %r8, 0x28(%rsp)
movq %rdi, %r14
callq 0x73479
movq 0x8(%rsp), %rdx
movq (%rdx), %rcx
movq -0x18(%rcx), %rcx
movl 0xd0(%rdx,%rcx), %r12d
movl 0xd4(%rdx,%rcx), %r13d
vmovdqu 0xd8(%rdx,%rcx), %xmm0
movq %rcx, 0x128(%rsp)
movl 0xe8(%rdx,%rcx), %ecx
movl %r12d, %edx
imull %r13d, %edx
vmovd %xmm0, %esi
movq %rsi, 0x10(%rsp)
imull %esi, %edx
vpextrd $0x1, %xmm0, %edi
imull %edx, %edi
vpextrd $0x2, %xmm0, %esi
imull %esi, %edi
vpextrd $0x3, %xmm0, %edx
imull %edx, %edi
imull %ecx, %edi
movq 0x18(%rsp), %r9
imull %r9d, %edi
shll $0x3, %edi
cmpl %eax, %edi
setg %al
cmpl $0x11, %r9d
setge %dil
cmpl $0x11, %r12d
setge %r8b
cmpb $0x1, 0x1d(%r14)
movl %ebx, %r15d
movslq %r9d, %r9
movq %r9, 0x20(%rsp)
jne 0xe3b30
orb %r8b, %dil
orb %dil, %al
jne 0xe3b46
movl %r13d, %eax
xorl $0x1, %eax
movq 0x10(%rsp), %rdi
xorl $0x1, %edi
orl %eax, %edi
jne 0xe3dec
pushq $0x4a
popq %rdi
callq 0x782bf
movq 0x8(%rsp), %rcx
movq %rax, 0x188(%rcx)
leaq 0x70(%rsp), %r12
movq %r12, %rdi
callq 0x71548
pushq $0x2
popq %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x3
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x4
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x5
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x6
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %edx
leaq 0x70(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x8
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq 0x10(%rsp), %rcx
imull %r13d, %ecx
movq 0x18(%rsp), %rax
movl %eax, %r12d
movq %rcx, %rbp
imull %ecx, %r12d
leaq 0x70(%rsp), %rdi
pushq $0x9
popq %rsi
movl %r12d, %edx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
xorl %edx, %edx
cmpl $0x1, 0x100(%rcx,%rax)
sbbl %edx, %edx
orl $0x1, %edx
leaq 0x70(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0xb
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x8(%rsp), %rax
movq 0x188(%rax), %rdi
movq (%rdi), %rax
leaq 0x70(%rsp), %rsi
callq *0x10(%rax)
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
vmovdqa %xmm0, 0x180(%rsp)
vmovdqu %xmm0, 0x18c(%rsp)
movq 0x8(%rsp), %r13
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq 0x160(%r13,%rax), %rsi
movl -0x90(%rsi), %r8d
leaq 0x90(%rsp), %rdi
movl %ebp, %edx
movq 0x18(%rsp), %rcx
xorl %r9d, %r9d
callq 0x63020
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r13,%rax), %edx
xorl %r14d, %r14d
leaq 0x160(%rsp), %rdi
pushq $0x4
popq %rcx
movl %r12d, %esi
xorl %r8d, %r8d
callq 0x636fa
movq (%r13), %rax
movq 0x160(%rsp), %rcx
movslq 0x18c(%rsp), %rdx
imulq 0x170(%rsp), %rdx
decl %ebx
testl %ebp, %ebp
cmovlel %r14d, %ebp
subq %rbx, 0x20(%rsp)
movq -0x18(%rax), %rsi
movq 0x8(%rsp), %rdi
movslq 0xd0(%rdi,%rsi), %rsi
cmpq %rsi, %r14
jge 0xe3dae
movq %rdx, %rsi
imulq %r14, %rsi
addq %rcx, %rsi
xorl %edi, %edi
cmpq 0x20(%rsp), %rdi
jge 0xe3da6
movslq 0xbc(%rsp), %r8
movq 0xa0(%rsp), %r10
movq %r10, %r9
imulq %r8, %r9
movq 0xd0(%rsp), %r11
imulq %r14, %r11
imulq %rdi, %r8
addq %r11, %r8
imulq %r10, %r8
addq 0x90(%rsp), %r8
xorl %r10d, %r10d
cmpq %rbp, %r10
je 0xe3da1
movq %r8, %r11
movq %r15, %rbx
subq $0x1, %rbx
jb 0xe3d98
vmovd (%r11), %xmm0
vmovd %xmm0, (%rsi)
addq $0x4, %rsi
addq %r9, %r11
jmp 0xe3d80
incq %r10
addq $0x4, %r8
jmp 0xe3d75
addq %r15, %rdi
jmp 0xe3d35
incq %r14
jmp 0xe3d0f
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe3e85
lock
decl (%rax)
jne 0xe3e85
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe3e7d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe3e85
movq 0x8(%rsp), %rax
addq %rax, 0x128(%rsp)
movslq %r12d, %rdi
movq %rdi, 0x80(%rsp)
movq 0x28(%rsp), %rdi
cmpl $0x8, %edi
jne 0xe3e4d
cmpl $0x8, %ebx
jne 0xe3e4d
cmpl $0x2, %r13d
je 0xe40bd
cmpl $0x3, %r13d
jne 0xe4669
vpxor 0x30ea51(%rip), %xmm0, %xmm0 # 0x3f2880
jmp 0xe40c5
xorl %r15d, %r15d
movq 0x8(%rsp), %rdi
movq 0x30(%rsp), %rbx
jmp 0xe36d6
xorl %eax, %eax
jmp 0xe3a04
vpinsrd $0x3, %r13d, %xmm0, %xmm1
cmpl $0x8, %edi
jne 0xe3fce
cmpl $0x1, %ebx
jne 0xe3fce
vpxor 0x30e9f3(%rip), %xmm1, %xmm0 # 0x3f2860
vptest %xmm0, %xmm0
jne 0xe4669
jmp 0xe4309
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%rcx,%rax)
je 0xe3f4b
andq $0x0, 0xd0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vmovdqu %xmm0, 0x9c(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqu %xmm0, 0xbc(%rsp)
andq $0x0, 0x118(%rsp)
vmovdqu %xmm0, 0xd8(%rsp)
vmovdqu %xmm0, 0xe4(%rsp)
vmovdqu %xmm0, 0xf8(%rsp)
vmovdqu %xmm0, 0x104(%rsp)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xe3f0d
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe4000
lock
decl (%rax)
jne 0xe4000
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe3ff8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe4000
andq $0x0, 0xd0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vmovdqu %xmm0, 0x9c(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqu %xmm0, 0xbc(%rsp)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xe453d
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe453d
lock
decl (%rax)
jne 0xe453d
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe4535
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe453d
cmpl $0x1, %edi
jne 0xe42d3
cmpl $0x8, %ebx
jne 0xe42d3
vpxor 0x30e878(%rip), %xmm1, %xmm0 # 0x3f2860
vptest %xmm0, %xmm0
je 0xe4312
jmp 0xe4669
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x160(%rsp), %xmm0
leaq 0xd8(%rsp), %rax
vmovaps %xmm0, -0x48(%rax)
movq 0x170(%rsp), %rcx
movq %rcx, -0x38(%rax)
movl 0x178(%rsp), %ecx
movl %ecx, -0x30(%rax)
movq 0x180(%rsp), %rcx
movq %rcx, -0x28(%rax)
vmovdqu 0x188(%rsp), %xmm0
vmovdqu %xmm0, -0x20(%rax)
movl 0x198(%rsp), %ecx
movl %ecx, -0x10(%rax)
movq 0x1a0(%rsp), %rcx
movq %rcx, -0x8(%rax)
movq 0x8(%rsp), %rdx
movq (%rdx), %rcx
movq -0x18(%rcx), %rbx
leaq (%rdx,%rbx), %rcx
addq $0x1a8, %rcx # imm = 0x1A8
cmpq %rcx, %rax
je 0xe414b
addq %rdx, %rbx
movq 0x1b0(%rbx), %rax
testq %rax, %rax
je 0xe408e
lock
incl (%rax)
movq 0xe0(%rsp), %rax
testq %rax, %rax
je 0xe40dd
lock
decl (%rax)
jne 0xe40dd
movq 0xd8(%rsp), %rsi
movq 0xf8(%rsp), %rdi
testq %rdi, %rdi
je 0xe40d5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe40dd
vpxor 0x30e7ab(%rip), %xmm0, %xmm0 # 0x3f2870
vptest %xmm0, %xmm0
je 0xe431b
jmp 0xe4669
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x1a8(%rbx), %xmm0
vmovups %xmm0, 0xd8(%rsp)
movq 0x1b8(%rbx), %rax
movq %rax, 0xe8(%rsp)
movl 0x1c0(%rbx), %eax
movl %eax, 0xf0(%rsp)
movq 0x1c8(%rbx), %rax
movq %rax, 0xf8(%rsp)
vmovdqu 0x1d0(%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movl 0x1e0(%rbx), %eax
movl %eax, 0x110(%rsp)
movq 0x1e8(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rsp), %rdx
movq 0x188(%rdx), %r15
leaq 0x1f8(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x1f8(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%rbx), %rax
testq %rax, %rax
je 0xe41cb
lock
decl (%rax)
jne 0xe41cb
movq 0x90(%rsp,%rbx), %rsi
movq 0xb0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0xe41bf
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0xe41cb
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0xe418c
movq 0x8(%rsp), %rax
movq 0x188(%rax), %rdi
movq (%rdi), %rax
movq 0x30(%rsp), %rsi
callq *0x20(%rax)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xe4244
lock
decl (%rax)
jne 0xe4244
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xe423c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe4244
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x70(%rsp), %rdi
callq 0x71614
movq 0x30(%rsp), %rax
cmpb $0x1, (%rax)
movq 0x8(%rsp), %rcx
jne 0xe34cc
movq (%rcx), %rax
movq -0x18(%rax), %rax
leaq (%rcx,%rax), %rbx
leaq (%rcx,%rax), %r14
addq $0x160, %r14 # imm = 0x160
movq 0x8(%r14), %rax
testq %rax, %rax
je 0xe42a8
lock
decl (%rax)
jne 0xe42a8
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0xe42a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe42a8
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rbx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%r14)
vmovdqu %xmm0, (%r14)
vmovdqu %xmm0, 0x188(%rbx)
andl $0x0, 0x198(%rbx)
jmp 0xe34cc
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vpinsrd $0x0, %edi, %xmm0, %xmm0
vpinsrd $0x1, %r13d, %xmm0, %xmm0
vpxor 0x30e564(%rip), %xmm0, %xmm0 # 0x3f2850
vptest %xmm0, %xmm0
jne 0xe4669
cmpl $0x1, %esi
jne 0xe4669
cmpl $0x1, %ebx
jne 0xe4669
cmpl $0x2, %edx
je 0xe4660
cmpl $0x1, %edx
jne 0xe4669
cmpl $0x1, %ecx
jne 0xe4669
movq 0x128(%rsp), %rsi
addq $0x160, %rsi # imm = 0x160
leaq 0x18(%rax), %rcx
movq %rcx, 0x38(%rsp)
movq 0x10(%rsp), %rbp
imull %r13d, %ebp
leaq 0x90(%rsp), %rdi
movl %ebp, %edx
movq 0x18(%rsp), %r14
movl %r14d, %ecx
movq 0x80(%rsp), %r13
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movl %r14d, %eax
movq 0x28(%rsp), %r14
cltd
idivl %ebx
movl %eax, %ecx
movl %r12d, %eax
cltd
idivl %r14d
movl %ebx, %r9d
imull %r14d, %r9d
leal (,%r9,4), %r8d
andq $0x0, (%rsp)
movq 0x38(%rsp), %rdi
movl %ebp, %esi
movl %ecx, %edx
movl %eax, %ecx
callq 0x628f2
movq %r14, %rax
movl %eax, %r14d
decl %eax
movq 0x8(%rsp), %rdx
movq 0x18(%rdx), %rcx
movq %rcx, 0x38(%rsp)
movq 0x28(%rdx), %rcx
imulq 0x58(%rdx), %rcx
movq %rcx, 0x40(%rsp)
decl %ebx
subq %rax, %r13
movq %r13, 0x80(%rsp)
subq %rbx, 0x20(%rsp)
movl %ebp, %r9d
xorl %r8d, %r8d
cmpq 0x80(%rsp), %r8
jge 0xe449d
movl %r8d, %eax
cltd
idivl 0x28(%rsp)
cltq
imulq 0x40(%rsp), %rax
addq 0x38(%rsp), %rax
xorl %edx, %edx
cmpq 0x20(%rsp), %rdx
jge 0xe4495
xorl %r10d, %r10d
xorl %r11d, %r11d
cmpq %r9, %r11
je 0xe448d
movq %rdx, %rbx
xorl %r12d, %r12d
cmpq %r15, %r12
je 0xe4484
movslq 0xbc(%rsp), %r13
movq 0xa0(%rsp), %rbp
movq 0xd0(%rsp), %rsi
movq 0x90(%rsp), %rdi
addq %r10, %rdi
movq %r8, %rcx
imulq %rsi, %rcx
imulq %rbx, %r13
addq %rcx, %r13
imulq %rbp, %r13
addq %rdi, %r13
imulq %rsi, %rbp
movq %r14, %rsi
subq $0x1, %rsi
jb 0xe447c
vmovd (%r13), %xmm0
vmovd %xmm0, (%rax)
addq $0x4, %rax
addq %rbp, %r13
jmp 0xe4463
incq %r12
incq %rbx
jmp 0xe441f
incq %r11
addq $0x4, %r10
jmp 0xe4414
addq %r15, %rdx
jmp 0xe4403
addq %r14, %r8
jmp 0xe43de
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe424e
lock
decl (%rax)
jne 0xe424e
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe4a28
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe424e
movq 0xb8(%r12), %rcx
movq 0xc0(%r12), %rdx
cmpq %rdx, %rcx
je 0xe4616
cmpl $0x0, 0x2c(%rcx)
je 0xe4616
cmpl $0x0, 0x30(%rcx)
je 0xe4616
cmpq %rdx, %rcx
je 0xe4941
movl 0x2c(%rcx), %r13d
testl %r13d, %r13d
je 0xe4941
movl 0x30(%rcx), %ecx
testl %ecx, %ecx
je 0xe4941
addl $0x2, %r13d
addl $0x2, %ecx
jmp 0xe49a1
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x160(%rsp), %xmm0
leaq 0x90(%rsp), %rsi
vmovaps %xmm0, (%rsi)
movq 0x170(%rsp), %rax
movq %rax, 0x10(%rsi)
movl 0x178(%rsp), %eax
movl %eax, 0x18(%rsi)
movq 0x180(%rsp), %rax
movq %rax, 0x20(%rsi)
vmovdqu 0x188(%rsp), %xmm0
vmovdqu %xmm0, 0x28(%rsi)
movl 0x198(%rsp), %eax
movl %eax, 0x38(%rsi)
movq 0x1a0(%rsp), %rax
movq %rax, 0x40(%rsi)
movq 0x8(%rsp), %rax
movq 0x188(%rax), %r15
leaq 0x1f8(%rsp), %rdi
callq 0x6b00e
movq (%r15), %rax
leaq 0x1f8(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe41f6
lock
decl (%rax)
jne 0xe41f6
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe4609
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe41f6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe41f6
cmpl $0x20, %ebp
jg 0xe49f1
cmpb $0x0, 0x39(%rdi)
je 0xe49f1
cmpl $0x20, 0x18(%rsp)
jg 0xe49f1
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0x138(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0xe77b3
jmp 0xe5bdc
cmpl $0x2, %ecx
je 0xe4324
leaq 0x18(%rax), %rdi
movq 0x10(%rsp), %rax
imull %r13d, %eax
movq %rax, 0x10(%rsp)
cmpl $0x8, %r12d
jl 0xe46bd
movq 0x18(%rsp), %rbx
cmpl $0x8, %ebx
movq 0x80(%rsp), %r8
jl 0xe4707
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x6, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0xe47a1
cmpl $0x4, %r12d
movq 0x18(%rsp), %rbx
movq 0x80(%rsp), %rcx
jl 0xe4725
cmpl $0x8, %ebx
jl 0xe4761
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x5, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %r8d
shrl $0x2, %r8d
andl $0x1, %r8d
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %r8d, %edx
jmp 0xe48d5
cmpl $0x4, %ebx
jl 0xe4786
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x5, %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
jmp 0xe47a1
cmpl $0x2, %ecx
jl 0xe47c4
cmpl $0x8, %ebx
jl 0xe4888
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0xe4a75
cmpl $0x4, %ebx
jl 0xe48b8
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
jmp 0xe48d5
cmpl $0x2, %ebx
jl 0xe48e6
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
movl %r8d, %eax
shrl $0x3, %eax
movl %r12d, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
andl $0x1, %r12d
addl %eax, %r12d
btl $0x1, %r8d
adcl %ecx, %r12d
jmp 0xe4a7c
cmpl $0x8, %ebx
jl 0xe4916
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0xe4a7c
leaq 0x90(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rcx)
vmovdqu %xmm0, 0xc(%rcx)
vmovdqa %xmm0, 0x20(%rcx)
vmovdqu %xmm0, 0x2c(%rcx)
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x160, %rax # imm = 0x160
cmpq %rax, %rcx
je 0xe5f75
addq %rdx, %rbx
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0xe5f07
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe5f07
lock
decl (%rax)
jne 0xe5f07
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe5eff
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe5f07
cmpl $0x4, %ebx
jl 0xe4a58
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
andl $0x1, %ecx
incl %ecx
pushq $0x4
popq %r8
jmp 0xe4a83
cmpl $0x2, %ebx
jl 0xe5b15
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
andl $0x1, %r12d
btl $0x1, %ecx
adcl $0x1, %r12d
jmp 0xe4a7c
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %r8d, %eax
shrl $0x3, %eax
movl %r12d, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
andl $0x1, %r12d
addl %eax, %r12d
btl $0x1, %r8d
adcl %ecx, %r12d
jmp 0xe5b2e
cmpl $0x4, %ebx
jl 0xe5b39
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
pushq $0x4
popq %r8
jmp 0xe5c10
movl 0x2c(%rax), %r13d
movl 0x30(%rax), %ecx
vmovq 0xec(%r12), %xmm0
vpxor %xmm1, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm1
vpmovsxdq %xmm1, %xmm1
vtestpd %xmm1, %xmm1
jne 0xe4982
movl 0xf4(%r12), %eax
testl %eax, %eax
jg 0xe4982
movl 0xf8(%r12), %edx
testl %edx, %edx
jle 0xe5eba
vmovd %xmm0, %eax
vpextrd $0x1, %xmm0, %edx
addl %eax, %edx
addl %edx, %r13d
addl 0xf4(%r12), %ecx
addl 0xf8(%r12), %ecx
movl %ecx, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl %ebx, %edi
movl %ebp, %esi
movl %r13d, %edx
callq 0xe8726
movl %eax, %r14d
movl %ebx, %edi
movl %ebp, %esi
movl %r13d, %edx
movl 0x28(%rsp), %ecx
callq 0xe891e
movl %eax, %ecx
andb %r15b, %cl
movzbl %r14b, %esi
movzbl %r15b, %edx
testb %al, %al
cmovel %esi, %edx
testb %sil, %sil
je 0xe4a35
movq 0x30(%rsp), %rbx
movb 0x39(%rbx), %al
testb %al, %dl
jne 0xe4a42
jmp 0xe5b74
addq $0x160, %r12 # imm = 0x160
cmpb $0x1, 0x38(%rdi)
movq 0x8(%rsp), %rax
movq %rdi, %rbx
jne 0xe5b57
leaq 0xf0(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0xe7d38
jmp 0xe5be1
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe424e
testb %dl, %dl
movq 0x30(%rsp), %rbx
je 0xe5b72
testb %cl, %cl
jne 0xe5bb5
testb %r14b, %r14b
jne 0xe4634
jmp 0xe5be1
cmpl $0x2, %ebx
jl 0xe5be9
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
andl $0x1, %r12d
incl %r12d
pushq $0x4
popq %r8
movl %r12d, %ecx
xorl %r9d, %r9d
callq 0x63810
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq %rax, 0x1d8(%rsp)
movq 0x8(%rsp), %rax
movq 0x28(%rax), %rcx
imulq 0x58(%rax), %rcx
movq %rcx, 0x220(%rsp)
movq 0x18(%rax), %rax
movq %rax, 0x218(%rsp)
movslq %r14d, %rdi
xorl %edx, %edx
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movl %r14d, %r8d
imull %ebx, %r8d
leal (,%r8,8), %eax
movl %eax, 0x20c(%rsp)
leal (%r8,%r8), %r11d
leal (%r8,%r8,2), %ebx
imull $0x7, %r8d, %r10d
movq %rdi, %rax
shlq $0x5, %rax
movq %rax, 0x1c8(%rsp)
leaq (,%rdi,4), %rsi
imull $0x6, %r8d, %ebp
movq %rdi, %rax
shlq $0x4, %rax
movq %rax, 0x1c0(%rsp)
leal (%r8,%r8,4), %r12d
leal (,%r8,4), %r13d
movq %rdi, 0x210(%rsp)
leaq (,%rdi,8), %rax
movq %r8, %rdi
movq %rax, 0x1b8(%rsp)
leaq (,%rcx,4), %rax
movq %rax, 0x1d0(%rsp)
xorl %r9d, %r9d
movq %r9, %rax
orq $0x7, %rax
movq 0x80(%rsp), %r8
cmpq %r8, %rax
jge 0xe5102
movl %r10d, 0x8c(%rsp)
movslq %r10d, %rax
movq %rdx, %r8
movq 0x1d8(%rsp), %r14
leaq (%r14,%rax,4), %rdx
movl %ebp, 0x1f0(%rsp)
movslq %ebp, %rax
movq %r9, %r10
leaq (%r14,%rax,4), %r9
movl %r12d, 0x1e8(%rsp)
movslq %r12d, %rax
movl %ebx, %ebp
leaq (%r14,%rax,4), %rbx
movl %r13d, 0x1e0(%rsp)
movslq %r13d, %rax
leaq (%r14,%rax,4), %r12
movl %ebp, 0x130(%rsp)
movslq %ebp, %rax
leaq (%r14,%rax,4), %rax
movl %r11d, %ebp
movq %rdi, %r11
movl %ebp, 0x138(%rsp)
movslq %ebp, %rdi
leaq (%r14,%rdi,4), %r13
movq %r11, 0x140(%rsp)
movslq %r11d, %rdi
leaq (%r14,%rdi,4), %r15
movq %r8, 0x150(%rsp)
movslq %r8d, %rdi
leaq (%r14,%rdi,4), %r11
movq %r10, 0x148(%rsp)
movq %r10, %rdi
shrq $0x3, %rdi
imulq 0x220(%rsp), %rdi
addq 0x218(%rsp), %rdi
movq %r11, 0x28(%rsp)
movq %r15, 0x38(%rsp)
movq %r13, 0x40(%rsp)
movq %rax, 0x60(%rsp)
movq %r12, 0x58(%rsp)
movq %rbx, 0x50(%rsp)
movq %r9, 0x48(%rsp)
movq %rdx, 0x68(%rsp)
xorl %r14d, %r14d
xorl %r10d, %r10d
movq %r10, 0x158(%rsp)
movq %r10, %r8
orq $0x7, %r8
cmpq 0x20(%rsp), %r8
jge 0xe4d52
movq %r14, 0x1b0(%rsp)
xorl %r8d, %r8d
cmpq %rcx, %r8
je 0xe4d06
movq %r14, %rbp
xorl %r10d, %r10d
cmpl $0x100, %r10d # imm = 0x100
je 0xe4cf7
vmovss (%r11,%rbp), %xmm0
vmovss %xmm0, (%rdi,%r10)
vmovss (%r15,%rbp), %xmm0
vmovss %xmm0, 0x4(%rdi,%r10)
vmovss (%r13,%rbp), %xmm0
vmovss %xmm0, 0x8(%rdi,%r10)
vmovss (%rax,%rbp), %xmm0
vmovss %xmm0, 0xc(%rdi,%r10)
vmovss (%r12,%rbp), %xmm0
vmovss %xmm0, 0x10(%rdi,%r10)
vmovss (%rbx,%rbp), %xmm0
vmovss %xmm0, 0x14(%rdi,%r10)
vmovss (%r9,%rbp), %xmm0
vmovss %xmm0, 0x18(%rdi,%r10)
vmovd (%rdx,%rbp), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r10)
addq $0x20, %r10
addq %rsi, %rbp
jmp 0xe4c80
incq %r8
addq $0x4, %r14
addq %r10, %rdi
jmp 0xe4c71
movq 0x158(%rsp), %r10
addq $0x8, %r10
movq 0x1c8(%rsp), %r8
movq 0x1b0(%rsp), %r14
addq %r8, %r14
addq %r8, 0x68(%rsp)
addq %r8, 0x48(%rsp)
addq %r8, 0x50(%rsp)
addq %r8, 0x58(%rsp)
addq %r8, 0x60(%rsp)
addq %r8, 0x40(%rsp)
addq %r8, 0x38(%rsp)
addq %r8, 0x28(%rsp)
jmp 0xe4c4c
movq 0x158(%rsp), %r15
movq %r15, %rax
orq $0x3, %rax
cmpq 0x20(%rsp), %rax
jge 0xe4f9e
movq %r15, %r14
movq 0x28(%rsp), %rax
movq 0x38(%rsp), %rdx
movq 0x40(%rsp), %r9
movq 0x60(%rsp), %r11
movq 0x58(%rsp), %rbx
movq 0x50(%rsp), %r15
movq 0x48(%rsp), %r12
movq 0x68(%rsp), %r13
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0xe4e4d
xorl %r10d, %r10d
xorl %r8d, %r8d
cmpl $0x80, %r8d
je 0xe4e22
vmovss (%rax,%r10), %xmm0
vmovss %xmm0, (%rdi,%r8)
vmovss (%rdx,%r10), %xmm0
vmovss %xmm0, 0x4(%rdi,%r8)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x8(%rdi,%r8)
vmovss (%r11,%r10), %xmm0
vmovss %xmm0, 0xc(%rdi,%r8)
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, 0x10(%rdi,%r8)
vmovss (%r15,%r10), %xmm0
vmovss %xmm0, 0x14(%rdi,%r8)
vmovss (%r12,%r10), %xmm0
vmovss %xmm0, 0x18(%rdi,%r8)
vmovd (%r13,%r10), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r8)
addq $0x20, %r8
addq %rsi, %r10
jmp 0xe4da8
incq %rbp
addq $0x4, %r13
addq $0x4, %r12
addq $0x4, %r15
addq $0x4, %rbx
addq $0x4, %r11
addq $0x4, %r9
addq $0x4, %rdx
addq $0x4, %rax
addq %r8, %rdi
jmp 0xe4d99
movq %r14, %r15
addq $0x4, %r15
movq 0x1c0(%rsp), %rax
addq %rax, 0x68(%rsp)
addq %rax, 0x48(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, 0x58(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x38(%rsp)
addq %rax, 0x28(%rsp)
jmp 0xe4d5a
movq %r15, %r14
movq 0x28(%rsp), %rax
movq 0x68(%rsp), %rdx
movq 0x48(%rsp), %r9
movq 0x50(%rsp), %r11
movq 0x58(%rsp), %rbx
movq 0x60(%rsp), %r15
movq 0x40(%rsp), %r12
movq 0x38(%rsp), %r13
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0xe4f67
xorl %r8d, %r8d
xorl %r10d, %r10d
cmpl $0x40, %r8d
je 0xe4f3c
vmovss (%rax,%r10), %xmm0
vmovss %xmm0, (%rdi,%r8)
vmovss (%r13,%r10), %xmm0
vmovss %xmm0, 0x4(%rdi,%r8)
vmovss (%r12,%r10), %xmm0
vmovss %xmm0, 0x8(%rdi,%r8)
vmovss (%r15,%r10), %xmm0
vmovss %xmm0, 0xc(%rdi,%r8)
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, 0x10(%rdi,%r8)
vmovss (%r11,%r10), %xmm0
vmovss %xmm0, 0x14(%rdi,%r8)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x18(%rdi,%r8)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r8)
addq %rsi, %r10
addq $0x20, %r8
jmp 0xe4ec5
incq %rbp
addq $0x4, %r13
addq $0x4, %r12
addq $0x4, %r15
addq $0x4, %rbx
addq $0x4, %r11
addq $0x4, %r9
addq $0x4, %rdx
addq $0x4, %rax
addq %r8, %rdi
jmp 0xe4eb6
movq %r14, %r15
addq $0x2, %r15
movq 0x1b8(%rsp), %rax
addq %rax, 0x38(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, 0x58(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, 0x48(%rsp)
addq %rax, 0x68(%rsp)
addq %rax, 0x28(%rsp)
movq %r15, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jl 0xe4e89
movq 0x1d0(%rsp), %r8
movq 0x148(%rsp), %r9
movl 0x138(%rsp), %r11d
movl 0x130(%rsp), %ebx
movl 0x8c(%rsp), %r10d
movl 0x1f0(%rsp), %ebp
movl 0x1e8(%rsp), %r12d
movl 0x1e0(%rsp), %r13d
cmpq 0x20(%rsp), %r15
jge 0xe50ce
xorl %edx, %edx
movq %rdi, %rax
cmpq %rdx, %r8
je 0xe509b
movq 0x28(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, (%rdi,%rdx,8)
movq 0x38(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x4(%rdi,%rdx,8)
movq 0x40(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x8(%rdi,%rdx,8)
movq 0x60(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0xc(%rdi,%rdx,8)
movq 0x58(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x10(%rdi,%rdx,8)
movq 0x50(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x14(%rdi,%rdx,8)
movq 0x48(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x18(%rdi,%rdx,8)
movq 0x68(%rsp), %r14
vmovd (%r14,%rdx), %xmm0
vmovd %xmm0, 0x1c(%rdi,%rdx,8)
addq $0x20, %rax
addq $0x4, %rdx
jmp 0xe4ffe
incq %r15
addq %rsi, 0x68(%rsp)
addq %rsi, 0x48(%rsp)
addq %rsi, 0x50(%rsp)
addq %rsi, 0x58(%rsp)
addq %rsi, 0x60(%rsp)
addq %rsi, 0x40(%rsp)
addq %rsi, 0x38(%rsp)
addq %rsi, 0x28(%rsp)
movq %rax, %rdi
jmp 0xe4fee
addq $0x8, %r9
movq 0x150(%rsp), %rdx
movl 0x20c(%rsp), %eax
addl %eax, %edx
movq 0x140(%rsp), %rdi
addl %eax, %edi
addl %eax, %r11d
addl %eax, %ebx
addl %eax, %r10d
addl %eax, %ebp
addl %eax, %r12d
addl %eax, %r13d
jmp 0xe4b5b
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq %rax, 0x40(%rsp)
movq 0x8(%rsp), %rax
movq 0x18(%rax), %r10
movq %r10, 0x60(%rsp)
movq 0x28(%rax), %r10
imulq 0x58(%rax), %r10
movq %r10, 0x58(%rsp)
movq 0x18(%rsp), %rax
movq 0x10(%rsp), %r10
imull %r10d, %eax
movq %rax, 0x18(%rsp)
leal (,%rax,4), %eax
movl %eax, 0x50(%rsp)
movq %r9, %rax
orq $0x3, %rax
cmpq %r8, %rax
movq %r9, %r12
jge 0xe5459
movq %rdx, 0x150(%rsp)
movslq %edx, %rax
movq 0x40(%rsp), %r8
leaq (%r8,%rax,4), %rdx
movq %rdi, 0x140(%rsp)
movslq %edi, %rax
leaq (%r8,%rax,4), %r9
movl %r11d, 0x138(%rsp)
movslq %r11d, %rax
leaq (%r8,%rax,4), %rbp
movl %ebx, 0x130(%rsp)
movslq %ebx, %rax
leaq (%r8,%rax,4), %rbx
movl %r12d, %r11d
shrl $0x3, %r11d
movq %r12, 0x148(%rsp)
btl $0x2, %r12d
adcl $0x0, %r11d
imulq 0x58(%rsp), %r11
addq 0x60(%rsp), %r11
movq %rbx, %rdi
movq %rbp, %r14
movq %r9, %r15
movq %rdx, %r12
xorl %r8d, %r8d
xorl %r13d, %r13d
movq %r13, %rax
orq $0x7, %rax
cmpq 0x20(%rsp), %rax
jge 0xe5307
movq %r13, 0x28(%rsp)
movq %r8, 0x38(%rsp)
movq %r8, %r13
xorl %r10d, %r10d
cmpq %rcx, %r10
je 0xe5254
movq %r13, %r8
xorl %eax, %eax
cmpl $0x80, %eax
je 0xe5248
vmovss (%rdx,%r8), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r8), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%rbp,%r8), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rbx,%r8), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r8
jmp 0xe5204
incq %r10
addq %rax, %r11
addq $0x4, %r13
jmp 0xe51fa
movq 0x28(%rsp), %r13
addq $0x8, %r13
movq 0x1c8(%rsp), %rax
movq 0x38(%rsp), %r8
addq %rax, %r8
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
jmp 0xe51d8
movq %rdi, %rdx
movq %r14, %r8
movq %r15, %r9
movq %r12, %rbx
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0xe52ef
xorl %r10d, %r10d
xorl %eax, %eax
cmpl $0x40, %eax
je 0xe52d7
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%r8,%r10), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r10
jmp 0xe5296
incq %rbp
addq %rax, %r11
addq $0x4, %rbx
addq $0x4, %r9
addq $0x4, %r8
addq $0x4, %rdx
jmp 0xe528c
addq $0x4, %r13
movq 0x1c0(%rsp), %rax
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
movq %r13, %rax
orq $0x3, %rax
cmpq 0x20(%rsp), %rax
jl 0xe527e
jmp 0xe53a7
movq %rdi, %rdx
movq %r14, %r8
movq %r15, %r9
movq %r12, %rbx
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0xe538f
xorl %r10d, %r10d
xorl %eax, %eax
cmpl $0x20, %eax
je 0xe5377
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%r8,%r10), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r10
jmp 0xe5336
incq %rbp
addq %rax, %r11
addq $0x4, %rbx
addq $0x4, %r9
addq $0x4, %r8
addq $0x4, %rdx
jmp 0xe532c
addq $0x2, %r13
movq 0x1b8(%rsp), %rax
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
movq %r13, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jl 0xe531e
movq 0x150(%rsp), %rdx
movq 0x148(%rsp), %r9
movl 0x130(%rsp), %ebx
cmpq 0x20(%rsp), %r13
jge 0xe5426
xorl %eax, %eax
cmpq %rax, %rcx
je 0xe5415
vmovss (%r12,%rax,4), %xmm0
vmovss %xmm0, (%r11)
vmovss (%r15,%rax,4), %xmm0
vmovss %xmm0, 0x4(%r11)
vmovss (%r14,%rax,4), %xmm0
vmovss %xmm0, 0x8(%r11)
vmovd (%rdi,%rax,4), %xmm0
vmovd %xmm0, 0xc(%r11)
addq $0x10, %r11
incq %rax
jmp 0xe53d9
incq %r13
addq %rsi, %rdi
addq %rsi, %r14
addq %rsi, %r15
addq %rsi, %r12
jmp 0xe53d0
addq $0x4, %r9
movl 0x50(%rsp), %eax
addl %eax, %edx
movq 0x140(%rsp), %rdi
addl %eax, %edi
movl 0x138(%rsp), %r11d
addl %eax, %r11d
addl %eax, %ebx
movq 0x80(%rsp), %r8
movq 0x10(%rsp), %r10
jmp 0xe5150
movq 0x128(%rsp), %rax
movq 0x160(%rax), %r14
movq 0x8(%rsp), %rax
movq 0x18(%rax), %r9
movq %r9, 0x138(%rsp)
movq 0x28(%rax), %r9
imulq 0x58(%rax), %r9
movq %r9, 0x130(%rsp)
leal (%r10,%r10), %eax
cltq
movq %rdx, %rbx
leal (%r10,%r10,2), %edx
movslq %edx, %rdx
movq %r8, %r15
movq %rdi, %r13
leal (,%r10,4), %edi
movslq %edi, %rdi
leal (%r10,%r10,4), %r8d
movslq %r8d, %r8
imull $0x6, %r10d, %r9d
movslq %r9d, %r9
imull $0x7, %r10d, %r10d
movslq %r10d, %r10
movq 0x18(%rsp), %r11
addl %r11d, %r11d
movl %r11d, 0x8c(%rsp)
leaq (%r14,%r10,4), %r10
movq %r10, 0x1f0(%rsp)
leaq (%r14,%r9,4), %r9
movq %r9, 0x1e8(%rsp)
leaq (%r14,%r8,4), %r8
movq %r8, 0x1e0(%rsp)
leaq (%r14,%rdi,4), %rdi
movq %rdi, 0x158(%rsp)
movq %r13, %rdi
leaq (%r14,%rdx,4), %rdx
movq %rdx, 0x10(%rsp)
movq %rbx, %r9
leaq (%r14,%rax,4), %rax
movq %rax, 0x1d8(%rsp)
movq 0x210(%rsp), %rax
movq %r14, 0x68(%rsp)
leaq (%r14,%rax,4), %rax
movq %rax, 0x1b0(%rsp)
movq %r12, %rax
orq $0x1, %rax
cmpq %r15, %rax
jge 0xe594b
movq %rdi, 0x140(%rsp)
movslq %edi, %rax
movq 0x1f0(%rsp), %r8
leaq (%r8,%rax,4), %rdi
movq %rdi, 0x28(%rsp)
movq 0x1e8(%rsp), %r10
leaq (%r10,%rax,4), %rdi
movq %rdi, 0x38(%rsp)
movq 0x1e0(%rsp), %r11
leaq (%r11,%rax,4), %rdi
movq %rdi, 0x40(%rsp)
movq 0x158(%rsp), %rdx
leaq (%rdx,%rax,4), %rdi
movq %rdi, 0x60(%rsp)
movq 0x10(%rsp), %r13
leaq (%r13,%rax,4), %rbx
movq 0x1d8(%rsp), %rbp
leaq (%rbp,%rax,4), %r14
movq 0x1b0(%rsp), %rdx
leaq (%rdx,%rax,4), %r15
movq 0x68(%rsp), %rdx
leaq (%rdx,%rax,4), %rdi
movq %r9, 0x150(%rsp)
movslq %r9d, %rax
leaq (%r8,%rax,4), %rdx
movq %rdx, 0x58(%rsp)
leaq (%r10,%rax,4), %rdx
movq %rdx, 0x50(%rsp)
leaq (%r11,%rax,4), %r11
movl %r12d, %r8d
shrl $0x3, %r8d
movl %r12d, %r10d
shrl $0x2, %r10d
andl $0x1, %r10d
movq %r12, 0x148(%rsp)
btl $0x1, %r12d
adcl %r8d, %r10d
movq 0x158(%rsp), %rdx
leaq (%rdx,%rax,4), %r9
leaq (,%rax,4), %r12
addq %r13, %r12
leaq (%rbp,%rax,4), %r13
movq 0x1b0(%rsp), %rdx
leaq (%rdx,%rax,4), %rbp
movq 0x68(%rsp), %rdx
leaq (%rdx,%rax,4), %r8
imulq 0x130(%rsp), %r10
addq 0x138(%rsp), %r10
xorl %edx, %edx
movq %rdx, 0x48(%rsp)
movq %rdx, %rax
orq $0x7, %rax
cmpq 0x20(%rsp), %rax
jge 0xe57a2
xorl %eax, %eax
cmpq %rax, 0x1d0(%rsp)
je 0xe5750
vmovss (%r8,%rax), %xmm0
vmovss %xmm0, (%r10)
vmovss (%rbp,%rax), %xmm0
vmovss %xmm0, 0x4(%r10)
vmovss (%r13,%rax), %xmm0
vmovss %xmm0, 0x8(%r10)
vmovss (%r12,%rax), %xmm0
vmovss %xmm0, 0xc(%r10)
vmovss (%r9,%rax), %xmm0
vmovss %xmm0, 0x10(%r10)
vmovss (%r11,%rax), %xmm0
vmovss %xmm0, 0x14(%r10)
movq 0x50(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x18(%r10)
movq 0x58(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x1c(%r10)
vmovss (%rdi,%rax), %xmm0
vmovss %xmm0, 0x20(%r10)
vmovss (%r15,%rax), %xmm0
vmovss %xmm0, 0x24(%r10)
vmovss (%r14,%rax), %xmm0
vmovss %xmm0, 0x28(%r10)
vmovss (%rbx,%rax), %xmm0
vmovss %xmm0, 0x2c(%r10)
movq 0x60(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x30(%r10)
movq 0x40(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x34(%r10)
movq 0x38(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x38(%r10)
movq 0x28(%rsp), %rdx
vmovd (%rdx,%rax), %xmm0
vmovd %xmm0, 0x3c(%r10)
addq $0x40, %r10
addq $0x4, %rax
jmp 0xe565f
movq 0x48(%rsp), %rdx
addq $0x8, %rdx
movq 0x1c8(%rsp), %rax
addq %rax, 0x28(%rsp)
addq %rax, 0x38(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, %rbx
addq %rax, %r14
addq %rax, %r15
addq %rax, %rdi
addq %rax, 0x58(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, %r11
addq %rax, %r9
addq %rax, %r12
addq %rax, %r13
addq %rax, %rbp
addq %rax, %r8
jmp 0xe5646
movq 0x1d0(%rsp), %r9
movq 0x48(%rsp), %r11
movq %r11, 0x48(%rsp)
orq $0x3, %r11
cmpq 0x20(%rsp), %r11
jge 0xe586e
xorl %edx, %edx
movq %r10, %rax
cmpq %rdx, %r9
je 0xe583d
vmovss (%r8,%rdx), %xmm0
vmovss %xmm0, (%r10,%rdx,8)
vmovss (%rbp,%rdx), %xmm0
vmovss %xmm0, 0x4(%r10,%rdx,8)
vmovss (%r13,%rdx), %xmm0
vmovss %xmm0, 0x8(%r10,%rdx,8)
vmovss (%r12,%rdx), %xmm0
vmovss %xmm0, 0xc(%r10,%rdx,8)
vmovss (%rdi,%rdx), %xmm0
vmovss %xmm0, 0x10(%r10,%rdx,8)
vmovss (%r15,%rdx), %xmm0
vmovss %xmm0, 0x14(%r10,%rdx,8)
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x18(%r10,%rdx,8)
vmovd (%rbx,%rdx), %xmm0
vmovd %xmm0, 0x1c(%r10,%rdx,8)
addq $0x20, %rax
addq $0x4, %rdx
jmp 0xe57c8
movq 0x48(%rsp), %r11
addq $0x4, %r11
movq 0x1c0(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r14
addq %rdx, %r15
addq %rdx, %rdi
addq %rdx, %r12
addq %rdx, %r13
addq %rdx, %rbp
addq %rdx, %r8
movq %rax, %r10
jmp 0xe57af
movq 0x80(%rsp), %r15
movq 0x48(%rsp), %r14
movq %r14, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jge 0xe58e6
movq %rdi, %rax
movq %r8, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0xe58d2
xorl %ebx, %ebx
xorl %r11d, %r11d
cmpl $0x10, %r11d
je 0xe58c2
vmovss (%rdx,%rbx), %xmm0
vmovss %xmm0, (%r10,%r11)
vmovd (%rax,%rbx), %xmm0
vmovd %xmm0, 0x4(%r10,%r11)
addq $0x8, %r11
addq %rsi, %rbx
jmp 0xe589c
incq %r9
addq %r11, %r10
addq $0x4, %rdx
addq $0x4, %rax
jmp 0xe5892
addq $0x2, %r14
movq 0x1b8(%rsp), %rax
addq %rax, %r8
addq %rax, %rdi
jmp 0xe587b
movq 0x150(%rsp), %r9
movq 0x148(%rsp), %r12
cmpq 0x20(%rsp), %r14
jge 0xe592e
xorl %eax, %eax
cmpq %rax, %rcx
je 0xe5923
vmovss (%r8,%rax,4), %xmm0
vmovss %xmm0, (%r10)
vmovd (%rdi,%rax,4), %xmm0
vmovd %xmm0, 0x4(%r10)
addq $0x8, %r10
incq %rax
jmp 0xe58ff
incq %r14
addq %rsi, %rdi
addq %rsi, %r8
jmp 0xe58f6
addq $0x2, %r12
movl 0x8c(%rsp), %eax
addl %eax, %r9d
movq 0x140(%rsp), %rdi
addl %eax, %edi
jmp 0xe5534
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq 0x8(%rsp), %rdx
movq 0x18(%rdx), %rdi
movq %rdi, 0x28(%rsp)
movq 0x28(%rdx), %r8
imulq 0x58(%rdx), %r8
movq %r9, %rdx
movq %r12, %r14
movq 0x210(%rsp), %rdi
leaq (%rax,%rdi,4), %r10
cmpq %r15, %r14
jge 0xe424e
movq %rdx, %rbp
movslq %edx, %rdx
leaq (%r10,%rdx,4), %r15
movl %r14d, %r9d
shrl $0x3, %r9d
movl %r14d, %ebx
shrl $0x2, %ebx
andl $0x1, %ebx
movl %r14d, %r11d
andl $0x1, %r11d
addl %r9d, %r11d
movq %r14, %rdi
btl $0x1, %r14d
adcl %ebx, %r11d
leaq (%rax,%rdx,4), %rbx
imulq %r8, %r11
addq 0x28(%rsp), %r11
xorl %r14d, %r14d
movq %r14, %rdx
orq $0x7, %rdx
cmpq 0x20(%rsp), %rdx
jge 0xe5a73
movq %rbx, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0xe5a15
movq %rdx, %r13
xorl %r12d, %r12d
cmpl $0x20, %r12d
je 0xe5a09
vmovd (%r13), %xmm0
vmovd %xmm0, (%r11,%r12)
addq $0x4, %r12
addq %rsi, %r13
jmp 0xe59ee
incq %r9
addq %r12, %r11
addq $0x4, %rdx
jmp 0xe59e3
addq $0x8, %r14
movq 0x1c8(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r15
jmp 0xe59cb
movq %rbx, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0xe5a61
movq %rdx, %r13
xorl %r12d, %r12d
cmpl $0x10, %r12d
je 0xe5a55
vmovd (%r13), %xmm0
vmovd %xmm0, (%r11,%r12)
addq $0x4, %r12
addq %rsi, %r13
jmp 0xe5a3a
incq %r9
addq %r12, %r11
addq $0x4, %rdx
jmp 0xe5a2f
addq $0x4, %r14
movq 0x1c0(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r15
movq %r14, %rdx
orq $0x3, %rdx
cmpq 0x20(%rsp), %rdx
jl 0xe5a29
movq 0x1d0(%rsp), %r9
movq %r14, %rdx
orq $0x1, %rdx
cmpq 0x20(%rsp), %rdx
jge 0xe5ad1
xorl %edx, %edx
cmpq %rdx, %rcx
je 0xe5abd
vmovss (%rbx,%rdx,4), %xmm0
vmovss %xmm0, (%r11)
vmovd (%r15,%rdx,4), %xmm0
vmovd %xmm0, 0x4(%r11)
addq $0x8, %r11
incq %rdx
jmp 0xe5a99
addq $0x2, %r14
movq 0x1b8(%rsp), %rdx
addq %rdx, %r15
addq %rdx, %rbx
jmp 0xe5a89
movq 0x80(%rsp), %r15
cmpq 0x20(%rsp), %r14
jge 0xe5b03
xorl %edx, %edx
cmpq %rdx, %r9
je 0xe5af8
vmovd (%rbx,%rdx), %xmm0
vmovd %xmm0, (%r11,%rdx)
addq $0x4, %rdx
jmp 0xe5ae2
incq %r14
addq %rsi, %rbx
addq %rdx, %r11
jmp 0xe5ad9
movq %rdi, %r14
incq %r14
movq %rbp, %rdx
addl 0x18(%rsp), %edx
jmp 0xe5983
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
andl $0x1, %r12d
btl $0x1, %ecx
adcl $0x1, %r12d
pushq $0x4
popq %r8
movl %ebx, %edx
jmp 0xe4a80
cmpl $0x2, %ebx
jl 0xe5c02
movq 0x10(%rsp), %r14
leal (%r14,%r14), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
jmp 0xe4a7c
leaq 0xa8(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0xe825a
jmp 0xe5be1
xorl %eax, %eax
cmpb $0x0, 0x38(%rbx)
je 0xe5ba7
testb %cl, %cl
jne 0xe5bb5
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0xf0(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0xe7d38
jmp 0xe5bdc
movb 0x39(%rbx), %dl
movl %edx, %esi
xorb $0x1, %sil
orb %sil, %cl
je 0xe5c15
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0xa8(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0xe825a
movq 0x30(%rsp), %rbx
cmpb $0x1, (%rbx)
jmp 0xe4256
movq 0x10(%rsp), %r14
leal (%r14,%r14), %esi
andl $0x1, %ecx
incl %ecx
pushq $0x4
popq %r8
movl %ebx, %edx
jmp 0xe4a83
pushq $0x4
popq %r8
movq 0x10(%rsp), %r14
movl %r14d, %esi
movl %ebx, %edx
jmp 0xe4a83
orb %dl, %al
jne 0xe4634
jmp 0xe5be1
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x160(%rbx), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x170(%rbx), %rax
movq %rax, 0xa0(%rsp)
movl 0x178(%rbx), %eax
movl %eax, 0xa8(%rsp)
movq 0x180(%rbx), %rax
movq %rax, 0xb0(%rsp)
vmovdqu 0x188(%rbx), %xmm0
vmovdqu %xmm0, 0xb8(%rsp)
movl 0x198(%rbx), %eax
movl %eax, 0xc8(%rsp)
movq 0x1a0(%rbx), %rax
movq %rax, 0xd0(%rsp)
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x1a8, %rax # imm = 0x1A8
leaq 0xd8(%rsp), %rcx
cmpq %rax, %rcx
je 0xe5d6e
addq %rdx, %rbx
movq 0x1b0(%rbx), %rax
testq %rax, %rax
je 0xe5cc9
lock
incl (%rax)
movq 0xe0(%rsp), %rax
testq %rax, %rax
je 0xe5d00
lock
decl (%rax)
jne 0xe5d00
movq 0xd8(%rsp), %rsi
movq 0xf8(%rsp), %rdi
testq %rdi, %rdi
je 0xe5cf8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe5d00
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x1a8(%rbx), %xmm0
vmovups %xmm0, 0xd8(%rsp)
movq 0x1b8(%rbx), %rax
movq %rax, 0xe8(%rsp)
movl 0x1c0(%rbx), %eax
movl %eax, 0xf0(%rsp)
movq 0x1c8(%rbx), %rax
movq %rax, 0xf8(%rsp)
vmovdqu 0x1d0(%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movl 0x1e0(%rbx), %eax
movl %eax, 0x110(%rsp)
movq 0x1e8(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rsp), %rdx
movq 0x180(%rdx), %r15
leaq 0x70(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x70(%rsp), %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%rbx), %rax
testq %rax, %rax
je 0xe5de5
lock
decl (%rax)
jne 0xe5de5
movq 0x90(%rsp,%rbx), %rsi
movq 0xb0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0xe5dd9
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0xe5de5
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0xe5da6
movq 0x8(%rsp), %rax
movq 0x180(%rax), %rdi
movq (%rdi), %rax
movq 0x30(%rsp), %rsi
callq *0x20(%rax)
movq 0x30(%rsp), %rax
cmpb $0x1, (%rax)
jne 0xe5ea8
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %r14
leaq (%rcx,%r14), %rbx
movq 0x168(%rcx,%r14), %rax
testq %rax, %rax
je 0xe5e76
lock
decl (%rax)
jne 0xe5e76
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0xe5e6e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe5e76
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rbx)
movq 0x8(%rsp), %rax
addq %r14, %rax
addq $0x160, %rax # imm = 0x160
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
vmovdqu %xmm0, 0x188(%rbx)
andl $0x0, 0x198(%rbx)
leaq 0x160(%rsp), %rdi
callq 0x71614
jmp 0xe34cc
vpinsrd $0x2, %eax, %xmm0, %xmm0
vpinsrd $0x3, %edx, %xmm0, %xmm0
vbroadcastss 0x30c9f1(%rip), %xmm1 # 0x3f28c0
vpxor %xmm1, %xmm0, %xmm1
vptest %xmm1, %xmm1
je 0xe4529
vbroadcastss 0x30c9dd(%rip), %xmm1 # 0x3f28c4
vpcmpeqd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorl $0xf, %eax
testb %al, %al
jne 0xe49a1
jmp 0xe4529
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x160(%rbx), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x170(%rbx), %rax
movq %rax, 0xa0(%rsp)
movl 0x178(%rbx), %eax
movl %eax, 0xa8(%rsp)
movq 0x180(%rbx), %rax
movq %rax, 0xb0(%rsp)
vmovdqu 0x188(%rbx), %xmm0
vmovdqu %xmm0, 0xb8(%rsp)
movl 0x198(%rbx), %eax
movl %eax, 0xc8(%rsp)
movq 0x1a0(%rbx), %rax
movq %rax, 0xd0(%rsp)
movq 0x8(%rsp), %rdx
movq 0x180(%rdx), %r15
leaq 0x70(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x70(%rsp), %rdi
callq 0x6b03a
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe5e10
lock
decl (%rax)
jne 0xe5e10
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe5fe0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe5e10
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe5e10
jmp 0xe6005
jmp 0xe62ee
jmp 0xe6068
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x6b03a
jmp 0xe6008
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe61c3
lock
decl (%rax)
jne 0xe61c3
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0xe6044
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe61c3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe61c3
jmp 0xe62ee
jmp 0xe62ee
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x6b03a
jmp 0xe606b
movq %rax, %rbx
pushq $0x48
popq %r14
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%r14), %rax
testq %rax, %rax
je 0xe60b2
lock
decl (%rax)
jne 0xe60b2
movq 0x90(%rsp,%r14), %rsi
movq 0xb0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0xe60a6
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0xe60b2
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0xe6073
jmp 0xe61c3
jmp 0xe62ee
jmp 0xe6175
jmp 0xe62ee
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe6303
lock
decl (%rax)
jne 0xe6303
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0xe6130
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe6303
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe6303
jmp 0xe62ee
jmp 0xe62ee
jmp 0xe62ee
jmp 0xe62ee
jmp 0xe61e2
jmp 0xe6260
jmp 0xe6260
jmp 0xe6260
movq %rax, %rbx
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
jmp 0xe6178
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe62b3
lock
decl (%rax)
jne 0xe62b3
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xe629c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe62b3
jmp 0xe62ee
jmp 0xe6260
movq %rax, %rbx
leaq 0x160(%rsp), %rdi
jmp 0xe62fe
movq %rax, %rbx
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
jmp 0xe61e5
movq %rax, %rbx
pushq $0x48
popq %r14
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%r14), %rax
testq %rax, %rax
je 0xe622c
lock
decl (%rax)
jne 0xe622c
movq 0x90(%rsp,%r14), %rsi
movq 0xb0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0xe6220
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0xe622c
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0xe61ed
jmp 0xe62b3
jmp 0xe62ee
jmp 0xe6260
movq %rax, %rbx
leaq 0x90(%rsp), %rdi
jmp 0xe62fe
jmp 0xe62b0
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xe62b3
lock
decl (%rax)
jne 0xe62b3
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0xe62a6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe62b3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe62b3
jmp 0xe62ee
movq %rax, %rbx
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xe62f9
lock
decl (%rax)
jne 0xe62f9
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
jne 0xe62e4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe62f9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe62f9
jmp 0xe62ee
movq %rax, %rdi
callq 0x61d68
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::conv3x3s1_winograd43_transform_kernel(ncnn::Mat const&, ncnn::Mat&, int, int, ncnn::Option const&)
|
static void conv3x3s1_winograd43_transform_kernel(const Mat& kernel, Mat& AT, int inch, int outch, const Option& opt)
{
    const int M = outch;
    const int K = inch;
    const int B = 36; // 6x6 transformed coefficients per 3x3 kernel (F(4x4, 3x3))
    int TILE_M, TILE_N, TILE_K;
    get_optimal_tile_mnk(M, 0, K, TILE_M, TILE_N, TILE_K, opt.num_threads);
    const int nn_M = (M + TILE_M - 1) / TILE_M;
    Mat A_tileX(B * TILE_M * TILE_K, 1, opt.num_threads, 4u, (Allocator*)0);
    AT.create(TILE_K * TILE_M, B, (K + TILE_K - 1) / TILE_K, (M + TILE_M - 1) / TILE_M, 4u, (Allocator*)0);
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int ppj = 0; ppj < nn_M; ppj++)
    {
        const int i = ppj * TILE_M;
        Mat A_tile = A_tileX.channel(get_omp_thread_num());
        for (int k = 0; k < K; k += TILE_K)
        {
            const int max_ii = std::min((M - i), TILE_M);
            const int max_kk = std::min((K - k), TILE_K);
            conv3x3s1_winograd43_transform_kernel_tile(kernel, A_tile, inch, i, max_ii, k, max_kk);
            Mat AT_tile = AT.channel(i / TILE_M).depth(k / TILE_K);
            pack_A_tile(A_tile, AT_tile, B, max_ii, max_kk);
        }
    }
}
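|
The two inner loops in the assembly below are the two halves of the kernel transform U = G * g * G^T. The first loop (bounded by `cmpq $0x3, %r11`) applies the 6x3 matrix G to one 3x3 kernel into a stack scratch area; the second (`cmpq $0x48, %r10`, six iterations of 0xc bytes) multiplies the scratch by G^T and streams the 6x6 result into A_tile. Rows 0 and 5 of G pass the kernel values through unchanged (the raw vmovss stores of xmm0 and xmm2), while the middle rows use the five constants loaded from .rodata at 0x3f1ae0-0x3f1af0. The scalar sketch below shows the same computation; the sqrt(2)-scaled F(4x4, 3x3) constants match the add/sub pattern in the assembly but are an assumption, not values read out of the binary.
|
// Hypothetical scalar reference for the one-channel F(4x4, 3x3) kernel
// transform U = G * g * G^T; the real code vectorizes this and fuses it
// with tile packing.
static void winograd43_transform_kernel_1ch(const float g[3][3], float u[6][6])
{
    const float sq2 = 1.41421356237f;
    const float G[6][3] = {
        {1.f, 0.f, 0.f},               // row 0: pass-through of g row 0
        {-2.f / 3, -sq2 / 3, -1.f / 3},
        {-2.f / 3, sq2 / 3, -1.f / 3},
        {1.f / 6, sq2 / 6, 1.f / 3},
        {1.f / 6, -sq2 / 6, 1.f / 3},
        {0.f, 0.f, 1.f},               // row 5: pass-through of g row 2
    };
    float tmp[6][3]; // mirrors the first loop's 6x3 stack scratch (up to transposition)
    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 3; j++)
            tmp[i][j] = G[i][0] * g[0][j] + G[i][1] * g[1][j] + G[i][2] * g[2][j];
    // mirrors the second loop, which writes 6 floats per iteration to A_tile
    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 6; j++)
            u[i][j] = tmp[i][0] * G[j][0] + tmp[i][1] * G[j][1] + tmp[i][2] * G[j][2];
}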
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x148, %rsp # imm = 0x148
movq %r8, %r13
movl %ecx, %r14d
movl %edx, %r15d
movq %rsi, 0x18(%rsp)
movq %rdi, %r12
movl 0x4(%r8), %eax
movl %eax, (%rsp)
leaq 0xac(%rsp), %rbx
leaq 0xa8(%rsp), %r8
leaq 0xa4(%rsp), %rbp
movl %ecx, %edi
xorl %esi, %esi
movq %rbx, %rcx
movq %rbp, %r9
callq 0x1016d1
movl (%rbx), %esi
movq %r14, 0x80(%rsp)
leal (%r14,%rsi), %eax
decl %eax
cltd
idivl %esi
movl (%rbp), %r14d
movl %eax, %ebp
movl %r14d, %ebx
movl 0x4(%r13), %ecx
leaq 0xb0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movq %rsi, 0x78(%rsp)
imull %esi, %ebx
imull $0x24, %ebx, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x1
popq %rdx
pushq $0x4
popq %r13
movq %r13, %r8
xorl %r9d, %r9d
callq 0x63810
movq %r15, 0x20(%rsp)
leal (%r15,%r14), %eax
decl %eax
cltd
movq %r14, 0x28(%rsp)
idivl %r14d
andq $0x0, (%rsp)
pushq $0x24
popq %rdx
movq 0x18(%rsp), %rdi
movl %ebx, %esi
movl %eax, %ecx
movl %ebp, %r8d
movq %r13, %r9
callq 0x6393c
movq 0x20(%rsp), %rax
leal (%rax,%rax,8), %r14d
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
xorl %ebx, %ebx
movl %ebp, 0xc(%rsp)
movq 0x28(%rsp), %r15
cmpl %ebp, %ebx
je 0xe81c3
callq 0x7357d
movslq 0xdc(%rsp), %rdx
movslq 0xe0(%rsp), %rcx
movslq %eax, %r9
imulq 0xf0(%rsp), %r9
movq 0xc0(%rsp), %rsi
imulq %rsi, %r9
addq 0xb0(%rsp), %r9
movl 0xe4(%rsp), %eax
movl 0xc8(%rsp), %edi
movq 0xd0(%rsp), %r8
movq %r9, 0x98(%rsp)
movq %r9, 0x100(%rsp)
andq $0x0, 0x108(%rsp)
movq %rsi, 0x110(%rsp)
movl %edi, 0x118(%rsp)
movq %r8, 0x120(%rsp)
movl %edx, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movl $0x1, 0x134(%rsp)
movl %eax, 0x138(%rsp)
imulq %rdx, %rcx
movq %rsi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x140(%rsp)
movl 0xd8(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x128(%rsp)
cmpl $0x4, %eax
jne 0xe7f18
movq %rcx, 0x140(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, %r13d
imull %ebx, %r13d
movq 0x80(%rsp), %rax
subl %r13d, %eax
cmpl %eax, %ecx
cmovll %ecx, %eax
testl %eax, %eax
movl $0x0, %ebp
movl %eax, 0x10(%rsp)
cmovgl %eax, %ebp
movq %rbx, 0x88(%rsp)
movslq %ebx, %rax
movq %rax, 0x90(%rsp)
xorl %ebx, %ebx
movq 0x20(%rsp), %rax
movl %eax, %r8d
subl %ebx, %r8d
jle 0xe81b0
movq 0x28(%rsp), %rax
cmpl %r8d, %eax
cmovll %eax, %r8d
movslq %ebx, %rax
testl %r8d, %r8d
movl $0x0, %ecx
cmovgl %r8d, %ecx
xorl %edx, %edx
movq 0x98(%rsp), %rsi
cmpq %rbp, %rdx
je 0xe8100
leal (%rdx,%r13), %edi
imull %r14d, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
addq (%r12), %rdi
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0xe80f8
leaq (%r9,%rax), %r10
imulq $0x24, %r10, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xe805c
vmovss (%r10), %xmm0
vmovss 0x4(%r10), %xmm1
vmovss 0x8(%r10), %xmm2
vmovss %xmm0, 0x30(%rsp,%r11,4)
vmulss 0x309af3(%rip), %xmm0, %xmm3 # 0x3f1ae0
vmulss 0x309aef(%rip), %xmm1, %xmm4 # 0x3f1ae4
vmulss 0x309aeb(%rip), %xmm2, %xmm5 # 0x3f1ae8
vaddss %xmm5, %xmm4, %xmm6
vsubss %xmm6, %xmm3, %xmm6
vmovss %xmm6, 0x3c(%rsp,%r11,4)
vaddss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm3, %xmm3
vmovss %xmm3, 0x48(%rsp,%r11,4)
vmulss 0x309ac9(%rip), %xmm0, %xmm0 # 0x3f1aec
vmulss 0x309ac5(%rip), %xmm1, %xmm1 # 0x3f1af0
vaddss %xmm0, %xmm1, %xmm3
vaddss %xmm5, %xmm3, %xmm3
vmovss %xmm3, 0x54(%rsp,%r11,4)
vsubss %xmm1, %xmm0, %xmm0
vaddss %xmm5, %xmm0, %xmm0
vmovss %xmm0, 0x60(%rsp,%r11,4)
vmovss %xmm2, 0x6c(%rsp,%r11,4)
addq $0xc, %r10
incq %r11
jmp 0xe7fc3
xorl %r10d, %r10d
cmpq $0x48, %r10
je 0xe80f0
vmovss 0x30(%rsp,%r10), %xmm0
vmovss 0x34(%rsp,%r10), %xmm1
vmovss 0x38(%rsp,%r10), %xmm2
vmulss 0x309a5a(%rip), %xmm0, %xmm3 # 0x3f1ae0
vmulss 0x309a56(%rip), %xmm1, %xmm4 # 0x3f1ae4
vmulss 0x309a52(%rip), %xmm2, %xmm5 # 0x3f1ae8
vaddss %xmm5, %xmm4, %xmm6
vsubss %xmm6, %xmm3, %xmm6
vaddss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm3, %xmm3
vmulss 0x309a3e(%rip), %xmm0, %xmm4 # 0x3f1aec
vmulss 0x309a3a(%rip), %xmm1, %xmm1 # 0x3f1af0
vaddss %xmm4, %xmm1, %xmm7
vaddss %xmm5, %xmm7, %xmm7
vsubss %xmm1, %xmm4, %xmm1
vaddss %xmm5, %xmm1, %xmm1
vmovss %xmm0, (%rsi)
vmovss %xmm6, 0x4(%rsi)
vmovss %xmm3, 0x8(%rsi)
vmovss %xmm7, 0xc(%rsi)
vmovss %xmm1, 0x10(%rsi)
vmovss %xmm2, 0x14(%rsi)
addq $0x18, %rsi
addq $0xc, %r10
jmp 0xe805f
incq %r9
jmp 0xe7fac
incq %rdx
jmp 0xe7f8d
movq 0x18(%rsp), %rax
movslq 0x2c(%rax), %r10
movslq 0x30(%rax), %rcx
movq 0x40(%rax), %rdi
imulq 0x90(%rsp), %rdi
movq 0x10(%rax), %r9
imulq %r9, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %edx
movl %edx, 0x14(%rsp)
movq 0x20(%rax), %r11
movq %rcx, %rsi
imulq %r10, %rsi
movl %ebx, %eax
cltd
idivl %r15d
cltq
movq %r9, %rdx
imulq %rsi, %rdx
imulq %rax, %rdx
addq %rdi, %rdx
movq %rdx, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r9, 0x40(%rsp)
movl 0x14(%rsp), %eax
movl %eax, 0x48(%rsp)
movq %r11, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %r10d, 0x5c(%rsp)
movl %ecx, 0x60(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0x64(%rsp)
movq %rsi, 0x70(%rsp)
leaq 0x100(%rsp), %rdi
leaq 0x30(%rsp), %rsi
pushq $0x24
popq %rdx
movl 0x10(%rsp), %ecx
callq 0x101876
addl %r15d, %ebx
jmp 0xe7f57
movq 0x88(%rsp), %rbx
incl %ebx
movl 0xc(%rsp), %ebp
jmp 0xe7e37
movq 0xb8(%rsp), %rax
testq %rax, %rax
je 0xe81fa
lock
decl (%rax)
jne 0xe81fa
movq 0xb0(%rsp), %rsi
movq 0xd0(%rsp), %rdi
testq %rdi, %rdi
je 0xe81f2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe81fa
movq %rsi, %rdi
callq 0x5f3e0
addq $0x148, %rsp # imm = 0x148
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xe8252
jmp 0xe8210
movq %rax, %rbx
movq 0xb8(%rsp), %rax
testq %rax, %rax
je 0xe824a
lock
decl (%rax)
jne 0xe824a
movq 0xb0(%rsp), %rsi
movq 0xd0(%rsp), %rdi
testq %rdi, %rdi
jne 0xe8244
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe824a
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_3x3_winograd.h
|
ncnn::conv3x3s1_winograd23_transform_kernel(ncnn::Mat const&, ncnn::Mat&, int, int, ncnn::Option const&)
|
static void conv3x3s1_winograd23_transform_kernel(const Mat& kernel, Mat& AT, int inch, int outch, const Option& opt)
{
    const int M = outch;
    const int K = inch;
    const int B = 16; // 4x4 transformed coefficients per 3x3 kernel (F(2x2, 3x3))
    int TILE_M, TILE_N, TILE_K;
    get_optimal_tile_mnk(M, 0, K, TILE_M, TILE_N, TILE_K, opt.num_threads);
    const int nn_M = (M + TILE_M - 1) / TILE_M;
    Mat A_tileX(B * TILE_M * TILE_K, 1, opt.num_threads, 4u, (Allocator*)0);
    AT.create(TILE_K * TILE_M, B, (K + TILE_K - 1) / TILE_K, (M + TILE_M - 1) / TILE_M, 4u, (Allocator*)0);
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int ppj = 0; ppj < nn_M; ppj++)
    {
        const int i = ppj * TILE_M;
        Mat A_tile = A_tileX.channel(get_omp_thread_num());
        for (int k = 0; k < K; k += TILE_K)
        {
            const int max_ii = std::min((M - i), TILE_M);
            const int max_kk = std::min((K - k), TILE_K);
            conv3x3s1_winograd23_transform_kernel_tile(kernel, A_tile, inch, i, max_ii, k, max_kk);
            Mat AT_tile = AT.channel(i / TILE_M).depth(k / TILE_K);
            pack_A_tile(A_tile, AT_tile, B, max_ii, max_kk);
        }
    }
}
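|
Here B = 16 because F(2x2, 3x3) expands each 3x3 kernel to a 4x4 tile, and the transform is much simpler: the only non-trivial coefficient is 0.5, which is exactly the single scalar the assembly below loads from 0x3ee014 into xmm3/xmm4. A scalar sketch with the textbook F(2,3) G matrix, consistent with the add/sub pattern in the inner loops:
|
// Scalar reference for the one-channel F(2x2, 3x3) kernel transform
// u = G * g * G^T; the vector code above computes the same sums with a
// single 0.5f constant.
static void winograd23_transform_kernel_1ch(const float g[3][3], float u[4][4])
{
    const float G[4][3] = {
        {1.f, 0.f, 0.f},
        {0.5f, 0.5f, 0.5f},
        {0.5f, -0.5f, 0.5f},
        {0.f, 0.f, 1.f},
    };
    float tmp[4][3]; // tmp = G * g
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 3; j++)
            tmp[i][j] = G[i][0] * g[0][j] + G[i][1] * g[1][j] + G[i][2] * g[2][j];
    for (int i = 0; i < 4; i++) // u = tmp * G^T
        for (int j = 0; j < 4; j++)
            u[i][j] = tmp[i][0] * G[j][0] + tmp[i][1] * G[j][1] + tmp[i][2] * G[j][2];
}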
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %r8, %r13
movl %ecx, %r14d
movl %edx, %r15d
movq %rsi, 0x28(%rsp)
movq %rdi, %r12
movl 0x4(%r8), %eax
movl %eax, (%rsp)
leaq 0xbc(%rsp), %rbx
leaq 0xb8(%rsp), %r8
leaq 0xb4(%rsp), %rbp
movl %ecx, %edi
xorl %esi, %esi
movq %rbx, %rcx
movq %rbp, %r9
callq 0x1016d1
movl (%rbx), %ecx
movq %r14, 0x80(%rsp)
leal (%r14,%rcx), %eax
decl %eax
cltd
idivl %ecx
movl (%rbp), %r14d
movl %eax, %ebp
movl %r14d, %ebx
movq %rcx, 0x78(%rsp)
imull %ecx, %ebx
movl 0x4(%r13), %ecx
leaq 0xc0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebx, %esi
shll $0x4, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x1
popq %rdx
pushq $0x4
popq %r13
movq %r13, %r8
xorl %r9d, %r9d
callq 0x63810
leal (%r15,%r14), %eax
decl %eax
cltd
movq %r14, 0x10(%rsp)
idivl %r14d
andq $0x0, (%rsp)
pushq $0x10
popq %rdx
movq 0x28(%rsp), %rdi
movl %ebx, %esi
movl %eax, %ecx
movl %ebp, %r8d
movq %r13, %r9
callq 0x6393c
leal (%r15,%r15,8), %r14d
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
pushq $0x8
popq %r13
movq %r15, %rbx
xorl %r15d, %r15d
movq %rbx, 0x90(%rsp)
movl %ebp, 0x1c(%rsp)
cmpl %ebp, %r15d
je 0xe868f
callq 0x7357d
movslq 0xec(%rsp), %rdx
movslq 0xf0(%rsp), %rcx
movslq %eax, %r9
imulq 0x100(%rsp), %r9
movq 0xd0(%rsp), %rsi
imulq %rsi, %r9
addq 0xc0(%rsp), %r9
movl 0xf4(%rsp), %eax
movl 0xd8(%rsp), %edi
movq 0xe0(%rsp), %r8
movq %r9, 0xa0(%rsp)
movq %r9, 0x110(%rsp)
andq $0x0, 0x118(%rsp)
movq %rsi, 0x120(%rsp)
movl %edi, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movl %edx, 0x13c(%rsp)
movl %ecx, 0x140(%rsp)
movl $0x1, 0x144(%rsp)
movl %eax, 0x148(%rsp)
imulq %rdx, %rcx
movq %rsi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x150(%rsp)
movl 0xe8(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x138(%rsp)
cmpl $0x4, %eax
jne 0xe843e
movq %rcx, 0x150(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, %ebp
imull %r15d, %ebp
movq 0x80(%rsp), %rax
movl %eax, %edx
subl %ebp, %edx
cmpl %edx, %ecx
cmovll %ecx, %edx
xorl %ecx, %ecx
testl %edx, %edx
movq %r15, %rax
movl $0x0, %r15d
movl %edx, 0x20(%rsp)
cmovgl %edx, %r15d
movq %rax, 0x88(%rsp)
cltq
movq %rax, 0x98(%rsp)
movl %ebx, %r8d
subl %ecx, %r8d
jle 0xe867b
movq 0x10(%rsp), %rax
cmpl %r8d, %eax
cmovll %eax, %r8d
movl %ecx, 0x24(%rsp)
movslq %ecx, %rax
testl %r8d, %r8d
movl $0x0, %ecx
cmovgl %r8d, %ecx
xorl %edx, %edx
movq 0xa0(%rsp), %rsi
cmpq %r15, %rdx
je 0xe85b2
leal (%rdx,%rbp), %edi
imull %r14d, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
addq (%r12), %rdi
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0xe85aa
leaq (%r9,%rax), %r10
imulq $0x24, %r10, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xe8547
vmovss (%r10), %xmm0
vmovss 0x8(%r10), %xmm1
vmovss 0x305b10(%rip), %xmm3 # 0x3ee014
vmulss 0x4(%r10), %xmm3, %xmm2
vmovss %xmm0, 0x30(%rsp,%r11,4)
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm1, %xmm3
vaddss %xmm3, %xmm0, %xmm4
vaddss %xmm4, %xmm2, %xmm4
vmovss %xmm4, 0x3c(%rsp,%r11,4)
vsubss %xmm2, %xmm0, %xmm0
vaddss %xmm3, %xmm0, %xmm0
vmovss %xmm0, 0x48(%rsp,%r11,4)
vmovss %xmm1, 0x54(%rsp,%r11,4)
addq $0xc, %r10
incq %r11
jmp 0xe84eb
movq %r13, %r10
cmpq $0x38, %r10
je 0xe85a2
vmovss 0x28(%rsp,%r10), %xmm0
vmovss 0x30(%rsp,%r10), %xmm1
vmovss 0x305aae(%rip), %xmm4 # 0x3ee014
vmulss 0x2c(%rsp,%r10), %xmm4, %xmm2
vmulss %xmm4, %xmm0, %xmm3
vmulss %xmm4, %xmm1, %xmm4
vaddss %xmm4, %xmm3, %xmm5
vaddss %xmm5, %xmm2, %xmm5
vsubss %xmm2, %xmm3, %xmm2
vaddss %xmm4, %xmm2, %xmm2
vmovss %xmm0, (%rsi)
vmovss %xmm5, 0x4(%rsi)
vmovss %xmm2, 0x8(%rsi)
vmovss %xmm1, 0xc(%rsi)
addq $0x10, %rsi
addq $0xc, %r10
jmp 0xe854a
incq %r9
jmp 0xe84d4
incq %rdx
jmp 0xe84b6
movq 0x28(%rsp), %rax
movslq 0x2c(%rax), %r11
movslq 0x30(%rax), %r10
movq 0x40(%rax), %rdi
imulq 0x98(%rsp), %rdi
movq 0x10(%rax), %r9
imulq %r9, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %ecx
movq 0x20(%rax), %rax
movq %rax, 0xa8(%rsp)
movq %r10, %rsi
imulq %r11, %rsi
movl 0x24(%rsp), %ebx
movl %ebx, %eax
cltd
idivl 0x10(%rsp)
cltq
movq %r9, %rdx
imulq %rsi, %rdx
imulq %rax, %rdx
addq %rdi, %rdx
movq %rdx, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r9, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movq 0xa8(%rsp), %rax
movq %rax, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %r11d, 0x5c(%rsp)
movl %r10d, 0x60(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0x64(%rsp)
movq %rsi, 0x70(%rsp)
leaq 0x110(%rsp), %rdi
leaq 0x30(%rsp), %rsi
pushq $0x10
popq %rdx
movl 0x20(%rsp), %ecx
callq 0x101876
movl %ebx, %ecx
addl 0x10(%rsp), %ecx
movq 0x90(%rsp), %rbx
jmp 0xe8481
movq 0x88(%rsp), %r15
incl %r15d
movl 0x1c(%rsp), %ebp
jmp 0xe835c
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0xe86c6
lock
decl (%rax)
jne 0xe86c6
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
je 0xe86be
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xe86c6
movq %rsi, %rdi
callq 0x5f3e0
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xe871e
jmp 0xe86dc
movq %rax, %rbx
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0xe8716
lock
decl (%rax)
jne 0xe8716
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
jne 0xe8710
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xe8716
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_3x3_winograd.h
|
ncnn::test_prefer_winograd63(int, int, int, int)
|
static bool test_prefer_winograd63(int num_input, int num_output, int w, int h)
{
    // winograd selection strategy (profiled on i7-7700 single thread)
    int minwh = std::min(w, h);
    if (num_input >= 64)
    {
        return false;
    }
    if (num_input >= 32)
    {
        if (num_output >= 64) return false;
        if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
                                     || (minwh >= 19 && minwh <= 20)
                                     || (minwh >= 23 && minwh <= 44)
                                     || (minwh >= 47 && minwh <= 56)
                                     || (minwh >= 63 && minwh <= 130);
        if (num_output >= 16) return (minwh >= 13 && minwh <= 14)
                                     || (minwh >= 19 && minwh <= 20)
                                     || (minwh >= 23 && minwh <= 38)
                                     || (minwh >= 43 && minwh <= 44)
                                     || (minwh >= 47 && minwh <= 140);
        if (num_output >= 8) return (minwh >= 11 && minwh <= 14)
                                    || (minwh >= 19 && minwh <= 20)
                                    || (minwh >= 31 && minwh <= 38)
                                    || (minwh >= 43 && minwh <= 44)
                                    || (minwh >= 55 && minwh <= 162);
        return false;
    }
    if (num_input >= 16)
    {
        if (num_output >= 64) return false;
        if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
                                     || (minwh >= 19 && minwh <= 20)
                                     || (minwh >= 23 && minwh <= 44)
                                     || (minwh >= 47 && minwh <= 92)
                                     || (minwh >= 95 && minwh <= 188);
        if (num_output >= 16) return (minwh >= 11 && minwh <= 14)
                                     || (minwh >= 27 && minwh <= 38)
                                     || (minwh >= 43 && minwh <= 44)
                                     || (minwh >= 47 && minwh <= 74)
                                     || (minwh >= 81 && minwh <= 110)
                                     || (minwh >= 117 && minwh <= 170)
                                     || (minwh >= 177 && minwh <= 182);
        if (num_output >= 8) return (minwh >= 19 && minwh <= 20)
                                    || (minwh >= 33 && minwh <= 38)
                                    || (minwh >= 43 && minwh <= 44)
                                    || (minwh >= 47 && minwh <= 128)
                                    || (minwh >= 155 && minwh <= 210);
        return false;
    }
    if (num_input >= 8)
    {
        if (num_output >= 64) return false;
        if (num_output >= 32) return (minwh >= 7 && minwh <= 14)
                                     || (minwh >= 17 && minwh <= 20)
                                     || (minwh >= 23 && minwh <= 26)
                                     || (minwh >= 31 && minwh <= 38)
                                     || (minwh >= 43 && minwh <= 162);
        if (num_output >= 16) return minwh == 31 || minwh == 32
                                     || (minwh >= 39 && minwh <= 44)
                                     || (minwh >= 47 && minwh <= 212);
        if (num_output >= 8) return false;
        return false;
    }
    return false;
}
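|
The assembly below compiles these interval chains into two idioms: a biased unsigned compare for a single interval, and a bit test against an immediate mask for a union of small intervals. In the num_input >= 8 / num_output >= 32 branch, for example, minwh is biased by 7 (`leal -0x7(%rdx), %ecx`) and tested against the mask 0xff0f3cff, whose set bits are exactly 7-14, 17-20, 23-26, and 31-38; the remaining interval 43..162 becomes `addl $-0x2b, %edx; cmpl $0x78, %edx`, i.e. (unsigned)(minwh - 43) < 120. A sketch of both idioms:
|
#include <cstdint>

// One biased unsigned compare per contiguous interval (cmp + setb)...
static bool in_range(int x, int lo, int hi)
{
    return (unsigned)(x - lo) <= (unsigned)(hi - lo);
}

// ...and a bt-style mask test for a union of intervals that fits in
// 32 values after biasing (cmp + btl).
static bool in_mask(int x, int base, uint32_t mask)
{
    unsigned b = (unsigned)(x - base);
    return b < 32 && ((mask >> b) & 1u);
}

// The num_input >= 8, num_output >= 32 case from the source above:
static bool prefer63_in8_out32(int minwh)
{
    return in_mask(minwh, 7, 0xff0f3cffu) || in_range(minwh, 43, 162);
}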
|
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $0x3f, %edi
jle 0xe8733
xorl %eax, %eax
retq
cmpl $0x20, %edi
jl 0xe8778
cmpl $0x3f, %esi
jg 0xe8730
cmpl $0x20, %esi
jl 0xe87f7
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x309439(%rip), %xmm0, %xmm0 # 0x3f1b90
vpminud 0x30a150(%rip), %xmm0, %xmm1 # 0x3f28b0
vpcmpeqd %xmm1, %xmm0, %xmm0
movb $0x1, %al
vtestps %xmm0, %xmm0
jne 0xe8732
addl $-0x3f, %edx
cmpl $0x44, %edx
jmp 0xe88e2
cmpl $0x10, %edi
jl 0xe87b7
cmpl $0x3f, %esi
jg 0xe8730
cmpl $0x20, %esi
jl 0xe882f
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x3093f4(%rip), %xmm0, %xmm0 # 0x3f1b90
vpminud 0x30a0fb(%rip), %xmm0, %xmm1 # 0x3f28a0
vpcmpeqd %xmm1, %xmm0, %xmm0
movb $0x1, %al
vtestps %xmm0, %xmm0
jne 0xe8732
addl $-0x5f, %edx
jmp 0xe8827
cmpl $0x8, %edi
setl %al
cmpl $0x40, %esi
setge %cl
orb %al, %cl
jne 0xe8730
cmpl $0x20, %esi
jl 0xe8889
leal -0x7(%rdx), %ecx
cmpl $0x20, %ecx
jae 0xe87ec
movb $0x1, %al
movl $0xff0f3cff, %esi # imm = 0xFF0F3CFF
btl %ecx, %esi
jb 0xe8732
addl $-0x2b, %edx
cmpl $0x78, %edx
jmp 0xe88e2
cmpl $0x10, %esi
jl 0xe88b5
leal -0x17(%rdx), %ecx
movb $0x1, %al
cmpl $0x10, %ecx
jb 0xe8732
leal -0xd(%rdx), %ecx
cmpl $0x20, %ecx
jae 0xe8824
movl $0xc00000c3, %esi # imm = 0xC00000C3
btl %ecx, %esi
jb 0xe8732
addl $-0x2f, %edx
cmpl $0x5e, %edx
jmp 0xe88e2
cmpl $0x10, %esi
jl 0xe88e6
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x309327(%rip), %xmm0, %xmm0 # 0x3f1b70
leal -0x51(%rdx), %eax
cmpl $0x1e, %eax
setb %al
leal -0x75(%rdx), %ecx
cmpl $0x36, %ecx
setb %cl
vpminud 0x30a02c(%rip), %xmm0, %xmm1 # 0x3f2890
vpcmpeqd %xmm1, %xmm0, %xmm0
vtestps %xmm0, %xmm0
setne %sil
orb %al, %cl
orb %sil, %cl
movb $0x1, %al
jne 0xe8732
addl $0xffffff4f, %edx # imm = 0xFFFFFF4F
cmpl $0x6, %edx
jmp 0xe88e2
cmpl $0x10, %esi
jl 0xe8730
leal -0x1f(%rdx), %ecx
cmpl $0xe, %ecx
jae 0xe88aa
movb $0x1, %al
movl $0x3f03, %esi # imm = 0x3F03
btl %ecx, %esi
jb 0xe8732
addl $-0x2f, %edx
cmpl $0xa6, %edx
jmp 0xe88e2
cmpl $0x8, %esi
jl 0xe8730
leal -0xb(%rdx), %ecx
cmpl $0x22, %ecx
jae 0xe88dc
movb $0x1, %al
movabsq $0x30ff0030f, %rsi # imm = 0x30FF0030F
btq %rcx, %rsi
jb 0xe8732
addl $-0x37, %edx
cmpl $0x6c, %edx
setb %al
retq
cmpl $0x8, %esi
jl 0xe8730
leal -0x2f(%rdx), %ecx
movb $0x1, %al
cmpl $0x52, %ecx
jb 0xe8732
leal -0x13(%rdx), %ecx
cmpl $0x1a, %ecx
jae 0xe8913
movl $0x30fc003, %esi # imm = 0x30FC003
btl %ecx, %esi
jb 0xe8732
addl $0xffffff65, %edx # imm = 0xFFFFFF65
cmpl $0x38, %edx
jmp 0xe88e2
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::test_prefer_winograd23(int, int, int, int)
|
static bool test_prefer_winograd23(int num_input, int num_output, int w, int h)
{
    int minwh = std::min(w, h);
    if (num_input >= 512)
    {
        if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 256) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
        return false;
    }
    if (num_input >= 256)
    {
        if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 256) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 12);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 4);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
        return false;
    }
    if (num_input >= 128)
    {
        if (num_output >= 512) return (minwh >= 3 && minwh <= 14);
        if (num_output >= 256) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 10);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 10);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 6);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 6);
        return false;
    }
    if (num_input >= 64)
    {
        if (num_output >= 512) return (minwh >= 3 && minwh <= 8) || (minwh >= 11 && minwh <= 12) || (minwh >= 15 && minwh <= 20);
        if (num_output >= 256) return (minwh >= 7 && minwh <= 8);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 8) || (minwh >= 19 && minwh <= 22);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 12);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 12);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 12);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 12);
        return false;
    }
    if (num_input >= 32)
    {
        if (num_output >= 512) return (minwh >= 3 && minwh <= 6) || (minwh >= 11 && minwh <= 12);
        if (num_output >= 256) return (minwh >= 3 && minwh <= 6) || (minwh >= 11 && minwh <= 12);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 4) || (minwh >= 7 && minwh <= 16);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 8);
        if (num_output >= 32) return (minwh >= 7 && minwh <= 8);
        if (num_output >= 16) return (minwh >= 7 && minwh <= 8);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 10);
        return false;
    }
    if (num_input >= 16)
    {
        if (num_output >= 512) return (minwh >= 11 && minwh <= 12);
        if (num_output >= 256) return (minwh >= 3 && minwh <= 12);
        if (num_output >= 128) return (minwh >= 3 && minwh <= 6)
                                      || (minwh >= 9 && minwh <= 18);
        if (num_output >= 64) return (minwh >= 3 && minwh <= 4)
                                     || (minwh >= 7 && minwh <= 8)
                                     || (minwh >= 11 && minwh <= 12)
                                     || (minwh >= 15 && minwh <= 18);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 4)
                                     || (minwh >= 9 && minwh <= 10);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 10);
        if (num_output >= 8) return (minwh >= 3 && minwh <= 8)
                                    || (minwh >= 11 && minwh <= 12);
        return false;
    }
    if (num_input >= 8)
    {
        if (num_output >= 128) return false;
        if (num_output >= 64) return (minwh >= 3 && minwh <= 4)
                                     || (minwh >= 7 && minwh <= 14)
                                     || (minwh >= 47 && minwh <= 48);
        if (num_output >= 32) return (minwh >= 3 && minwh <= 6)
                                     || (minwh >= 15 && minwh <= 16);
        if (num_output >= 16) return (minwh >= 3 && minwh <= 6)
                                     || (minwh >= 9 && minwh <= 14)
                                     || (minwh >= 47 && minwh <= 212);
        if (num_output >= 8) return true;
        return false;
    }
    return false;
}
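|
This table mirrors test_prefer_winograd63 but favors the small F(2x2, 3x3) tile for deep layers with small spatial extent, where larger tiles waste work on padding. The caller that consults these predicates is not part of this listing; a hypothetical dispatch, assuming the predicates are checked the way their names suggest, with winograd43 as the fallback:
|
// Hypothetical caller-side selection; the actual logic in ncnn's
// convolution setup is outside this listing and may differ.
static int pick_winograd_tile(int num_input, int num_output, int w, int h)
{
    if (test_prefer_winograd63(num_input, num_output, w, h))
        return 63; // F(6x6, 3x3): fewest tiles, heaviest transforms
    if (test_prefer_winograd23(num_input, num_output, w, h))
        return 23; // F(2x2, 3x3): cheapest transforms, most tiles
    return 43;     // F(4x4, 3x3): middle ground
}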
|
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $0x200, %edi # imm = 0x200
jl 0xe894d
cmpl $0x200, %esi # imm = 0x200
jge 0xe8989
cmpl $0x100, %esi # imm = 0x100
jge 0xe8989
cmpl $0x80, %esi
jge 0xe8989
cmpl $0x40, %esi
jge 0xe89c4
jmp 0xe8a07
cmpl $0x100, %edi # imm = 0x100
jl 0xe8979
cmpl $0x200, %esi # imm = 0x200
jge 0xe8989
cmpl $0x100, %esi # imm = 0x100
jge 0xe8989
cmpl $0x80, %esi
jl 0xe89fa
addl $-0x3, %edx
cmpl $0xa, %edx
jmp 0xe898f
cmpl $0x80, %edi
jl 0xe8993
cmpl $0x200, %esi # imm = 0x200
jl 0xe89bc
addl $-0x3, %edx
cmpl $0xc, %edx
setb %al
retq
cmpl $0x40, %edi
jl 0xe89cc
cmpl $0x200, %esi # imm = 0x200
jl 0xe8a1f
leal -0x3(%rdx), %ecx
cmpl $0xa, %ecx
jae 0xe89b4
movb $0x1, %al
movl $0x33f, %esi # imm = 0x33F
btl %ecx, %esi
jb 0xe8992
addl $-0xf, %edx
jmp 0xe8a80
cmpl $0x100, %esi # imm = 0x100
jl 0xe8a2f
leal -0x3(%rdx), %eax
cmpl $0x6, %eax
jmp 0xe89eb
cmpl $0x20, %edi
jl 0xe8a42
cmpl $0x200, %esi # imm = 0x200
jge 0xe89e5
cmpl $0x100, %esi # imm = 0x100
jl 0xe8a9f
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0xb, %edx
cmpl $0x2, %edx
setb %al
orb %cl, %al
retq
cmpl $0x40, %esi
jl 0xe8a07
addl $-0x3, %edx
jmp 0xe8b48
cmpl $0x20, %esi
jge 0xe8a7d
cmpl $0x10, %esi
jge 0xe8a7d
cmpl $0x8, %esi
jl 0xe8a9c
jmp 0xe8b1d
cmpl $0x100, %esi # imm = 0x100
jl 0xe8a57
addl $-0x7, %edx
jmp 0xe8b48
cmpl $0x80, %esi
jl 0xe8a74
addl $-0x3, %edx
cmpl $0x8, %edx
jmp 0xe898f
cmpl $0x10, %edi
jl 0xe8a88
cmpl $0x200, %esi # imm = 0x200
jl 0xe8ab9
addl $-0xb, %edx
jmp 0xe8b48
cmpl $0x80, %esi
jl 0xe8ae5
leal -0x3(%rdx), %eax
cmpl $0x6, %eax
setb %cl
addl $-0x13, %edx
cmpl $0x4, %edx
jmp 0xe89f4
cmpl $0x40, %esi
jl 0xe8b0b
addl $-0x3, %edx
cmpl $0x6, %edx
jmp 0xe898f
cmpl $0x8, %edi
setl %al
cmpl $0x7f, %esi
setg %cl
orb %al, %cl
je 0xe8b28
xorl %eax, %eax
retq
cmpl $0x80, %esi
jl 0xe8b50
leal -0x3(%rdx), %eax
cmpl $0x2, %eax
setb %cl
addl $-0x7, %edx
jmp 0xe8add
cmpl $0x100, %esi # imm = 0x100
jge 0xe8971
cmpl $0x80, %esi
jl 0xe8b8f
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0x9, %edx
cmpl $0xa, %edx
jmp 0xe89f4
cmpl $0x40, %esi
jge 0xe8971
cmpl $0x20, %esi
jge 0xe8971
cmpl $0x10, %esi
jge 0xe8971
cmpl $0x8, %esi
jge 0xe8971
jmp 0xe8a9c
cmpl $0x20, %esi
jge 0xe8a37
cmpl $0x10, %esi
jl 0xe8a11
addl $-0x3, %edx
cmpl $0x4, %edx
jmp 0xe898f
cmpl $0x40, %esi
jl 0xe8b79
leal -0x3(%rdx), %ecx
cmpl $0xc, %ecx
jae 0xe8b45
movb $0x1, %al
movl $0xff3, %esi # imm = 0xFF3
btl %ecx, %esi
jb 0xe8992
addl $-0x2f, %edx
cmpl $0x2, %edx
jmp 0xe898f
cmpl $0x40, %esi
jge 0xe8a7d
cmpl $0x20, %esi
jge 0xe8a27
cmpl $0x10, %esi
jge 0xe8a27
cmpl $0x8, %esi
jge 0xe8a37
jmp 0xe8a9c
cmpl $0x20, %esi
jl 0xe8bb4
leal -0x3(%rdx), %eax
cmpl $0x4, %eax
setb %cl
addl $-0xf, %edx
jmp 0xe89f1
cmpl $0x40, %esi
jl 0xe8bdf
leal -0x3(%rdx), %ecx
cmpl $0xa, %ecx
jae 0xe8bac
movb $0x1, %al
movl $0x333, %esi # imm = 0x333
btl %ecx, %esi
jb 0xe8992
addl $-0xf, %edx
jmp 0xe8b20
cmpl $0x10, %esi
jl 0xe8bf5
leal -0x3(%rdx), %ecx
cmpl $0xc, %ecx
jae 0xe8bd1
movb $0x1, %al
movl $0xfcf, %esi # imm = 0xFCF
btl %ecx, %esi
jb 0xe8992
addl $-0x2f, %edx
cmpl $0xa6, %edx
jmp 0xe898f
cmpl $0x20, %esi
jl 0xe8bfc
leal -0x3(%rdx), %eax
cmpl $0x2, %eax
setb %cl
addl $-0x9, %edx
jmp 0xe89f1
cmpl $0x8, %esi
setge %al
retq
cmpl $0x10, %esi
jge 0xe8a37
cmpl $0x8, %esi
jge 0xe89c4
jmp 0xe8a9c
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::Convolution_x86_fma::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Convolution_x86_fma::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int elembits = bottom_blob.elembits();
    Mat bottom_blob_int8 = bottom_blob;
    if (elembits != 8)
    {
        Option opt_q = opt;
        opt_q.blob_allocator = opt.workspace_allocator;
        quantize_to_int8(bottom_blob, bottom_blob_int8, bottom_blob_int8_scales, opt_q);
    }
    // NCNN_LOGE("Convolution_arm input %d x %d ksize=%d %d stride=%d %d", w, h, kernel_w, kernel_h, stride_w, stride_h);
    Mat bottom_blob_bordered;
    make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;
    int w = bottom_blob_bordered.w;
    int h = bottom_blob_bordered.h;
    int channels = bottom_blob_bordered.c;
    int elempack = bottom_blob_bordered.elempack;
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    int outw = (w - kernel_extent_w) / stride_w + 1;
    int outh = (h - kernel_extent_h) / stride_h + 1;
    bool use_int8_requantize = int8_scale_term > 100;
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        if (use_int8_requantize)
            out_elempack = num_output % 8 == 0 ? 8 : 1;
        else
            out_elempack = num_output % 4 == 0 ? 4 : 1;
    }
#endif // __SSE2__
    size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
    // NCNN_LOGE("forward_int8_arm %d %d %d %d %d", w, h, bottom_blob_bordered.c, elempack, out_elempack);
    top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;
    const int num_input = channels * elempack;
    int out_elempack_int32 = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
    }
#endif // __SSE2__
    Mat top_blob_int32;
    top_blob_int32.create(outw, outh, num_output / out_elempack_int32, (size_t)(4u * out_elempack_int32), out_elempack_int32, opt.workspace_allocator);
    if (top_blob_int32.empty())
        return -100;
#if __SSE2__
    if (elempack == 8 && out_elempack_int32 == 4)
    {
        if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv1x1s1_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv1x1s2_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv3x3s1_winograd43_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
        }
        else if (opt.use_sgemm_convolution)
        {
            convolution_im2col_sgemm_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
        else
        {
            convolution_pack8to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
    }
    if (elempack == 1 && out_elempack_int32 == 4)
    {
        if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv1x1s1_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv1x1s2_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv3x3s1_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv3x3s2_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 7 && kernel_h == 7 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv7x7s2_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (opt.use_sgemm_convolution) // TODO better condition && num_input >= 8 && num_output >= 8)
        {
            convolution_im2col_sgemm_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
        else
        {
            convolution_pack1to4_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
    }
    if (elempack == 8 && out_elempack_int32 == 1)
    {
        if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv1x1s1_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv1x1s2_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv3x3s1_winograd43_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
        }
        else if (opt.use_sgemm_convolution) // TODO better condition && num_input >= 8 && num_output >= 8)
        {
            convolution_im2col_sgemm_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
        else
        {
            convolution_pack8to1_int8_sse(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
    }
#endif // __SSE2__
    if (elempack == 1 && out_elempack_int32 == 1)
    {
        if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
        {
            conv1x1s1_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
        {
            conv1x1s2_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, opt);
        }
        else if (opt.use_winograd_convolution && opt.use_winograd23_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1 && num_input >= 16 && num_output >= 16)
        {
            conv3x3s1_winograd23_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd23_data, opt);
            // conv3x3s1_winograd43_int8_sse(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, opt);
        }
        else if (opt.use_sgemm_convolution)
        {
            convolution_im2col_sgemm_int8_sse(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
        else
        {
            convolution_int8(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
        }
    }
    if (use_int8_requantize)
    {
        requantize_from_int32_to_int8(top_blob_int32, top_blob, scale_in_data, top_blob_int8_scales, bias_data, activation_type, activation_params, opt);
    }
    else
    {
        dequantize_from_int32(top_blob_int32, top_blob, scale_in_data, bias_data, opt);
        if (activation)
        {
            activation->forward_inplace(top_blob, opt);
        }
    }
    return 0;
}
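|
The first instructions of the assembly below compute bottom_blob.elembits() inline: elemsize * 8 / elempack, with the division guarded against elempack == 0 (the `testl %ecx, %ecx; je` around the idivl), and the result compared to 8 to decide whether the input is already int8 or must first go through quantize_to_int8. An equivalent scalar form:
|
// What the inlined Mat::elembits() amounts to: elemsize is bytes per
// packed element and elempack counts lanes per element, so this yields
// bits per lane (0 when the Mat is empty).
static int elembits(const ncnn::Mat& m)
{
    return m.elempack ? (int)(m.elemsize * 8) / m.elempack : 0;
}

// Prologue condition from the assembly: the quantize step is skipped
// only when elembits(bottom_blob) == 8.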
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x6e8, %rsp # imm = 0x6E8
movq %rcx, %r14
movq %rdx, %rbx
movq %rdi, 0x20(%rsp)
movl 0x18(%rsi), %ecx
movq 0x10(%rsi), %rdi
testl %ecx, %ecx
je 0xf8f2b
leal (,%rdi,8), %eax
cltd
idivl %ecx
cmpl $0x8, %eax
sete %al
jmp 0xf8f2d
xorl %eax, %eax
movq 0x8(%rsi), %rdx
vmovups (%rsi), %xmm0
vmovaps %xmm0, 0x5d0(%rsp)
movq %rdi, 0x5e0(%rsp)
movl %ecx, 0x5e8(%rsp)
movq 0x20(%rsi), %rcx
movq %rcx, 0x5f0(%rsp)
vmovdqu 0x28(%rsi), %xmm0
vmovdqu %xmm0, 0x5f8(%rsp)
movl 0x38(%rsi), %ecx
movl %ecx, 0x608(%rsp)
movq 0x40(%rsi), %rcx
movq %rcx, 0x610(%rsp)
testq %rdx, %rdx
je 0xf8f85
lock
incl (%rdx)
testb %al, %al
jne 0xf8fd7
vmovdqu (%r14), %ymm0
vmovdqu 0x20(%r14), %ymm1
leaq 0x350(%rsp), %rcx
vmovdqu %ymm1, 0x20(%rcx)
vmovdqu %ymm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
movq 0x20(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
leaq 0x238(%rdx,%rax), %rdx
leaq 0x5d0(%rsp), %rax
movq %rsi, %rdi
movq %rax, %rsi
vzeroupper
callq 0x652e3
leaq 0x90(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdx)
vmovdqu %xmm0, 0xc(%rdx)
vmovdqa %xmm0, 0x20(%rdx)
vmovdqu %xmm0, 0x2c(%rdx)
movq 0x20(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rdi
addq %rcx, %rdi
leaq 0x5d0(%rsp), %rsi
movq %r14, %rcx
callq 0x96742
pushq $-0x64
popq %rbp
cmpq $0x0, 0x90(%rsp)
je 0xfbcfb
movslq 0xc8(%rsp), %rcx
movq 0xd0(%rsp), %rax
imulq %rcx, %rax
testq %rax, %rax
je 0xfbcfb
movq %rcx, 0xf8(%rsp)
movq 0x20(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rdi
movl 0xd0(%r8,%rdi), %ecx
movl 0xd4(%r8,%rdi), %eax
decl %eax
imull 0xdc(%r8,%rdi), %eax
notl %eax
movl 0xd8(%r8,%rdi), %esi
decl %esi
imull 0xe0(%r8,%rdi), %esi
notl %esi
addl 0xbc(%rsp), %eax
cltd
idivl 0xe4(%r8,%rdi)
movl %eax, %r13d
incl %r13d
addl 0xc0(%rsp), %esi
movl %esi, %eax
cltd
idivl 0xe8(%r8,%rdi)
movl %eax, %r12d
incl %r12d
movl 0x108(%r8,%rdi), %r15d
cmpb $0x1, 0x27(%r14)
jne 0xf90e2
cmpl $0x65, %r15d
jl 0xf90e8
pushq $0x8
popq %rax
testb $0x7, %cl
pushq $0x1
popq %r9
cmovel %eax, %r9d
jmp 0xf90f7
pushq $0x1
popq %r9
jmp 0xf90f7
xorl %eax, %eax
testb $0x3, %cl
sete %al
leal (%rax,%rax,2), %r9d
incl %r9d
movl 0xa8(%rsp), %eax
movl %eax, 0x70(%rsp)
leal (,%r9,4), %r8d
cmpl $0x65, %r15d
cmovgel %r9d, %r8d
movl %ecx, %eax
cltd
idivl %r9d
movq 0x8(%r14), %rcx
movq %rcx, (%rsp)
movq %rbx, %rdi
movl %r13d, %esi
movl %r12d, %edx
movl %eax, %ecx
callq 0x628f2
cmpq $0x0, (%rbx)
je 0xfbcfb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0xfbcfb
movl %r15d, 0x33c(%rsp)
movq 0x20(%rsp), %rsi
movq (%rsi), %rax
cmpb $0x1, 0x27(%r14)
movq %rbx, 0x328(%rsp)
jne 0xf9185
movq -0x18(%rax), %rcx
xorl %edx, %edx
testb $0x3, 0xd0(%rsi,%rcx)
sete %dl
leal (%rdx,%rdx,2), %r9d
incl %r9d
jmp 0xf9189
pushq $0x1
popq %r9
leaq 0x100(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq -0x18(%rax), %rax
movl 0xd0(%rsi,%rax), %eax
cltd
idivl %r9d
leal (,%r9,4), %r8d
movq %r14, 0x40(%rsp)
movq 0x10(%r14), %rcx
movq %rcx, (%rsp)
movl %r13d, %esi
movl %r12d, %edx
movl %eax, %ecx
movq %r9, 0xe0(%rsp)
callq 0x628f2
pushq $-0x64
popq %rbp
cmpq $0x0, 0x100(%rsp)
je 0xfbcc4
movslq 0x138(%rsp), %rcx
movq 0x140(%rsp), %rax
movq %rcx, 0x38(%rsp)
imulq %rcx, %rax
testq %rax, %rax
je 0xfbcc4
movl 0x70(%rsp), %ebx
cmpl $0x8, %ebx
movq 0x20(%rsp), %r14
jne 0xf99ed
cmpl $0x4, 0xe0(%rsp)
jne 0xf99ed
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edi
cmpl $0x1, %edi
movq %r14, %rcx
movq 0x40(%rsp), %r14
jne 0xf93f0
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xf93f0
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xf92b1
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xf92b1
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xf92b1
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xf92b1
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x101345
jmp 0xfbc1a
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xf93f0
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xf93f0
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xf93f0
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xf93f0
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r13d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x12c(%rsp), %ebp
movl 0x130(%rsp), %r12d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r12d, %edx
movl %r13d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
movq 0x90(%rsp), %rax
movq 0x350(%rsp), %rcx
xorl %esi, %esi
testl %ebp, %ebp
cmovlel %esi, %ebp
movslq %ebx, %rdi
testl %r12d, %r12d
cmovlel %esi, %r12d
testl %r13d, %r13d
cmovlel %esi, %r13d
shlq $0x3, %rdi
cmpq %r13, %rsi
je 0xfbd7d
movq 0xd0(%rsp), %r8
imulq %rsi, %r8
imulq 0xa0(%rsp), %r8
addq %rax, %r8
movq 0x390(%rsp), %r9
imulq %rsi, %r9
imulq 0x360(%rsp), %r9
addq %rcx, %r9
xorl %r10d, %r10d
movl %ebp, %r11d
cmpl %r12d, %r10d
je 0xf93eb
subl $0x1, %r11d
jb 0xf93e3
movq (%r8), %rbx
movq %rbx, (%r9)
addq $0x10, %r8
addq $0x8, %r9
jmp 0xf93cd
addq %rdi, %r8
incl %r10d
jmp 0xf93c5
incq %rsi
jmp 0xf9389
cmpb $0x1, 0x1c(%r14)
jne 0xf9495
cmpl $0x3, %edi
jne 0xf9495
cmpb $0x0, 0x38(%r14)
je 0xf9495
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xf9495
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xf9495
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xf9495
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xf9495
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xf9495
callq 0x732f7
movq 0x20(%rsp), %rcx
leaq 0xf0(%rcx), %r12
testl %eax, %eax
je 0xfc018
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r12, %rdx
movq 0x40(%rsp), %rcx
callq 0x13683f
jmp 0xfbc1a
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %edx
movslq 0xdc(%rcx,%rax), %rbp
movslq 0xe0(%rcx,%rax), %rsi
movq %rsi, 0x18(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x50(%rsp)
movl 0xbc(%rsp), %eax
movl %eax, 0x60(%rsp)
movl 0xc8(%rsp), %r13d
movq %r14, %rax
movslq 0x12c(%rsp), %r14
movl 0x130(%rsp), %r12d
cmpb $0x1, 0x1d(%rax)
movq %rdi, 0x30(%rsp)
jne 0xf969a
movl %r12d, %esi
imull %r14d, %esi
movq %rdx, %r15
imull %edi, %edx
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r13d, %ecx
callq 0x628f2
movl 0x60(%rsp), %edx
imull 0x50(%rsp), %edx
movl %ebx, %ecx
imull %r14d, %ecx
xorl %esi, %esi
testl %r14d, %r14d
cmovlel %esi, %r14d
testl %r12d, %r12d
cmovlel %esi, %r12d
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x48(%rsp)
movq 0x30(%rsp), %r9
testl %r9d, %r9d
cmovlel %esi, %r9d
movslq 0xbc(%rsp), %rax
testl %r15d, %r15d
cmovlel %esi, %r15d
movq 0x90(%rsp), %rdi
movq %rdi, 0x50(%rsp)
testl %r13d, %r13d
cmovlel %esi, %r13d
movq 0x350(%rsp), %rsi
movq %rsi, 0x38(%rsp)
imulq 0x18(%rsp), %rax
movq %rax, 0x58(%rsp)
movslq %edx, %r8
shlq $0x3, %r8
movslq %ecx, %rax
shlq $0x3, %rax
subq %rax, %r8
shlq $0x3, %rbx
movq %r15, %rcx
xorl %edx, %edx
movq %r15, 0x28(%rsp)
cmpq %r13, %rdx
je 0xf9727
movq 0xd0(%rsp), %rsi
imulq %rdx, %rsi
movq 0xa0(%rsp), %rdi
imulq %rdi, %rsi
movq 0x390(%rsp), %r11
movq %rdx, 0x30(%rsp)
imulq %rdx, %r11
imulq 0x360(%rsp), %r11
addq 0x50(%rsp), %rsi
movq %rsi, 0x60(%rsp)
addq 0x38(%rsp), %r11
imulq 0x58(%rsp), %rdi
movq %rdi, 0x18(%rsp)
xorl %r15d, %r15d
cmpq %rcx, %r15
je 0xf968d
movq 0x18(%rsp), %r10
imulq %r15, %r10
addq 0x60(%rsp), %r10
xorl %edx, %edx
cmpq %r9, %rdx
je 0xf9683
movq %rdx, %rax
imulq %rbp, %rax
leaq (%r10,%rax,8), %rdi
xorl %ecx, %ecx
cmpl %r12d, %ecx
je 0xf967e
movl %r14d, %eax
subl $0x1, %eax
jb 0xf9677
movq (%rdi), %rsi
movq %rsi, (%r11)
addq $0x8, %r11
addq %rbx, %rdi
jmp 0xf9663
addq %r8, %rdi
incl %ecx
jmp 0xf965b
incq %rdx
jmp 0xf9649
incq %r15
movq 0x28(%rsp), %rcx
jmp 0xf9634
movq 0x30(%rsp), %rdx
incq %rdx
jmp 0xf95dc
movl %edx, %r15d
imull %edi, %r15d
movslq %r15d, %rsi
leaq 0x350(%rsp), %rdi
movq %rdx, 0x28(%rsp)
leaq 0x1b0(%rsp), %rdx
callq 0x73bbe
movl 0x60(%rsp), %ecx
imull 0x18(%rsp), %ecx
movl %ebp, %eax
movq 0x30(%rsp), %rdx
imull %edx, %eax
subl %eax, %ecx
movl %ecx, 0x60(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x28(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x350(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
cmpl %r11d, %ecx
je 0xf978d
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xf971c
movl %esi, (%rax,%r9,4)
incq %r9
addl %ebp, %esi
jmp 0xf970c
addl 0x60(%rsp), %esi
incl %ecx
addl %r10d, %edx
jmp 0xf96f9
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0x107aae
movq 0x358(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
je 0xf99ed
lock
decl (%rax)
jne 0xf99ed
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbdd8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xf99ed
movl %r14d, %ecx
shll $0x3, %ebx
shlq $0x2, %r14
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %ecx, %ecx
movl $0x0, %esi
cmovgl %ecx, %esi
movq %rsi, 0x18(%rsp)
testl %r12d, %r12d
cmovlel %edx, %r12d
movq 0x38(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x38(%rsp)
movq %r15, %rsi
shlq $0x5, %rsi
shlq $0x2, %r15
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x38(%rsp), %rdx
je 0xf99d7
movq 0x140(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x110(%rsp), %rcx
addq 0x100(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rdx, 0x30(%rsp)
cmpq %r12, %rcx
je 0xf99cf
movq %rcx, 0x58(%rsp)
imulq 0x50(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r10d, %r10d
cmpq 0x18(%rsp), %r10
je 0xf99b4
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %r11
imulq %rdx, %r11
imulq 0x28(%rcx), %r11
addq 0x18(%rcx), %r11
movl %ebx, %ecx
imull %r10d, %ecx
movslq 0xbc(%rsp), %rdi
imulq 0x28(%rsp), %rdi
movq 0xa0(%rsp), %r8
movq 0xd0(%rsp), %rdx
imulq %r8, %rdi
addq 0x90(%rsp), %rdi
imulq %r8, %rdx
movslq %ecx, %r8
addq %rdi, %r8
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
cmpq %r13, %rcx
je 0xf996a
movq %rdx, %rbp
imulq %rcx, %rbp
addq %r8, %rbp
xorl %edi, %edi
cmpq %rdi, %r15
je 0xf995f
movslq (%rax,%rdi), %r9
vmovq (%rbp,%r9,8), %xmm5
vpcmpgtb %xmm5, %xmm0, %xmm6
vpunpcklbw %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
vmovdqu (%r11,%rdi,8), %xmm6
vmovdqu 0x10(%r11,%rdi,8), %xmm7
vpcmpgtb %xmm6, %xmm0, %xmm8
vpcmpgtb %xmm7, %xmm0, %xmm9
vpunpcklbw %xmm8, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
vpunpckhbw %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
vpunpcklbw %xmm9, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
vpmullw %xmm5, %xmm10, %xmm9
vpmulhw %xmm5, %xmm10, %xmm10
vpmullw %xmm6, %xmm5, %xmm11
vpmulhw %xmm6, %xmm5, %xmm6
vpmullw %xmm5, %xmm8, %xmm12
vpmulhw %xmm5, %xmm8, %xmm8
vpmullw %xmm7, %xmm5, %xmm13
vpmulhw %xmm7, %xmm5, %xmm5
vpunpcklwd %xmm10, %xmm9, %xmm7 # xmm7 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vpaddd %xmm7, %xmm4, %xmm4
vpunpcklwd %xmm6, %xmm11, %xmm7 # xmm7 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
vpaddd %xmm7, %xmm3, %xmm3
vpunpcklwd %xmm8, %xmm12, %xmm7 # xmm7 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
vpaddd %xmm7, %xmm2, %xmm2
vpunpcklwd %xmm5, %xmm13, %xmm7 # xmm7 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
vpaddd %xmm7, %xmm1, %xmm1
vpunpckhwd %xmm10, %xmm9, %xmm7 # xmm7 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
vpaddd %xmm7, %xmm4, %xmm4
vpunpckhwd %xmm6, %xmm11, %xmm6 # xmm6 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
vpaddd %xmm6, %xmm3, %xmm3
vpunpckhwd %xmm8, %xmm12, %xmm6 # xmm6 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
vpaddd %xmm6, %xmm2, %xmm2
vpunpckhwd %xmm5, %xmm13, %xmm5 # xmm5 = xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
vpaddd %xmm5, %xmm1, %xmm1
addq $0x4, %rdi
jmp 0xf98ad
addq %rsi, %r11
incq %rcx
jmp 0xf9898
vpunpckldq %xmm3, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vpunpckldq %xmm1, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vpunpckhdq %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
vpunpckhdq %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
vpunpcklqdq %xmm6, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm6[0]
vpunpckhqdq %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1]
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm1, %xmm3, %xmm4 # xmm4 = xmm3[0],xmm1[0]
vpunpckhqdq %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[1],xmm1[1]
vpaddd %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm2, %xmm1
movq %r10, %rcx
shlq $0x4, %rcx
movq 0x60(%rsp), %rdx
vmovdqu %xmm1, (%rdx,%rcx)
incq %r10
movq 0x30(%rsp), %rdx
jmp 0xf982b
movq 0x60(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x60(%rsp)
movq 0x58(%rsp), %rcx
incq %rcx
jmp 0xf980f
incq %rdx
jmp 0xf97db
leaq 0x350(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
cmpl $0x1, %ebx
jne 0xfa7ac
cmpl $0x4, 0xe0(%rsp)
jne 0xfa7ac
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %ecx
cmpl $0x7, %ecx
je 0xf9a9c
cmpl $0x3, %ecx
je 0xf9cc0
cmpl $0x1, %ecx
jne 0xfa279
cmpl $0x1, 0xd8(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xf9ebb
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xf9ebb
cmpl $0x1, 0xe4(%r14,%rax)
jne 0xf9ebb
cmpl $0x1, 0xe8(%r14,%rax)
jne 0xf9ebb
leaq 0x60(%r14), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x40(%rsp), %rcx
callq 0x101428
jmp 0xfbc1a
cmpl $0x7, 0xd8(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xfa279
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x12c(%rsp), %ebx
movl 0x130(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x31
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x30(%rsp)
subl %ebx, %r14d
addl %r14d, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rsi
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x28(%rsp)
cmpq %r15, %rdx
je 0xfbe3b
movslq 0xbc(%rsp), %r9
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %rax
imulq %rax, %rdi
addq 0x90(%rsp), %rdi
movq 0x390(%rsp), %r8
movq %rdx, 0x18(%rsp)
imulq %rdx, %r8
imulq 0x360(%rsp), %r8
addq 0x350(%rsp), %r8
imulq %rax, %r9
movq %r9, 0x60(%rsp)
xorl %r9d, %r9d
cmpq $0x7, %r9
je 0xf9cae
movq 0x60(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x7, %r11
je 0xf9ca1
leaq (%r10,%r11), %rax
xorl %r14d, %r14d
cmpl %ebp, %r14d
je 0xf9c99
leaq (%rax,%rsi), %r12
xorl %r13d, %r13d
movq %rax, %r15
leal 0x3(%r13), %edx
cmpl %ebx, %edx
jge 0xf9c6d
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
movb 0x4(%r15), %dl
movb %dl, 0x2(%r8,%r13)
movb 0x6(%r15), %dl
movb %dl, 0x3(%r8,%r13)
addq $0x8, %r15
addq $0x4, %r13
addq $0x8, %r12
jmp 0xf9c19
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
addq $0x4, %r15
addq $0x2, %r13
addq $0x4, %r12
leal 0x1(%r13), %edx
cmpl %ebx, %edx
jl 0xf9c51
jmp 0xf9c86
movb (%rax,%r13,2), %dl
movb %dl, (%r8,%r13)
incq %r13
addq $0x2, %r12
cmpl %ebx, %r13d
jl 0xf9c77
incl %r14d
addq %r13, %r8
movq %r12, %rax
jmp 0xf9c06
incq %r11
jmp 0xf9bf5
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xf9bdc
movq 0x18(%rsp), %rdx
incq %rdx
movq 0x28(%rsp), %r15
jmp 0xf9b7d
cmpl $0x3, 0xd8(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xfa064
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xfa064
cmpl $0x1, 0xe4(%r14,%rax)
jne 0xfa064
cmpl $0x1, 0xe8(%r14,%rax)
jne 0xfa064
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x12c(%rsp), %ebx
movl 0x130(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x9
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x18(%rsp)
subl %ebx, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rax
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x60(%rsp)
cmpq %r15, %rdx
je 0xfbe9c
movslq 0xbc(%rsp), %rsi
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %r9
imulq %r9, %rdi
addq 0x90(%rsp), %rdi
movq 0x390(%rsp), %r8
imulq %rdx, %r8
imulq 0x360(%rsp), %r8
addq 0x350(%rsp), %r8
imulq %r9, %rsi
xorl %r9d, %r9d
cmpq $0x3, %r9
je 0xf9eae
movq %rsi, %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xf9ea1
leaq (%r10,%r11), %r14
xorl %r15d, %r15d
cmpl %ebp, %r15d
je 0xf9e99
xorl %r12d, %r12d
leal 0x3(%r12), %r13d
cmpl %ebx, %r13d
jge 0xf9e6f
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
movb 0x1(%r14,%r12), %r13b
movb %r13b, 0x1(%r8,%r12)
movb 0x2(%r14,%r12), %r13b
movb %r13b, 0x2(%r8,%r12)
movb 0x3(%r14,%r12), %r13b
movb %r13b, 0x3(%r8,%r12)
addq $0x4, %r12
jmp 0xf9e23
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
movb 0x1(%r14,%r12), %r13b
movb %r13b, 0x1(%r8,%r12)
addq $0x2, %r12
leal 0x1(%r12), %r13d
cmpl %ebx, %r13d
jl 0xf9e59
jmp 0xf9e86
movb (%r14,%r12), %r13b
movb %r13b, (%r8,%r12)
incq %r12
cmpl %ebx, %r12d
jl 0xf9e7b
addq %rax, %r14
addq %r12, %r14
incl %r15d
addq %r12, %r8
jmp 0xf9e1b
incq %r11
jmp 0xf9e0a
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xf9df3
incq %rdx
movq 0x60(%rsp), %r15
jmp 0xf9d9e
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xfa279
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r15d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x12c(%rsp), %ebp
movl 0x130(%rsp), %r13d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %r14
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
xorl %eax, %eax
testl %r13d, %r13d
cmovlel %eax, %r13d
movslq %ebx, %rcx
testl %r15d, %r15d
cmovlel %eax, %r15d
cmpq %r15, %rax
je 0xfbefd
movq 0xd0(%rsp), %rsi
imulq %rax, %rsi
imulq 0xa0(%rsp), %rsi
addq 0x90(%rsp), %rsi
movq 0x390(%rsp), %rdi
imulq %rax, %rdi
imulq 0x360(%rsp), %rdi
addq 0x350(%rsp), %rdi
xorl %r8d, %r8d
cmpl %r13d, %r8d
je 0xfa05c
leaq (%rsi,%rcx), %r9
xorl %r10d, %r10d
movq %rsi, %r11
leal 0x3(%r10), %ebx
cmpl %ebp, %ebx
jge 0xfa030
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
movb 0x4(%r11), %bl
movb %bl, 0x2(%rdi,%r10)
movb 0x6(%r11), %bl
movb %bl, 0x3(%rdi,%r10)
addq $0x8, %r11
addq $0x4, %r10
addq $0x8, %r9
jmp 0xf9fdc
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
addq $0x4, %r11
addq $0x2, %r10
addq $0x4, %r9
leal 0x1(%r10), %ebx
cmpl %ebp, %ebx
jl 0xfa014
jmp 0xfa049
movb (%rsi,%r10,2), %r11b
movb %r11b, (%rdi,%r10)
addq $0x2, %r9
incq %r10
cmpl %ebp, %r10d
jl 0xfa03a
incl %r8d
addq %r10, %rdi
movq %r9, %rsi
jmp 0xf9fc9
incq %rax
jmp 0xf9f83
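# Second instance of the (1,1,2,2) check and stride-2 gather copy, for a
# different caller context; note the row advance is doubled up front
# (subl %ebx,%r14d; addl %r14d,%r14d) before the unrolled byte loop.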
cmpl $0x1, 0xdc(%r14,%rax)
jne 0xfa279
cmpl $0x1, 0xe0(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe4(%r14,%rax)
jne 0xfa279
cmpl $0x2, 0xe8(%r14,%rax)
jne 0xfa279
movl 0xbc(%rsp), %r14d
movl 0xc8(%rsp), %r15d
movl 0x12c(%rsp), %ebx
movl 0x130(%rsp), %ebp
movl %ebp, %esi
imull %ebx, %esi
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x9
popq %rdx
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r15d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x30(%rsp)
subl %ebx, %r14d
addl %r14d, %r14d
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
movslq %r14d, %rsi
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x40(%rsp), %rcx
movq %r15, 0x28(%rsp)
cmpq %r15, %rdx
je 0xfbf58
movslq 0xbc(%rsp), %r9
movq 0xd0(%rsp), %rdi
imulq %rdx, %rdi
movq 0xa0(%rsp), %rax
imulq %rax, %rdi
addq 0x90(%rsp), %rdi
movq 0x390(%rsp), %r8
movq %rdx, 0x18(%rsp)
imulq %rdx, %r8
imulq 0x360(%rsp), %r8
addq 0x350(%rsp), %r8
imulq %rax, %r9
movq %r9, 0x60(%rsp)
xorl %r9d, %r9d
cmpq $0x3, %r9
je 0xfa267
movq 0x60(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0xfa25a
leaq (%r10,%r11), %rax
xorl %r14d, %r14d
cmpl %ebp, %r14d
je 0xfa252
leaq (%rax,%rsi), %r12
xorl %r13d, %r13d
movq %rax, %r15
leal 0x3(%r13), %edx
cmpl %ebx, %edx
jge 0xfa226
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
movb 0x4(%r15), %dl
movb %dl, 0x2(%r8,%r13)
movb 0x6(%r15), %dl
movb %dl, 0x3(%r8,%r13)
addq $0x8, %r15
addq $0x4, %r13
addq $0x8, %r12
jmp 0xfa1d2
movb (%r15), %dl
movb %dl, (%r8,%r13)
movb 0x2(%r15), %dl
movb %dl, 0x1(%r8,%r13)
addq $0x4, %r15
addq $0x2, %r13
addq $0x4, %r12
leal 0x1(%r13), %edx
cmpl %ebx, %edx
jl 0xfa20a
jmp 0xfa23f
movb (%rax,%r13,2), %dl
movb %dl, (%r8,%r13)
incq %r13
addq $0x2, %r12
cmpl %ebx, %r13d
jl 0xfa230
incl %r14d
addq %r13, %r8
movq %r12, %rax
jmp 0xfa1bf
incq %r11
jmp 0xfa1ae
incq %r9
movq 0x40(%rsp), %rcx
jmp 0xfa195
movq 0x18(%rsp), %rdx
incq %rdx
movq 0x28(%rsp), %r15
jmp 0xfa136
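# Generic fallback: all five parameters are loaded from the object
# (0xd8..0xe8) instead of being assumed constant, and the gather loop
# below is driven by the runtime strides; the byte flag at 0x1d(%rax)
# gates an extra buffer-allocation branch.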
movl 0xd8(%r14,%rax), %ebp
movslq 0xdc(%r14,%rax), %rdx
movq %rdx, 0x18(%rsp)
movslq 0xe0(%r14,%rax), %rdx
movq %rdx, 0x78(%rsp)
movslq 0xe4(%r14,%rax), %rbx
movslq 0xe8(%r14,%rax), %rax
movq %rax, 0x38(%rsp)
movl 0xbc(%rsp), %r15d
movl 0xc8(%rsp), %r13d
movslq 0x12c(%rsp), %r14
movl 0x130(%rsp), %r12d
movq %rbp, 0x50(%rsp)
movq %rcx, 0x58(%rsp)
imull %ecx, %ebp
movq 0x40(%rsp), %rax
cmpb $0x1, 0x1d(%rax)
jne 0xfa506
movl %r12d, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %ebp, %edx
movl %r13d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0xd8(%rsp)
imull 0x38(%rsp), %r15d
movl %r14d, %eax
imull %ebx, %eax
subl %eax, %r15d
leal (%rbx,%rbx), %eax
cltq
leal (%rbx,%rbx,2), %ecx
movslq %ecx, %rcx
xorl %r9d, %r9d
testl %r12d, %r12d
cmovlel %r9d, %r12d
leal (,%rbx,4), %edx
movq 0x58(%rsp), %rsi
testl %esi, %esi
cmovlel %r9d, %esi
movslq %edx, %rdi
movq 0x50(%rsp), %rdx
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x50(%rsp)
movslq %r15d, %r8
testl %r13d, %r13d
cmovlel %r9d, %r13d
movq %rsi, 0x58(%rsp)
cmpq %r13, %r9
je 0xfa59e
movq 0xd0(%rsp), %r10
imulq %r9, %r10
movq 0xa0(%rsp), %rdx
imulq %rdx, %r10
addq 0x90(%rsp), %r10
movq %r10, 0x38(%rsp)
movq 0x390(%rsp), %r10
movq %r9, 0x80(%rsp)
imulq %r9, %r10
imulq 0x360(%rsp), %r10
addq 0x350(%rsp), %r10
movslq 0xbc(%rsp), %r9
imulq %rdx, %r9
imulq 0x78(%rsp), %r9
movq %r9, 0x48(%rsp)
xorl %edx, %edx
cmpq 0x50(%rsp), %rdx
je 0xfa4f6
movq 0x48(%rsp), %r9
imulq %rdx, %r9
addq 0x38(%rsp), %r9
movq %r9, 0x30(%rsp)
xorl %r9d, %r9d
movq %rdx, 0x28(%rsp)
cmpq %rsi, %r9
je 0xfa4ee
movq %r9, 0x60(%rsp)
imulq 0x18(%rsp), %r9
addq 0x30(%rsp), %r9
xorl %esi, %esi
cmpl %r12d, %esi
je 0xfa4d7
xorl %r15d, %r15d
xorl %ebp, %ebp
movq %r9, %rdx
leal 0x3(%rbp), %r11d
cmpl %r14d, %r11d
jge 0xfa4a9
movb (%rdx), %r11b
movb %r11b, (%r10,%rbp)
movb (%rdx,%rbx), %r11b
movb %r11b, 0x1(%r10,%rbp)
movb (%rdx,%rax), %r11b
movb %r11b, 0x2(%r10,%rbp)
movb (%rdx,%rcx), %r11b
movb %r11b, 0x3(%r10,%rbp)
addq %rdi, %rdx
addq $0x4, %rbp
addq %rdi, %r15
jmp 0xfa458
movb (%rdx), %r11b
movb %r11b, (%r10,%rbp)
movb (%rdx,%rbx), %r11b
movb %r11b, 0x1(%r10,%rbp)
addq %rax, %rdx
addq $0x2, %rbp
addq %rax, %r15
leal 0x1(%rbp), %r11d
cmpl %r14d, %r11d
jl 0xfa48f
jmp 0xfa4c2
movb (%r9,%r15), %dl
movb %dl, (%r10,%rbp)
incq %rbp
addq %rbx, %r15
cmpl %r14d, %ebp
jl 0xfa4b4
addq %r8, %r9
addq %r15, %r9
incl %esi
addq %rbp, %r10
jmp 0xfa447
movq 0x60(%rsp), %r9
incq %r9
movq 0x58(%rsp), %rsi
movq 0x28(%rsp), %rdx
jmp 0xfa42c
incq %rdx
jmp 0xfa406
movq 0x80(%rsp), %r9
incq %r9
jmp 0xfa39a
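# Offset-table generation: the loop below fills a dword table with input
# offsets for each tap (inner step %r10d per column, plus a row-gap
# correction from 0x60(%rsp) at the end of each kernel row) - the usual
# "space offsets" setup for a sliding-window kernel.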
movl 0x138(%rsp), %eax
movq %rax, 0x48(%rsp)
movslq %ebp, %rsi
leaq 0x350(%rsp), %rdi
leaq 0x1b0(%rsp), %rdx
callq 0x73bbe
movl %r15d, %ecx
imull 0x78(%rsp), %ecx
movq 0x18(%rsp), %rax
movq 0x58(%rsp), %r15
imull %r15d, %eax
subl %eax, %ecx
movl %ecx, 0x60(%rsp)
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq 0x50(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x350(%rsp), %rax
movl %r15d, %edx
xorl %esi, %esi
xorl %edi, %edi
movq 0x18(%rsp), %r10
cmpl %r11d, %ecx
je 0xfa607
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r15), %edi
cmpq %r9, %r8
je 0xfa593
movl %esi, (%rax,%r9,4)
incq %r9
addl %r10d, %esi
jmp 0xfa582
addl 0x60(%rsp), %esi
incl %ecx
addl %r15d, %edx
jmp 0xfa56f
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0xd8(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0x107fba
movq 0x358(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
je 0xfa7ac
lock
decl (%rax)
jne 0xfa7ac
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbfb5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfa7ac
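# SIMD int8 inner product: each table entry selects one input byte
# (movslq/movsbl), broadcast to eight 16-bit lanes via vpshuflw/vpshufd;
# 8 weight bytes are sign-extended with vpcmpgtb+vpunpcklbw, multiplied
# with vpmullw/vpmulhw, the low four products widened to 32 bits with
# vpunpcklwd, and accumulated into %xmm1.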
movl %r14d, %ecx
shlq $0x2, %r14
xorl %edx, %edx
testl %ebp, %ebp
cmovlel %edx, %ebp
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %ecx, %ecx
movl $0x0, %esi
cmovgl %ecx, %esi
movq %rsi, 0x18(%rsp)
testl %r12d, %r12d
cmovlel %edx, %r12d
movq 0x48(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x48(%rsp)
leaq (,%rbp,4), %rsi
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x48(%rsp), %rdx
je 0xfa796
movq 0x140(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x110(%rsp), %rcx
addq 0x100(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rdx, 0x30(%rsp)
cmpq %r12, %rcx
je 0xfa78e
movq %rcx, 0x58(%rsp)
imulq 0x38(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r8d, %r8d
cmpq 0x18(%rsp), %r8
je 0xfa773
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %r11
imulq %rdx, %r11
imulq 0x28(%rcx), %r11
addq 0x18(%rcx), %r11
movslq 0xbc(%rsp), %rcx
imulq 0x28(%rsp), %rcx
movq 0xa0(%rsp), %rdi
movq 0xd0(%rsp), %rdx
imulq %rdi, %rdx
imulq %rdi, %rcx
addq 0x90(%rsp), %rcx
movq %r8, %r10
imulq %rbx, %r10
addq %rcx, %r10
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
cmpq %r13, %rcx
je 0xfa755
movq %rdx, %r15
imulq %rcx, %r15
addq %r10, %r15
xorl %edi, %edi
cmpq %rdi, %rbp
je 0xfa74d
movslq (%rax,%rdi,4), %r9
movsbl (%r15,%r9), %r9d
vmovd %r9d, %xmm2
vpshuflw $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0,4,5,6,7]
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovq (%r11,%rdi,4), %xmm3
vpcmpgtb %xmm3, %xmm0, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
vpmullw %xmm3, %xmm2, %xmm4
vpmulhw %xmm3, %xmm2, %xmm2
vpunpcklwd %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
vpaddd %xmm2, %xmm1, %xmm1
incq %rdi
jmp 0xfa70d
addq %rsi, %r11
incq %rcx
jmp 0xfa6fc
movq %r8, %rcx
shlq $0x4, %rcx
movq 0x60(%rsp), %rdx
vmovdqu %xmm1, (%rdx,%rcx)
incq %r8
movq 0x30(%rsp), %rdx
jmp 0xfa69d
movq 0x60(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x60(%rsp)
movq 0x58(%rsp), %rcx
incq %rcx
jmp 0xfa681
incq %rdx
jmp 0xfa64d
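# The temporary Mat-like object at 0x350(%rsp) is released via 0x624be,
# then the code re-dispatches on what looks like an element-size check
# (%ebx == 8) plus a count field at 0xe0(%rsp), choosing further
# specialized kernels below.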
leaq 0x350(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
cmpl $0x8, %ebx
jne 0xfa84f
cmpl $0x1, 0xe0(%rsp)
jne 0xfa84f
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %ecx
movq %rcx, 0x28(%rsp)
cmpl $0x1, %ecx
movq %r14, %rcx
movq 0x40(%rsp), %r14
jne 0xfaa33
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xfaa33
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfa8f4
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfa8f4
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xfa8f4
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xfa8f4
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x10150b
jmp 0xfbc1a
movq 0xe0(%rsp), %rcx
xorl $0x1, %ecx
movl %ebx, %eax
xorl $0x1, %eax
orl %ecx, %eax
jne 0xfbc1a
movq 0x20(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rcx,%rax), %edx
movq %rdx, 0x18(%rsp)
cmpl $0x1, %edx
movq 0x40(%rsp), %r14
jne 0xfb137
cmpl $0x1, 0xd8(%rcx,%rax)
jne 0xfb137
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfaf97
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfaf97
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xfaf97
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xfaf97
leaq 0x60(%rcx), %rdx
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x1015ee
jmp 0xfbc1a
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfaa33
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfaa33
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xfaa33
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xfaa33
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r12d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x12c(%rsp), %ebp
movl 0x130(%rsp), %r13d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
movq 0x90(%rsp), %rax
movq 0x350(%rsp), %rcx
xorl %esi, %esi
testl %ebp, %ebp
cmovlel %esi, %ebp
movslq %ebx, %rdi
testl %r13d, %r13d
cmovlel %esi, %r13d
testl %r12d, %r12d
cmovlel %esi, %r12d
shlq $0x3, %rdi
cmpq %r12, %rsi
je 0xfbde5
movq 0xd0(%rsp), %r8
imulq %rsi, %r8
imulq 0xa0(%rsp), %r8
addq %rax, %r8
movq 0x390(%rsp), %r9
imulq %rsi, %r9
imulq 0x360(%rsp), %r9
addq %rcx, %r9
xorl %r10d, %r10d
movl %ebp, %r11d
cmpl %r13d, %r10d
je 0xfaa2e
subl $0x1, %r11d
jb 0xfaa26
movq (%r8), %rbx
movq %rbx, (%r9)
addq $0x10, %r8
addq $0x8, %r9
jmp 0xfaa10
addq %rdi, %r8
incl %r10d
jmp 0xfaa08
incq %rsi
jmp 0xfa9cc
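# Optional fast path gated on object state: byte 0x1c(%r14), the value 3
# in 0x28(%rsp), byte 0x38(%r14) and the (3,1,1,1,1) field tuple must all
# match; the call to 0x732f7 then looks like a runtime capability probe,
# and only a nonzero result selects the kernel at 0x134547.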
cmpb $0x1, 0x1c(%r14)
jne 0xfaada
cmpl $0x3, 0x28(%rsp)
jne 0xfaada
cmpb $0x0, 0x38(%r14)
je 0xfaada
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xfaada
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfaada
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfaada
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xfaada
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xfaada
callq 0x732f7
movq 0x20(%rsp), %rcx
leaq 0xf0(%rcx), %r12
testl %eax, %eax
je 0xfc047
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r12, %rdx
movq 0x40(%rsp), %rcx
callq 0x134547
jmp 0xfbc1a
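# Generic variant again: runtime parameters are reloaded from 0xd8..0xe8
# and the offset/gather sequence below mirrors the earlier one, but moves
# 8-byte elements (note shlq $0x3 on the strides and the
# leaq (%r10,%rax,8) addressing in the qword copy loop).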
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r13d
movslq 0xdc(%rcx,%rax), %rbp
movslq 0xe0(%rcx,%rax), %rdx
movq %rdx, 0x30(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x48(%rsp)
movl 0xbc(%rsp), %eax
movl %eax, 0x18(%rsp)
movl 0xc8(%rsp), %r12d
movq %r14, %rax
movslq 0x12c(%rsp), %r14
movl 0x130(%rsp), %r15d
movq %r13, 0x60(%rsp)
imull 0x28(%rsp), %r13d
cmpb $0x1, 0x1d(%rax)
jne 0xfacde
movl %r15d, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movl 0x18(%rsp), %edx
imull 0x48(%rsp), %edx
movl %ebx, %ecx
imull %r14d, %ecx
xorl %esi, %esi
testl %r14d, %r14d
cmovlel %esi, %r14d
testl %r15d, %r15d
cmovlel %esi, %r15d
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x48(%rsp)
movq 0x28(%rsp), %r9
testl %r9d, %r9d
cmovlel %esi, %r9d
movslq 0xbc(%rsp), %rdi
movq 0x60(%rsp), %rax
testl %eax, %eax
cmovlel %esi, %eax
movq %rax, 0x60(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0x50(%rsp)
testl %r12d, %r12d
cmovlel %esi, %r12d
movq 0x350(%rsp), %rax
movq %rax, 0x38(%rsp)
imulq 0x30(%rsp), %rdi
movq %rdi, 0x58(%rsp)
movslq %edx, %r8
shlq $0x3, %r8
movslq %ecx, %rax
shlq $0x3, %rax
subq %rax, %r8
shlq $0x3, %rbx
cmpq %r12, %rsi
je 0xfad6e
movq 0xd0(%rsp), %rax
imulq %rsi, %rax
movq 0xa0(%rsp), %rcx
imulq %rcx, %rax
movq 0x390(%rsp), %r11
movq %rsi, 0x30(%rsp)
imulq %rsi, %r11
imulq 0x360(%rsp), %r11
addq 0x50(%rsp), %rax
movq %rax, 0x18(%rsp)
addq 0x38(%rsp), %r11
imulq 0x58(%rsp), %rcx
movq %rcx, 0x28(%rsp)
xorl %r13d, %r13d
cmpq 0x60(%rsp), %r13
je 0xfacd1
movq 0x28(%rsp), %r10
imulq %r13, %r10
addq 0x18(%rsp), %r10
xorl %edx, %edx
cmpq %r9, %rdx
je 0xfaccc
movq %rdx, %rax
imulq %rbp, %rax
leaq (%r10,%rax,8), %rdi
xorl %ecx, %ecx
cmpl %r15d, %ecx
je 0xfacc7
movl %r14d, %eax
subl $0x1, %eax
jb 0xfacc0
movq (%rdi), %rsi
movq %rsi, (%r11)
addq $0x8, %r11
addq %rbx, %rdi
jmp 0xfacac
addq %r8, %rdi
incl %ecx
jmp 0xfaca4
incq %rdx
jmp 0xfac92
incq %r13
jmp 0xfac7b
movq 0x30(%rsp), %rsi
incq %rsi
jmp 0xfac23
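# Second offset-table build, same pattern as before: per-tap dword input
# offsets with a row-gap fixup from 0x18(%rsp) after each kernel row.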
movl 0x138(%rsp), %eax
movq %rax, 0x80(%rsp)
movslq %r13d, %rsi
leaq 0x350(%rsp), %rdi
leaq 0x1b0(%rsp), %rdx
callq 0x73bbe
movl 0x18(%rsp), %ecx
imull 0x30(%rsp), %ecx
movl %ebp, %eax
movq 0x28(%rsp), %rdx
imull %edx, %eax
subl %eax, %ecx
movl %ecx, 0x18(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x60(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x350(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
cmpl %r11d, %ecx
je 0xfadcb
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xfad63
movl %esi, (%rax,%r9,4)
incq %r9
addl %ebp, %esi
jmp 0xfad53
addl 0x18(%rsp), %esi
incl %ecx
addl %r10d, %edx
jmp 0xfad40
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0x10865b
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
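# MAC loop over 8-byte groups: each table entry indexes a qword of inputs
# (vmovq (%rcx,%rsi,8)); inputs and weights are sign-extended from int8
# (vpcmpgtb+vpunpcklbw), multiplied with vpmullw/vpmulhw, the 32-bit
# halves summed, reduced horizontally with two vphaddd, and accumulated
# as a scalar in %edi.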
movq 0x110(%rsp), %rcx
imulq 0x140(%rsp), %rcx
movq %rcx, 0x70(%rsp)
shll $0x3, %ebx
xorl %edx, %edx
testl %r13d, %r13d
cmovlel %edx, %r13d
movq 0x100(%rsp), %rcx
movq %rcx, 0x78(%rsp)
testl %r12d, %r12d
cmovlel %edx, %r12d
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movq %rcx, 0x28(%rsp)
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0x80(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x80(%rsp)
xorl %ecx, %ecx
leaq (,%r13,8), %r8
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x80(%rsp), %rcx
je 0xfbc0d
movq 0x70(%rsp), %rdx
imulq %rcx, %rdx
addq 0x78(%rsp), %rdx
movq %rdx, 0x18(%rsp)
xorl %esi, %esi
movq %rcx, 0x38(%rsp)
cmpq %r15, %rsi
je 0xfaf8f
movq %rcx, %rdx
movq 0x20(%rsp), %rcx
movq 0x58(%rcx), %rdi
imulq %rdx, %rdi
imulq 0x28(%rcx), %rdi
addq 0x18(%rcx), %rdi
movq %rdi, 0x58(%rsp)
movq %rsi, %rcx
imulq 0x48(%rsp), %rcx
movq %rcx, 0x50(%rsp)
xorl %edx, %edx
movq %rsi, 0x30(%rsp)
cmpq 0x28(%rsp), %rdx
je 0xfaf74
movl %ebx, %ecx
movq %rdx, 0x60(%rsp)
imull %edx, %ecx
movslq 0xbc(%rsp), %rdi
imulq 0x50(%rsp), %rdi
movq 0xa0(%rsp), %rdx
movq 0xd0(%rsp), %rbp
imulq %rdx, %rbp
imulq %rdx, %rdi
addq 0x90(%rsp), %rdi
movslq %ecx, %rdx
addq %rdi, %rdx
xorl %r10d, %r10d
xorl %edi, %edi
movq 0x58(%rsp), %r11
cmpq %r12, %r10
je 0xfaf5a
movq %rbp, %rcx
imulq %r10, %rcx
addq %rdx, %rcx
xorl %r9d, %r9d
cmpq %r9, %r13
je 0xfaf52
movslq (%rax,%r9,4), %rsi
vmovq (%rcx,%rsi,8), %xmm1
vpcmpgtb %xmm1, %xmm0, %xmm2
vpunpcklbw %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
vmovq (%r11,%r9,8), %xmm2
vpcmpgtb %xmm2, %xmm0, %xmm3
vpunpcklbw %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
vpmullw %xmm1, %xmm2, %xmm3
vpmulhw %xmm2, %xmm1, %xmm1
vpunpcklwd %xmm1, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
vpunpckhwd %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
vpaddd %xmm1, %xmm2, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vmovd %xmm1, %esi
addl %esi, %edi
incq %r9
jmp 0xfaf05
addq %r8, %r11
incq %r10
jmp 0xfaef3
movq 0x18(%rsp), %rcx
movq 0x60(%rsp), %rdx
movl %edi, (%rcx,%rdx,4)
incq %rdx
movq 0x30(%rsp), %rsi
jmp 0xfaea0
movq 0x18(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x18(%rsp)
incq %rsi
movq 0x38(%rsp), %rcx
jmp 0xfae64
incq %rcx
jmp 0xfae3c
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfb137
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfb137
cmpl $0x2, 0xe4(%rcx,%rax)
jne 0xfb137
cmpl $0x2, 0xe8(%rcx,%rax)
jne 0xfb137
movl 0xbc(%rsp), %ebx
movl 0xc8(%rsp), %r12d
movq 0xa0(%rsp), %r8
movl 0xa8(%rsp), %r9d
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl 0x12c(%rsp), %ebp
movl 0x130(%rsp), %r13d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
leaq 0x60(%rax), %rdx
subl %ebp, %ebx
addl %ebx, %ebx
xorl %eax, %eax
testl %r13d, %r13d
cmovlel %eax, %r13d
movslq %ebx, %rcx
testl %r12d, %r12d
cmovlel %eax, %r12d
cmpq %r12, %rax
je 0xfbfc2
movq 0xd0(%rsp), %rsi
imulq %rax, %rsi
imulq 0xa0(%rsp), %rsi
addq 0x90(%rsp), %rsi
movq 0x390(%rsp), %rdi
imulq %rax, %rdi
imulq 0x360(%rsp), %rdi
addq 0x350(%rsp), %rdi
xorl %r8d, %r8d
cmpl %r13d, %r8d
je 0xfb12f
leaq (%rsi,%rcx), %r9
xorl %r10d, %r10d
movq %rsi, %r11
leal 0x3(%r10), %ebx
cmpl %ebp, %ebx
jge 0xfb103
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
movb 0x4(%r11), %bl
movb %bl, 0x2(%rdi,%r10)
movb 0x6(%r11), %bl
movb %bl, 0x3(%rdi,%r10)
addq $0x8, %r11
addq $0x4, %r10
addq $0x8, %r9
jmp 0xfb0af
movb (%r11), %bl
movb %bl, (%rdi,%r10)
movb 0x2(%r11), %bl
movb %bl, 0x1(%rdi,%r10)
addq $0x4, %r11
addq $0x2, %r10
addq $0x4, %r9
leal 0x1(%r10), %ebx
cmpl %ebp, %ebx
jl 0xfb0e7
jmp 0xfb11c
movb (%rsi,%r10,2), %r11b
movb %r11b, (%rdi,%r10)
addq $0x2, %r9
incq %r10
cmpl %ebp, %r10d
jl 0xfb10d
incl %r8d
addq %r10, %rdi
movq %r9, %rsi
jmp 0xfb09c
incq %rax
jmp 0xfb056
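# Heavily guarded fast path: besides the flag bytes and the (3,1,1,1,1)
# tuple, both a spatial-size product (0x70(%rsp)*0xf8(%rsp) >= 0x10) and
# the field at 0xd0 (>= 0x10) must be large enough. The 4-point add/sub
# butterflies further down match a Winograd F(2,3)-style input transform,
# so this is plausibly a Winograd branch; that naming is an inference.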
cmpb $0x1, 0x1c(%r14)
jne 0xfb6c2
cmpl $0x3, 0x18(%rsp)
jne 0xfb6c2
cmpb $0x0, 0x37(%r14)
je 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x3, 0xd8(%rcx,%rax)
jne 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xdc(%rcx,%rax)
jne 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe0(%rcx,%rax)
jne 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe4(%rcx,%rax)
jne 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x1, 0xe8(%rcx,%rax)
jne 0xfb6c2
movl 0x70(%rsp), %ecx
imull 0xf8(%rsp), %ecx
cmpl $0x10, %ecx
jl 0xfb6c2
movq 0x20(%rsp), %rcx
cmpl $0x10, 0xd0(%rcx,%rax)
jl 0xfb6c2
movslq 0xc8(%rsp), %r8
movl 0x12c(%rsp), %eax
movl 0x130(%rsp), %ecx
movslq 0x138(%rsp), %rdx
movq %rdx, 0x88(%rsp)
movq 0x98(%rsp), %rdx
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x350(%rsp)
movq 0xa0(%rsp), %rsi
movq %rsi, 0x360(%rsp)
movl 0xa8(%rsp), %esi
movl %esi, 0x368(%rsp)
movq 0xb0(%rsp), %rsi
movq %rsi, 0x370(%rsp)
movl 0xbc(%rsp), %esi
movl 0xc0(%rsp), %edi
vmovups 0xb8(%rsp), %xmm0
vmovups %xmm0, 0x378(%rsp)
movq %r8, 0x60(%rsp)
movl %r8d, 0x388(%rsp)
movq 0xd0(%rsp), %r8
movq %r8, 0x390(%rsp)
testq %rdx, %rdx
je 0xfb2a0
lock
incl (%rdx)
movl 0xbc(%rsp), %esi
movl 0xc0(%rsp), %edi
incl %eax
pushq $0x2
popq %r8
cltd
idivl %r8d
movl %eax, %r9d
incl %ecx
movl %ecx, %eax
cltd
idivl %r8d
movq %rax, %rbx
leal (%rax,%rax), %edx
movq %r9, 0x318(%rsp)
leal 0x2(%r9,%r9), %r14d
movq 0x40(%rsp), %rcx
vmovdqu (%rcx), %ymm0
vmovdqu 0x20(%rcx), %ymm1
leaq 0x290(%rsp), %rax
vmovdqu %ymm1, 0x20(%rax)
vmovdqu %ymm0, (%rax)
movq 0x10(%rcx), %rcx
movq %rcx, 0x8(%rax)
movl %edx, 0x338(%rsp)
movl %edx, %ecx
subl %edi, %ecx
addl $0x2, %ecx
movl %r14d, %r9d
subl %esi, %r9d
movq %rax, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x350(%rsp), %rsi
vpxor %xmm0, %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
vzeroupper
callq 0x6466c
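# Input has been padded by the call above so width/height round up to
# whole 2x2 tiles; next a destination buffer is created via 0x63810 with
# 16 elements in the leading dimension and element size 2 - consistent
# with one 4x4 block of int16 coefficients per output tile.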
leaq 0x1b0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebx, %edx
imull 0x318(%rsp), %edx
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x10
popq %rsi
pushq $0x2
popq %r8
movq %rdx, 0x48(%rsp)
movq 0x60(%rsp), %rcx
callq 0x63810
leaq 0x370(%rsp), %rsi
movq 0x318(%rsp), %rax
leal (%rax,%rax), %ecx
movl %ecx, 0x334(%rsp)
movq -0x10(%rsi), %rcx
imulq 0x20(%rsi), %rcx
movq %rcx, 0x28(%rsp)
leaq 0x1d0(%rsp), %rdx
movq -0x10(%rdx), %rcx
imulq 0x20(%rdx), %rcx
movq %rcx, 0x30(%rsp)
movq -0x20(%rsi), %rcx
movq %rcx, 0x50(%rsp)
movslq %r14d, %rsi
addl %r14d, %r14d
xorl %edi, %edi
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movl %ecx, 0x58(%rsp)
movq -0x20(%rdx), %rax
movq %rax, 0x38(%rsp)
testl %ebx, %ebx
cmovlel %edi, %ebx
movq %rbx, 0x80(%rsp)
movq 0x60(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x18(%rsp)
cmpq 0x18(%rsp), %rdi
je 0xfc076
movq 0x28(%rsp), %r10
imulq %rdi, %r10
addq 0x50(%rsp), %r10
movq 0x30(%rsp), %r11
imulq %rdi, %r11
addq 0x38(%rsp), %r11
xorl %ebx, %ebx
cmpq 0x80(%rsp), %rbx
je 0xfb6ba
movl %r14d, %edx
imull %ebx, %edx
movslq %edx, %r12
addq %r10, %r12
leaq (%r12,%rsi), %r13
leaq (%rsi,%r13), %r15
leaq (%r15,%rsi), %rdx
xorl %r8d, %r8d
cmpl 0x58(%rsp), %r8d
je 0xfb6b2
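# Per-tile transform body: gather 4 bytes from each of four input rows
# (%r12/%r13/%r15/%rdx) into int16 temps, run the row butterfly
# (d0-d2, d1+d2, d2-d1, d3-d1), transpose through the 0x210..0x22e
# staging area, apply the same butterfly down the columns, and store the
# 16 resulting words to (%r11) in 8-byte-strided rows.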
xorl %ebp, %ebp
cmpq $0x4, %rbp
je 0xfb49f
movsbl (%r12,%rbp), %eax
movw %ax, 0x160(%rsp,%rbp,2)
movsbl (%r13,%rbp), %eax
movw %ax, 0x240(%rsp,%rbp,2)
movsbl (%r15,%rbp), %eax
movw %ax, 0x6a0(%rsp,%rbp,2)
movsbl (%rdx,%rbp), %eax
movw %ax, 0x660(%rsp,%rbp,2)
incq %rbp
jmp 0xfb460
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xfb4f8
movzwl 0x160(%rsp,%rbp), %eax
movzwl 0x6a0(%rsp,%rbp), %ecx
subl %ecx, %eax
movw %ax, 0x620(%rsp,%rbp)
movzwl 0x240(%rsp,%rbp), %eax
leal (%rax,%rcx), %r9d
movw %r9w, 0x340(%rsp,%rbp)
subl %eax, %ecx
movw %cx, 0x300(%rsp,%rbp)
movzwl 0x660(%rsp,%rbp), %ecx
subl %eax, %ecx
movw %cx, 0x2f0(%rsp,%rbp)
addq $0x2, %rbp
jmp 0xfb4a1
movzwl 0x620(%rsp), %eax
movzwl 0x622(%rsp), %ebp
movw %ax, 0x228(%rsp)
movw %bp, 0x220(%rsp)
movzwl 0x624(%rsp), %eax
movw %ax, 0x218(%rsp)
movzwl 0x626(%rsp), %eax
movw %ax, 0x210(%rsp)
movzwl 0x340(%rsp), %eax
movzwl 0x342(%rsp), %ebp
movw %ax, 0x22a(%rsp)
movw %bp, 0x222(%rsp)
movzwl 0x344(%rsp), %eax
movw %ax, 0x21a(%rsp)
movzwl 0x346(%rsp), %eax
movw %ax, 0x212(%rsp)
movzwl 0x300(%rsp), %eax
movzwl 0x302(%rsp), %ebp
movw %ax, 0x22c(%rsp)
movw %bp, 0x224(%rsp)
movzwl 0x304(%rsp), %eax
movzwl 0x306(%rsp), %ebp
movw %ax, 0x21c(%rsp)
movw %bp, 0x214(%rsp)
movzwl 0x2f0(%rsp), %eax
movzwl 0x2f2(%rsp), %ebp
movw %ax, 0x22e(%rsp)
movw %bp, 0x226(%rsp)
movzwl 0x2f4(%rsp), %eax
movw %ax, 0x21e(%rsp)
movzwl 0x2f6(%rsp), %eax
movw %ax, 0x216(%rsp)
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xfb651
movzwl 0x228(%rsp,%rbp), %eax
movzwl 0x218(%rsp,%rbp), %ecx
subl %ecx, %eax
movw %ax, 0x160(%rsp,%rbp)
movzwl 0x220(%rsp,%rbp), %eax
leal (%rax,%rcx), %r9d
movw %r9w, 0x240(%rsp,%rbp)
subl %eax, %ecx
movw %cx, 0x6a0(%rsp,%rbp)
movzwl 0x210(%rsp,%rbp), %ecx
subl %eax, %ecx
movw %cx, 0x660(%rsp,%rbp)
addq $0x2, %rbp
jmp 0xfb5fa
xorl %ebp, %ebp
cmpq $0x8, %rbp
je 0xfb696
movzwl 0x160(%rsp,%rbp), %eax
movw %ax, (%r11,%rbp)
movzwl 0x240(%rsp,%rbp), %eax
movw %ax, 0x8(%r11,%rbp)
movzwl 0x6a0(%rsp,%rbp), %eax
movw %ax, 0x10(%r11,%rbp)
movzwl 0x660(%rsp,%rbp), %eax
movw %ax, 0x18(%r11,%rbp)
addq $0x2, %rbp
jmp 0xfb653
addq $0x2, %r12
addq $0x2, %r13
addq $0x2, %r15
addq $0x2, %rdx
addq $0x20, %r11
incl %r8d
jmp 0xfb453
incq %rbx
jmp 0xfb42a
incq %rdi
jmp 0xfb401
movq 0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r13d
movslq 0xdc(%rcx,%rax), %rdx
movq %rdx, 0x28(%rsp)
movslq 0xe0(%rcx,%rax), %rdx
movq %rdx, 0x70(%rsp)
movslq 0xe4(%rcx,%rax), %rbx
movslq 0xe8(%rcx,%rax), %rax
movq %rax, 0x80(%rsp)
movl 0xbc(%rsp), %r15d
movl 0xc8(%rsp), %r12d
movq %r14, %rax
movslq 0x12c(%rsp), %r14
movl 0x130(%rsp), %ecx
movq %r13, 0x50(%rsp)
imull 0x18(%rsp), %r13d
cmpb $0x1, 0x1d(%rax)
jne 0xfb95d
movq %rcx, %rbp
movl %ecx, %esi
imull %r14d, %esi
movq 0x10(%rax), %rax
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
pushq $0x1
popq %r8
pushq $0x1
popq %r9
movl %r13d, %edx
movl %r12d, %ecx
callq 0x628f2
movq 0x20(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x78(%rsp)
movl %r15d, %r8d
imull 0x80(%rsp), %r8d
movl %r14d, %eax
imull %ebx, %eax
subl %eax, %r8d
leal (%rbx,%rbx), %eax
cltq
leal (%rbx,%rbx,2), %ecx
movslq %ecx, %rcx
xorl %r9d, %r9d
movq %rbp, %r15
testl %r15d, %r15d
cmovlel %r9d, %r15d
leal (,%rbx,4), %edx
movq 0x18(%rsp), %rsi
testl %esi, %esi
cmovlel %r9d, %esi
movq %rsi, 0x18(%rsp)
movslq %edx, %rdi
movq 0x50(%rsp), %rdx
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x50(%rsp)
movslq %r8d, %r8
testl %r12d, %r12d
cmovlel %r9d, %r12d
cmpq %r12, %r9
je 0xfb9fa
movq 0xd0(%rsp), %rsi
imulq %r9, %rsi
movq 0xa0(%rsp), %rdx
imulq %rdx, %rsi
addq 0x90(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x390(%rsp), %r10
movq %r9, 0x80(%rsp)
imulq %r9, %r10
imulq 0x360(%rsp), %r10
addq 0x350(%rsp), %r10
movslq 0xbc(%rsp), %rsi
imulq %rdx, %rsi
imulq 0x70(%rsp), %rsi
movq %rsi, 0x48(%rsp)
xorl %edx, %edx
cmpq 0x50(%rsp), %rdx
je 0xfb94d
movq 0x48(%rsp), %rsi
imulq %rdx, %rsi
addq 0x38(%rsp), %rsi
movq %rsi, 0x58(%rsp)
xorl %esi, %esi
movq %rdx, 0x30(%rsp)
cmpq 0x18(%rsp), %rsi
je 0xfb945
movq %rsi, 0x60(%rsp)
movq %rsi, %rdx
imulq 0x28(%rsp), %rdx
addq 0x58(%rsp), %rdx
xorl %r9d, %r9d
cmpl %r15d, %r9d
je 0xfb933
xorl %r13d, %r13d
xorl %r11d, %r11d
movq %rdx, %rsi
leal 0x3(%r11), %ebp
cmpl %r14d, %ebp
jge 0xfb904
movb (%rsi), %bpl
movb %bpl, (%r10,%r11)
movb (%rsi,%rbx), %bpl
movb %bpl, 0x1(%r10,%r11)
movb (%rsi,%rax), %bpl
movb %bpl, 0x2(%r10,%r11)
movb (%rsi,%rcx), %bpl
movb %bpl, 0x3(%r10,%r11)
addq %rdi, %rsi
addq $0x4, %r11
addq %rdi, %r13
jmp 0xfb8b3
movb (%rsi), %bpl
movb %bpl, (%r10,%r11)
movb (%rsi,%rbx), %bpl
movb %bpl, 0x1(%r10,%r11)
addq %rax, %rsi
addq $0x2, %r11
addq %rax, %r13
leal 0x1(%r11), %ebp
cmpl %r14d, %ebp
jl 0xfb8ea
jmp 0xfb91d
movb (%rdx,%r13), %sil
movb %sil, (%r10,%r11)
incq %r11
addq %rbx, %r13
cmpl %r14d, %r11d
jl 0xfb90f
addq %r8, %rdx
addq %r13, %rdx
incl %r9d
addq %r11, %r10
jmp 0xfb8a1
movq 0x60(%rsp), %rsi
incq %rsi
movq 0x30(%rsp), %rdx
jmp 0xfb880
incq %rdx
jmp 0xfb85b
movq 0x80(%rsp), %r9
incq %r9
jmp 0xfb7ef
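# Offset table once more (per-tap input offsets with row-gap fixup from
# 0x60(%rsp)), feeding the scalar int8 dot-product loop that follows.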
movq %rcx, 0x38(%rsp)
movl 0x138(%rsp), %eax
movq %rax, 0x78(%rsp)
movslq %r13d, %rbp
leaq 0x350(%rsp), %rdi
leaq 0x1b0(%rsp), %rdx
movq %rbp, %rsi
callq 0x73bbe
imull 0x70(%rsp), %r15d
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rdx
imull %edx, %eax
subl %eax, %r15d
movl %r15d, 0x60(%rsp)
xorl %ecx, %ecx
testl %edx, %edx
cmovlel %ecx, %edx
movq 0x50(%rsp), %r11
testl %r11d, %r11d
cmovlel %ecx, %r11d
movq 0x350(%rsp), %rax
movq %rdx, %r10
xorl %esi, %esi
xorl %edi, %edi
movq 0x28(%rsp), %r15
cmpl %r11d, %esi
je 0xfba57
movslq %edx, %r8
movslq %edi, %r9
leal (%r9,%r10), %edi
cmpq %r9, %r8
je 0xfb9ef
movl %ecx, (%rax,%r9,4)
incq %r9
addl %r15d, %ecx
jmp 0xfb9de
addl 0x60(%rsp), %ecx
incl %esi
addl %r10d, %edx
jmp 0xfb9cb
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x78(%rsp), %rdx
movq 0x40(%rsp), %rcx
callq 0x108d93
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
movq 0x110(%rsp), %rcx
imulq 0x140(%rsp), %rcx
movq %rcx, 0xf8(%rsp)
movl %r12d, %ecx
imull %r13d, %ecx
movl %ecx, 0xe0(%rsp)
xorl %edx, %edx
testl %r13d, %r13d
cmovlel %edx, %r13d
testl %r12d, %r12d
cmovlel %edx, %r12d
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movq %rcx, 0x28(%rsp)
movq 0x38(%rsp), %r15
testl %r15d, %r15d
cmovlel %edx, %r15d
movq %r15, 0x38(%rsp)
movq 0x100(%rsp), %rcx
movq %rcx, 0xd8(%rsp)
movq 0x78(%rsp), %rcx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x78(%rsp)
xorl %ecx, %ecx
cmpq 0x78(%rsp), %rcx
je 0xfbc0d
movq 0xf8(%rsp), %rdx
imulq %rcx, %rdx
addq 0xd8(%rsp), %rdx
movq %rdx, 0x18(%rsp)
movq %rcx, %rdx
movl 0xe0(%rsp), %ecx
movq %rdx, 0x70(%rsp)
imull %edx, %ecx
movslq %ecx, %rcx
movq %rcx, 0x48(%rsp)
xorl %ecx, %ecx
cmpq 0x38(%rsp), %rcx
je 0xfbc00
movq %rcx, %rdx
movq 0x20(%rsp), %rcx
movq 0x18(%rcx), %rcx
addq 0x48(%rsp), %rcx
movq %rcx, 0x30(%rsp)
movq %rdx, 0x50(%rsp)
imulq 0x80(%rsp), %rdx
movq %rdx, 0x58(%rsp)
xorl %r8d, %r8d
cmpq 0x28(%rsp), %r8
je 0xfbbe5
movslq 0xbc(%rsp), %rcx
imulq 0x58(%rsp), %rcx
movq 0xa0(%rsp), %rdx
movq 0xd0(%rsp), %r9
imulq %rdx, %r9
imulq %rdx, %rcx
addq 0x90(%rsp), %rcx
movq %r8, 0x60(%rsp)
imulq %rbx, %r8
addq %rcx, %r8
xorl %edx, %edx
xorl %edi, %edi
movq 0x30(%rsp), %rcx
cmpq %r12, %rdx
je 0xfbbcf
movq %r9, %rsi
imulq %rdx, %rsi
addq %r8, %rsi
xorl %r11d, %r11d
cmpq %r11, %r13
je 0xfbbc7
movslq (%rax,%r11,4), %r10
movsbl (%rsi,%r10), %r10d
movsbl (%rcx,%r11), %r15d
imull %r10d, %r15d
addl %r15d, %edi
incq %r11
jmp 0xfbba8
addq %rbp, %rcx
incq %rdx
jmp 0xfbb96
movq 0x18(%rsp), %rcx
movq 0x60(%rsp), %r8
movl %edi, (%rcx,%r8,4)
incq %r8
jmp 0xfbb48
movq 0x18(%rsp), %rcx
leaq (%rcx,%r14,4), %rcx
movq %rcx, 0x18(%rsp)
movq 0x50(%rsp), %rcx
incq %rcx
jmp 0xfbb11
movq 0x70(%rsp), %rcx
incq %rcx
jmp 0xfbad1
leaq 0x350(%rsp), %rdi
callq 0x624be
movq 0x20(%rsp), %rsi
leaq 0x190(%rsi), %rdx
movq (%rsi), %rax
movq -0x18(%rax), %rcx
addq %rsi, %rcx
cmpl $0x65, 0x33c(%rsp)
jl 0xfbc80
leaq 0x280(%rcx), %rax
leaq 0x1a8(%rcx), %r8
movl 0x10c(%rcx), %r9d
addq $0x110, %rcx # imm = 0x110
xorl %ebp, %ebp
movq 0x40(%rsp), %rsi
movq %rsi, 0x8(%rsp)
movq %rcx, (%rsp)
leaq 0x100(%rsp), %rdi
movq 0x328(%rsp), %rsi
movq %rax, %rcx
callq 0x657e8
jmp 0xfbcc4
addq $0x1a8, %rcx # imm = 0x1A8
leaq 0x100(%rsp), %rdi
movq 0x328(%rsp), %rsi
movq 0x40(%rsp), %r8
callq 0x654c7
movq 0x20(%rsp), %rax
movq 0x8(%rax), %rdi
xorl %ebp, %ebp
testq %rdi, %rdi
je 0xfbcc4
movq (%rdi), %rax
movq 0x328(%rsp), %rsi
movq 0x40(%rsp), %rdx
callq *0x48(%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0xfbcfb
lock
decl (%rax)
jne 0xfbcfb
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0xfbcf3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbcfb
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xfbd32
lock
decl (%rax)
jne 0xfbd32
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0xfbd2a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbd32
movq %rsi, %rdi
callq 0x5f3e0
movq 0x5d8(%rsp), %rax
testq %rax, %rax
je 0xfbd69
lock
decl (%rax)
jne 0xfbd69
movq 0x5d0(%rsp), %rsi
movq 0x5f0(%rsp), %rdi
testq %rdi, %rdi
je 0xfbd61
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbd69
movq %rsi, %rdi
callq 0x5f3e0
movl %ebp, %eax
addq $0x6e8, %rsp # imm = 0x6E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x101345
movq 0x358(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
je 0xf99ed
lock
decl (%rax)
jne 0xf99ed
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbdd8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xf99ed
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xf99ed
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x10150b
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x30(%rsp), %rdx
callq 0x107fba
movq 0x20(%rsp), %r14
movq 0x358(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
je 0xfa7ac
lock
decl (%rax)
jne 0xfa7ac
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbfb5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfa7ac
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x18(%rsp), %rdx
callq 0x107fba
movq 0x20(%rsp), %r14
movq 0x358(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
je 0xfa7ac
lock
decl (%rax)
jne 0xfa7ac
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbfb5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfa7ac
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x101428
movq 0x358(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r14
movl 0x70(%rsp), %ebx
je 0xfa7ac
lock
decl (%rax)
jne 0xfa7ac
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbfb5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfa7ac
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq 0x30(%rsp), %rdx
callq 0x107fba
movq 0x20(%rsp), %r14
movq 0x358(%rsp), %rax
testq %rax, %rax
movl 0x70(%rsp), %ebx
je 0xfa7ac
lock
decl (%rax)
jne 0xfa7ac
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfbfb5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfa7ac
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xfa7ac
leaq 0x350(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r14, %rcx
callq 0x1015ee
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
callq 0x732db
testl %eax, %eax
je 0xfc0b4
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r12, %rdx
movq 0x40(%rsp), %rcx
callq 0x13b55a
jmp 0xfbc1a
callq 0x732db
testl %eax, %eax
je 0xfc603
leaq 0x90(%rsp), %rdi
leaq 0x100(%rsp), %rsi
movq %r12, %rdx
movq 0x40(%rsp), %rcx
callq 0x1397bb
jmp 0xfbc1a
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfcb32
lock
decl (%rax)
jne 0xfcb32
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfcb2a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfcb32
movl 0xc8(%rsp), %esi
movl 0xa8(%rsp), %edx
vmovq 0x12c(%rsp), %xmm0
movl 0x138(%rsp), %eax
movl %eax, 0xd8(%rsp)
movq 0x98(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm1
vmovaps %xmm1, 0x1b0(%rsp)
movq 0xa0(%rsp), %rax
movq %rax, 0x1c0(%rsp)
movq %rdx, 0xe8(%rsp)
movl %edx, 0x1c8(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
vmovups 0xb8(%rsp), %xmm1
vmovups %xmm1, 0x1d8(%rsp)
movl %esi, 0x78(%rsp)
movl %esi, 0x1e8(%rsp)
movq 0xd0(%rsp), %rsi
movq %rsi, 0x1f0(%rsp)
testq %rcx, %rcx
je 0xfc173
lock
incl (%rcx)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
vbroadcastss 0x2f6590(%rip), %xmm1 # 0x3f270c
vpaddd %xmm1, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
vpsrld $0x1e, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm1
vbroadcastss 0x2f6579(%rip), %xmm0 # 0x3f2710
vmovdqa %xmm1, 0x60(%rsp)
vpand %xmm0, %xmm1, %xmm0
vpextrd $0x1, %xmm0, %ecx
vmovdqa %xmm0, 0x2e0(%rsp)
vmovd %xmm0, %esi
movq %rsi, 0x208(%rsp)
leal 0x2(%rsi), %ebx
movq %rcx, 0x158(%rsp)
addl $0x2, %ecx
subl %edx, %ecx
movl %ebx, %r9d
subl %eax, %r9d
movq 0x40(%rsp), %rax
movq %rax, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x1b0(%rsp), %rsi
vpxor %xmm0, %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6466c
vmovdqa 0x60(%rsp), %xmm0
vpsrad $0x2, %xmm0, %xmm0
leaq 0x160(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovd %xmm0, %eax
movl %eax, 0x230(%rsp)
imull $0x6, %eax, %eax
vpextrd $0x1, %xmm0, %ecx
movl %eax, 0x50(%rsp)
movq %rcx, %r14
imull %ecx, %eax
movl %eax, 0x150(%rsp)
cltd
pushq $0x6
popq %rcx
idivl %ecx
movl %eax, %esi
movq 0xe8(%rsp), %r9
leal (%r9,%r9), %r8d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rdx
movq %rsi, 0x28(%rsp)
movl 0x78(%rsp), %ecx
movq %r8, 0xf0(%rsp)
callq 0x628f2
shll $0x3, %ebx
movslq %ebx, %rcx
movq 0x28(%rsp), %rdx
leal (,%rdx,8), %eax
movslq %eax, %r12
movl %edx, %eax
shll $0x4, %eax
movslq %eax, %rbp
imull $0x18, %edx, %eax
movslq %eax, %rbx
movl %edx, %eax
shll $0x5, %eax
movslq %eax, %rsi
imull $0x28, %edx, %eax
movslq %eax, %rdi
imull $0x30, %edx, %eax
xorl %r10d, %r10d
movl 0x230(%rsp), %r8d
testl %r8d, %r8d
movl $0x0, %edx
cmovgl %r8d, %edx
movq %rdx, 0x60(%rsp)
movslq %eax, %r9
testl %r14d, %r14d
cmovlel %r10d, %r14d
movq %r14, 0x58(%rsp)
movl 0x78(%rsp), %eax
testl %eax, %eax
movl $0x0, %r13d
cmovgl %eax, %r13d
addq %r9, %r9
pushq $-0x60
popq %r8
pushq $0x50
popq %r11
vbroadcastss 0x2f65b9(%rip), %xmm0 # 0x3f28c8
vpxor %xmm10, %xmm10, %xmm10
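# 6-point input transform: three 16-byte rows are loaded and sign-extended
# to six word vectors (vpcmpgtb + vpunpck[lh]bw), then combined with x4
# terms (vpsllw $0x2) and a vpmullw by the broadcast constant at 0x3f28c8
# (plausibly -5, giving 4*d0 - 5*d2 + d4 etc.); the six intermediate rows
# land at 0x3b0..0x590(%rsp) and a second pass below repeats the butterfly
# across rows, scattering results at strides %r12/%rbp/%rbx/%rsi/%rdi.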
cmpq %r13, %r10
je 0xfc5c5
movq %r10, %r14
movslq 0x1dc(%rsp), %r15
movq 0x1b0(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x1c0(%rsp), %rdx
movq 0x1a0(%rsp), %rax
movq 0x1f0(%rsp), %r10
imulq %rdx, %r10
imulq 0x170(%rsp), %rax
imulq %r14, %rax
addq 0x160(%rsp), %rax
movq %rax, 0x48(%rsp)
imulq %rdx, %r15
shlq $0x2, %r15
movq %r15, 0x38(%rsp)
movq %r14, 0x80(%rsp)
imulq %r14, %r10
movq 0x18(%rsp), %rax
leaq (%rax,%r10), %r14
addq $0x20, %r14
xorl %edx, %edx
cmpq 0x58(%rsp), %rdx
je 0xfc5b5
movl 0x50(%rsp), %eax
movq %rdx, 0x30(%rsp)
imull %edx, %eax
cltd
pushq $0x6
popq %r10
idivl %r10d
cltq
shlq $0x4, %rax
addq 0x48(%rsp), %rax
movq %r14, 0x18(%rsp)
movq %r14, %rdx
xorl %r15d, %r15d
cmpq 0x60(%rsp), %r15
je 0xfc59e
movq %rdx, %r14
movq %r8, %r10
testq %r10, %r10
je 0xfc4bb
vmovdqu -0x20(%r14), %xmm1
vmovdqu -0x10(%r14), %xmm2
vmovdqu (%r14), %xmm3
vpcmpgtb %xmm1, %xmm10, %xmm4
vpcmpgtb %xmm2, %xmm10, %xmm5
vpcmpgtb %xmm3, %xmm10, %xmm6
vpunpcklbw %xmm4, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
vpunpckhbw %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
vpunpcklbw %xmm5, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
vpunpckhbw %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
vpunpcklbw %xmm6, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
vpunpckhbw %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
vpsllw $0x2, %xmm7, %xmm6
vpaddw %xmm5, %xmm6, %xmm6
vpmullw %xmm0, %xmm4, %xmm7
vpaddw %xmm7, %xmm6, %xmm6
vpaddw %xmm2, %xmm5, %xmm7
vpaddw %xmm1, %xmm4, %xmm8
vpsllw $0x2, %xmm8, %xmm8
vpsubw %xmm8, %xmm7, %xmm7
vpsubw %xmm2, %xmm5, %xmm8
vpsubw %xmm4, %xmm1, %xmm9
vpsllw $0x2, %xmm9, %xmm9
vpaddw %xmm9, %xmm8, %xmm8
vpsubw %xmm4, %xmm5, %xmm4
vpsubw %xmm2, %xmm1, %xmm5
vpaddw %xmm5, %xmm5, %xmm5
vpsubw %xmm5, %xmm4, %xmm9
vpaddw %xmm5, %xmm4, %xmm4
vpsllw $0x2, %xmm1, %xmm1
vpaddw %xmm3, %xmm1, %xmm1
vpmullw %xmm0, %xmm2, %xmm2
vpaddw %xmm2, %xmm1, %xmm1
vmovdqa %xmm6, 0x3b0(%rsp,%r10)
vmovdqa %xmm7, 0x410(%rsp,%r10)
vmovdqa %xmm8, 0x470(%rsp,%r10)
vmovdqa %xmm9, 0x4d0(%rsp,%r10)
vmovdqa %xmm4, 0x530(%rsp,%r10)
vmovdqa %xmm1, 0x590(%rsp,%r10)
addq $0x10, %r10
addq %rcx, %r14
jmp 0xfc3d9
movq %rax, %r10
movq %r11, %r14
cmpq $0x290, %r14 # imm = 0x290
je 0xfc58e
vmovdqa 0x300(%rsp,%r14), %xmm1
vmovdqa 0x310(%rsp,%r14), %xmm2
vmovdqa 0x320(%rsp,%r14), %xmm3
vmovdqa 0x330(%rsp,%r14), %xmm4
vmovdqa 0x340(%rsp,%r14), %xmm5
vpsllw $0x2, %xmm1, %xmm1
vpmullw %xmm0, %xmm3, %xmm6
vpaddw %xmm5, %xmm1, %xmm1
vpaddw %xmm1, %xmm6, %xmm1
vpaddw %xmm4, %xmm5, %xmm6
vpaddw %xmm2, %xmm3, %xmm7
vpsllw $0x2, %xmm7, %xmm7
vpsubw %xmm7, %xmm6, %xmm6
vpsubw %xmm4, %xmm5, %xmm7
vpsubw %xmm3, %xmm2, %xmm8
vpsllw $0x2, %xmm8, %xmm8
vpaddw %xmm7, %xmm8, %xmm7
vpsubw %xmm3, %xmm5, %xmm3
vpsubw %xmm4, %xmm2, %xmm5
vpaddw %xmm5, %xmm5, %xmm5
vpsubw %xmm5, %xmm3, %xmm8
vpaddw %xmm5, %xmm3, %xmm3
vpsllw $0x2, %xmm2, %xmm2
vpmullw %xmm0, %xmm4, %xmm4
vpaddw %xmm2, %xmm4, %xmm2
vpaddw 0x350(%rsp,%r14), %xmm2, %xmm2
vmovdqu %xmm1, (%r10)
vmovdqu %xmm6, (%r10,%r12,2)
vmovdqu %xmm7, (%r10,%rbp,2)
vmovdqu %xmm8, (%r10,%rbx,2)
vmovdqu %xmm3, (%r10,%rsi,2)
vmovdqu %xmm2, (%r10,%rdi,2)
addq $0x60, %r14
addq %r9, %r10
jmp 0xfc4c1
incq %r15
addq $0x20, %rdx
addq $0x10, %rax
jmp 0xfc3c8
movq 0x30(%rsp), %rdx
incq %rdx
movq 0x18(%rsp), %r14
addq 0x38(%rsp), %r14
jmp 0xfc393
movq 0x80(%rsp), %r10
incq %r10
jmp 0xfc314
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xfd8a0
lock
decl (%rax)
jne 0xfd8a0
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xfd898
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfd8a0
movl 0xc8(%rsp), %esi
movl 0xa8(%rsp), %edx
vmovq 0x12c(%rsp), %xmm0
movslq 0x138(%rsp), %rax
movq %rax, 0x88(%rsp)
movq 0x98(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm1
vmovaps %xmm1, 0x1b0(%rsp)
movq 0xa0(%rsp), %rax
movq %rax, 0x1c0(%rsp)
movq %rdx, 0x80(%rsp)
movl %edx, 0x1c8(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
vmovups 0xb8(%rsp), %xmm1
vmovups %xmm1, 0x1d8(%rsp)
movl %esi, 0x48(%rsp)
movl %esi, 0x1e8(%rsp)
movq 0xd0(%rsp), %rsi
movq %rsi, 0x1f0(%rsp)
testq %rcx, %rcx
je 0xfc6c4
lock
incl (%rcx)
movl 0xbc(%rsp), %eax
movl 0xc0(%rsp), %edx
vbroadcastss 0x2f603f(%rip), %xmm1 # 0x3f270c
vpaddd %xmm1, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
vpsrld $0x1e, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm1
vbroadcastss 0x2f6028(%rip), %xmm0 # 0x3f2710
vmovdqa %xmm1, 0x60(%rsp)
vpand %xmm0, %xmm1, %xmm0
vpextrd $0x1, %xmm0, %esi
vmovdqa %xmm0, 0x230(%rsp)
vmovd %xmm0, %ecx
movq %rcx, 0xe8(%rsp)
leal 0x2(%rcx), %ebx
movq %rsi, 0xf0(%rsp)
leal 0x2(%rsi), %ecx
subl %edx, %ecx
movl %ebx, %r9d
subl %eax, %r9d
movq 0x40(%rsp), %rax
movq %rax, 0x8(%rsp)
andl $0x0, (%rsp)
leaq 0x90(%rsp), %rdi
leaq 0x1b0(%rsp), %rsi
vpxor %xmm0, %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6466c
vmovdqa 0x60(%rsp), %xmm0
vpsrad $0x2, %xmm0, %xmm0
leaq 0x160(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovd %xmm0, %eax
movq %rax, %r14
imull $0x6, %eax, %eax
vpextrd $0x1, %xmm0, %ecx
movl %eax, 0xe0(%rsp)
movq %rcx, %r15
imull %ecx, %eax
movl %eax, 0x78(%rsp)
cltd
pushq $0x6
popq %rcx
idivl %ecx
movl %eax, %esi
movq 0x80(%rsp), %r9
leal (%r9,%r9), %r8d
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rdx
movq %rsi, 0x60(%rsp)
movl 0x48(%rsp), %ecx
movq %r8, 0x70(%rsp)
callq 0x628f2
pushq $-0x60
popq %rbp
pushq $0x50
popq %rsi
shll $0x3, %ebx
movslq %ebx, %rdi
movq 0x60(%rsp), %rcx
leal (,%rcx,8), %eax
movslq %eax, %r8
movl %ecx, %eax
shll $0x4, %eax
movslq %eax, %r9
imull $0x18, %ecx, %eax
movslq %eax, %r10
movl %ecx, %eax
shll $0x5, %eax
movslq %eax, %r11
imull $0x28, %ecx, %eax
movslq %eax, %rbx
imull $0x30, %ecx, %eax
movslq %eax, %r12
xorl %ecx, %ecx
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq %r14, 0x18(%rsp)
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq %r15, 0xf8(%rsp)
movl 0x48(%rsp), %eax
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
addq %r12, %r12
vbroadcastss 0x2f607c(%rip), %xmm0 # 0x3f28c8
vpxor %xmm10, %xmm10, %xmm10
cmpq %r14, %rcx
je 0xfcaec
movq %rcx, %r15
movslq 0x1dc(%rsp), %r13
movq 0x1b0(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x1c0(%rsp), %rcx
movq 0x1a0(%rsp), %rax
movq 0x1f0(%rsp), %rdx
imulq %rcx, %rdx
imulq 0x170(%rsp), %rax
imulq %r15, %rax
addq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
imulq %rcx, %r13
shlq $0x2, %r13
movq %r13, 0x58(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %rdx
movq 0x28(%rsp), %rax
leaq (%rax,%rdx), %r13
addq $0x20, %r13
xorl %ecx, %ecx
cmpq 0xf8(%rsp), %rcx
je 0xfcadf
movl 0xe0(%rsp), %eax
movq %rcx, 0x30(%rsp)
imull %ecx, %eax
cltd
pushq $0x6
popq %rcx
idivl %ecx
cltq
shlq $0x4, %rax
addq 0x50(%rsp), %rax
movq %r13, 0x28(%rsp)
xorl %r15d, %r15d
cmpq 0x18(%rsp), %r15
je 0xfcac8
movq %r13, %rdx
movq %rbp, %rcx
testq %rcx, %rcx
je 0xfc9ed
vmovdqu -0x20(%rdx), %xmm1
vmovdqu -0x10(%rdx), %xmm2
vmovdqu (%rdx), %xmm3
vpcmpgtb %xmm1, %xmm10, %xmm4
vpcmpgtb %xmm2, %xmm10, %xmm5
vpcmpgtb %xmm3, %xmm10, %xmm6
vpunpcklbw %xmm4, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
vpunpckhbw %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
vpunpcklbw %xmm5, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
vpunpckhbw %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
vpunpcklbw %xmm6, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
vpunpckhbw %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
vpsllw $0x2, %xmm7, %xmm6
vpaddw %xmm5, %xmm6, %xmm6
vpmullw %xmm0, %xmm4, %xmm7
vpaddw %xmm7, %xmm6, %xmm6
vpaddw %xmm2, %xmm5, %xmm7
vpaddw %xmm1, %xmm4, %xmm8
vpsllw $0x2, %xmm8, %xmm8
vpsubw %xmm8, %xmm7, %xmm7
vpsubw %xmm2, %xmm5, %xmm8
vpsubw %xmm4, %xmm1, %xmm9
vpsllw $0x2, %xmm9, %xmm9
vpaddw %xmm9, %xmm8, %xmm8
vpsubw %xmm4, %xmm5, %xmm4
vpsubw %xmm2, %xmm1, %xmm5
vpaddw %xmm5, %xmm5, %xmm5
vpsubw %xmm5, %xmm4, %xmm9
vpaddw %xmm5, %xmm4, %xmm4
vpsllw $0x2, %xmm1, %xmm1
vpaddw %xmm3, %xmm1, %xmm1
vpmullw %xmm0, %xmm2, %xmm2
vpaddw %xmm2, %xmm1, %xmm1
vmovdqa %xmm6, 0x3b0(%rsp,%rcx)
vmovdqa %xmm7, 0x410(%rsp,%rcx)
vmovdqa %xmm8, 0x470(%rsp,%rcx)
vmovdqa %xmm9, 0x4d0(%rsp,%rcx)
vmovdqa %xmm4, 0x530(%rsp,%rcx)
vmovdqa %xmm1, 0x590(%rsp,%rcx)
addq $0x10, %rcx
addq %rdi, %rdx
jmp 0xfc914
movq %rax, %rcx
movq %rsi, %rdx
cmpq $0x290, %rdx # imm = 0x290
je 0xfcab8
vmovdqa 0x300(%rsp,%rdx), %xmm1
vmovdqa 0x310(%rsp,%rdx), %xmm2
vmovdqa 0x320(%rsp,%rdx), %xmm3
vmovdqa 0x330(%rsp,%rdx), %xmm4
vmovdqa 0x340(%rsp,%rdx), %xmm5
vpsllw $0x2, %xmm1, %xmm1
vpmullw %xmm0, %xmm3, %xmm6
vpaddw %xmm5, %xmm1, %xmm1
vpaddw %xmm1, %xmm6, %xmm1
vpaddw %xmm4, %xmm5, %xmm6
vpaddw %xmm2, %xmm3, %xmm7
vpsllw $0x2, %xmm7, %xmm7
vpsubw %xmm7, %xmm6, %xmm6
vpsubw %xmm4, %xmm5, %xmm7
vpsubw %xmm3, %xmm2, %xmm8
vpsllw $0x2, %xmm8, %xmm8
vpaddw %xmm7, %xmm8, %xmm7
vpsubw %xmm3, %xmm5, %xmm3
vpsubw %xmm4, %xmm2, %xmm5
vpaddw %xmm5, %xmm5, %xmm5
vpsubw %xmm5, %xmm3, %xmm8
vpaddw %xmm5, %xmm3, %xmm3
vpsllw $0x2, %xmm2, %xmm2
vpmullw %xmm0, %xmm4, %xmm4
vpaddw %xmm2, %xmm4, %xmm2
vpaddw 0x350(%rsp,%rdx), %xmm2, %xmm2
vmovdqu %xmm1, (%rcx)
vmovdqu %xmm6, (%rcx,%r8,2)
vmovdqu %xmm7, (%rcx,%r9,2)
vmovdqu %xmm8, (%rcx,%r10,2)
vmovdqu %xmm3, (%rcx,%r11,2)
vmovdqu %xmm2, (%rcx,%rbx,2)
addq $0x60, %rdx
addq %r12, %rcx
jmp 0xfc9f3
incq %r15
addq $0x20, %r13
addq $0x10, %rax
jmp 0xfc903
movq 0x30(%rsp), %rcx
incq %rcx
movq 0x28(%rsp), %r13
addq 0x58(%rsp), %r13
jmp 0xfc8cd
movq 0x38(%rsp), %rcx
incq %rcx
jmp 0xfc851
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xfdf63
lock
decl (%rax)
jne 0xfdf63
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xfdf5b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfdf63
movq %rsi, %rdi
callq 0x5f3e0
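# Tile-domain multiply: two headers are cleared and an int32 accumulator
# buffer (element size 4) is created via 0x63810; output channels are then
# processed four at a time (the count is pre-divided with sarl $0x2), with
# four accumulator banks at 0x240/0x6a0/0x660/0x620(%rsp) and an inner
# loop of movswl/imull widening dot products unrolled four taps deep.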
andq $0x0, 0x390(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x350(%rsp)
vmovdqu %xmm0, 0x35c(%rsp)
leaq 0x370(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqa %xmm0, (%rax)
leaq 0x160(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x10
popq %rsi
pushq $0x4
popq %r8
movq 0x48(%rsp), %rdx
movq 0x88(%rsp), %rcx
callq 0x63810
movq 0x88(%rsp), %rax
movl %eax, %ecx
sarl $0x2, %ecx
movq 0x160(%rsp), %rdx
movq 0x170(%rsp), %rax
movq 0x1a0(%rsp), %rsi
movq %rax, 0x5b0(%rsp)
imulq %rax, %rsi
movq 0x20(%rsp), %r14
movq 0xb8(%r14), %rdi
movq 0xe8(%r14), %r8
imulq %rdi, %r8
xorl %r9d, %r9d
movq 0x48(%rsp), %rax
testl %eax, %eax
cmovlel %r9d, %eax
movq %rax, 0x48(%rsp)
movq 0xa8(%r14), %rax
testl %ecx, %ecx
cmovlel %r9d, %ecx
movq %rcx, 0x5b8(%rsp)
leaq (,%r8,4), %rcx
movq %rcx, 0x5a0(%rsp)
movq %rdi, 0x5a8(%rsp)
leaq (,%rdi,4), %rcx
movq %rcx, 0x598(%rsp)
leaq (%r8,%r8,2), %rcx
addq %rax, %rcx
movq %rcx, 0xf0(%rsp)
leaq (%rax,%r8,2), %rcx
movq %rcx, 0x150(%rsp)
leaq (%rsi,%rsi,2), %rcx
addq %rdx, %rcx
leaq (,%rsi,4), %rdi
movq %rdi, 0x590(%rsp)
leaq (%rdx,%rsi,2), %rdi
movq %rdi, 0x310(%rsp)
movq %rdx, 0x148(%rsp)
addq %rdx, %rsi
movq %rsi, 0x200(%rsp)
vpxor %xmm0, %xmm0, %xmm0
movq %rax, 0x230(%rsp)
addq %r8, %rax
movq %rax, 0x2e0(%rsp)
movq %rcx, %rax
cmpq 0x5b8(%rsp), %r9
je 0xfd0f8
movslq 0x18c(%rsp), %rcx
movslq 0xd4(%r14), %rdx
imulq 0x5b0(%rsp), %rcx
movq %rcx, 0x208(%rsp)
movq 0x5a8(%rsp), %rsi
imulq %rdx, %rsi
imulq 0x598(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movq 0x148(%rsp), %rdx
movq 0x200(%rsp), %rdi
movq 0x310(%rsp), %r10
movq %rax, 0x320(%rsp)
movq %rax, %r11
xorl %ebx, %ebx
movq %r9, 0x158(%rsp)
movq %rsi, 0x2d8(%rsp)
cmpq 0x48(%rsp), %rbx
je 0xfd09d
movq %r11, 0xe0(%rsp)
movq %r10, 0xf8(%rsp)
movq %rdi, 0x78(%rsp)
movq %rdx, 0x70(%rsp)
vmovdqu %ymm0, 0x260(%rsp)
vmovdqu %ymm0, 0x240(%rsp)
vmovdqu %ymm0, 0x6c0(%rsp)
vmovdqu %ymm0, 0x6a0(%rsp)
vmovdqu %ymm0, 0x680(%rsp)
vmovdqu %ymm0, 0x660(%rsp)
vmovdqu %ymm0, 0x640(%rsp)
vmovdqu %ymm0, 0x620(%rsp)
movslq 0x1dc(%rsp), %r10
movq 0x1f0(%rsp), %r15
movq 0x1b0(%rsp), %rax
movq 0x1c0(%rsp), %rcx
movq %rcx, %rdx
imulq %r15, %rdx
movq %rbx, 0xd8(%rsp)
imulq %rbx, %r10
leaq (%r15,%r15,2), %r11
addq %r10, %r11
imulq %rcx, %r11
addq %rax, %r11
movq %rdx, 0xe8(%rsp)
leaq (,%rdx,4), %rdx
movq %rdx, 0x50(%rsp)
leaq (%r10,%r15,2), %r12
imulq %rcx, %r12
addq %rax, %r12
addq %r10, %r15
imulq %rcx, %r15
addq %rax, %r15
imulq %rcx, %r10
addq %rax, %r10
movq 0x2e0(%rsp), %rbp
movq 0x150(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0xf0(%rsp), %r9
movq 0x230(%rsp), %r13
xorl %ecx, %ecx
movq %rcx, 0x18(%rsp)
movq %rcx, %rax
orq $0x3, %rax
cmpq 0x60(%rsp), %rax
jge 0xfcf9a
movq %rbp, 0x28(%rsp)
xorl %ebp, %ebp
cmpq $0x20, %rbp
je 0xfcf5e
movswl (%r10,%rbp), %r14d
movswl (%r13,%rbp), %ebx
imull %r14d, %ebx
addl 0x240(%rsp,%rbp,2), %ebx
movswl (%r15,%rbp), %eax
movswl 0x20(%r13,%rbp), %edi
imull %eax, %edi
movswl (%r12,%rbp), %esi
movswl 0x40(%r13,%rbp), %ecx
imull %esi, %ecx
addl %edi, %ecx
movswl (%r11,%rbp), %edi
movswl 0x60(%r13,%rbp), %edx
imull %edi, %edx
addl %ecx, %edx
movq %r13, %rcx
addq %rbp, %rcx
addl %ebx, %edx
movl %edx, 0x240(%rsp,%rbp,2)
movswl 0x20(%r8,%rcx), %edx
imull %eax, %edx
movswl 0x40(%r8,%rcx), %ebx
imull %esi, %ebx
addl %edx, %ebx
movswl 0x60(%r8,%rcx), %edx
imull %edi, %edx
addl %ebx, %edx
leaq (%rcx,%r8), %rbx
movswl (%r8,%rcx), %ecx
imull %r14d, %ecx
addl 0x6a0(%rsp,%rbp,2), %ecx
addl %ecx, %edx
movl %edx, 0x6a0(%rsp,%rbp,2)
movswl 0x20(%r8,%rbx), %ecx
imull %eax, %ecx
movswl 0x40(%r8,%rbx), %edx
imull %esi, %edx
addl %ecx, %edx
movswl 0x60(%r8,%rbx), %ecx
imull %edi, %ecx
addl %edx, %ecx
movswl (%r8,%rbx), %edx
imull %r14d, %edx
addl 0x660(%rsp,%rbp,2), %edx
addl %edx, %ecx
leaq (%rbx,%r8), %rdx
movl %ecx, 0x660(%rsp,%rbp,2)
movswl (%r8,%rdx), %ecx
imull %r14d, %ecx
movswl 0x20(%r8,%rdx), %ebx
imull %eax, %ebx
movswl 0x40(%r8,%rdx), %eax
imull %esi, %eax
addl %ebx, %eax
movswl 0x60(%r8,%rdx), %edx
imull %edi, %edx
addl 0x620(%rsp,%rbp,2), %ecx
addl %eax, %edx
addl %ecx, %edx
movl %edx, 0x620(%rsp,%rbp,2)
addq $0x2, %rbp
jmp 0xfce48
movq 0x18(%rsp), %rcx
addq $0x4, %rcx
movq 0x50(%rsp), %rdx
addq %rdx, %r11
addq %rdx, %r12
addq %rdx, %r15
movq 0x38(%rsp), %rax
addq %rax, %r13
addq %rdx, %r10
addq %rax, %r9
addq %rax, 0x30(%rsp)
movq 0x28(%rsp), %rbp
addq %rax, %rbp
movq 0x20(%rsp), %r14
jmp 0xfce2a
movq 0x2d8(%rsp), %rsi
movq 0xe0(%rsp), %r11
movq 0xd8(%rsp), %rbx
movq 0xe8(%rsp), %rdi
movq 0x30(%rsp), %r15
movq 0x18(%rsp), %r12
cmpq 0x60(%rsp), %r12
jge 0xfd02f
xorl %eax, %eax
cmpq $0x20, %rax
je 0xfd01b
movswl (%r10,%rax), %ecx
movswl (%r13,%rax), %edx
imull %ecx, %edx
addl %edx, 0x240(%rsp,%rax,2)
movswl (%rbp,%rax), %edx
imull %ecx, %edx
addl %edx, 0x6a0(%rsp,%rax,2)
movswl (%r15,%rax), %edx
imull %ecx, %edx
addl %edx, 0x660(%rsp,%rax,2)
movswl (%r9,%rax), %edx
imull %ecx, %edx
addl %edx, 0x620(%rsp,%rax,2)
addq $0x2, %rax
jmp 0xfcfcd
incq %r12
addq %rsi, %r9
addq %rsi, %r15
addq %rsi, %rbp
addq %rsi, %r13
addq %rdi, %r10
jmp 0xfcfc4
xorl %eax, %eax
movq 0x158(%rsp), %r9
movq 0x70(%rsp), %rdx
movq 0x78(%rsp), %rdi
movq 0xf8(%rsp), %r10
cmpq $0x40, %rax
je 0xfd081
movl 0x240(%rsp,%rax), %ecx
movl %ecx, (%rdx,%rax)
movl 0x6a0(%rsp,%rax), %ecx
movl %ecx, (%rdi,%rax)
movl 0x660(%rsp,%rax), %ecx
movl %ecx, (%r10,%rax)
movl 0x620(%rsp,%rax), %ecx
movl %ecx, (%r11,%rax)
addq $0x4, %rax
jmp 0xfd04b
incq %rbx
movq 0x208(%rsp), %rax
addq %rax, %r11
addq %rax, %r10
addq %rax, %rdi
addq %rax, %rdx
jmp 0xfcd24
incq %r9
movq 0x5a0(%rsp), %rax
addq %rax, 0x230(%rsp)
addq %rax, 0xf0(%rsp)
addq %rax, 0x150(%rsp)
addq %rax, 0x2e0(%rsp)
movq 0x320(%rsp), %rax
movq 0x590(%rsp), %rcx
addq %rcx, %rax
addq %rcx, 0x310(%rsp)
addq %rcx, 0x200(%rsp)
addq %rcx, 0x148(%rsp)
jmp 0xfcca7
movq 0x88(%rsp), %rax
andq $-0x4, %rax
movq 0x170(%rsp), %rcx
movq 0xb8(%r14), %rdx
movq 0xe8(%r14), %rsi
imulq %rdx, %rsi
movq %rsi, 0x2e0(%rsp)
imulq %rax, %rsi
addq 0xa8(%r14), %rsi
movq %rsi, 0x38(%rsp)
leaq (%rdx,%rdx,2), %rsi
movq %rsi, 0x158(%rsp)
leaq (,%rdx,4), %rsi
movq %rsi, 0x208(%rsp)
movq %rdx, 0x150(%rsp)
addq %rdx, %rdx
movq %rdx, 0x2d8(%rsp)
movq 0x1a0(%rsp), %rdx
movq %rcx, 0xf0(%rsp)
imulq %rcx, %rdx
movq %rdx, 0x148(%rsp)
imulq %rax, %rdx
addq 0x160(%rsp), %rdx
movq %rdx, 0x230(%rsp)
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x88(%rsp), %rax
jge 0xfd41e
movslq 0x18c(%rsp), %rdx
movslq 0xd4(%r14), %rsi
movq 0x150(%rsp), %r9
imulq %rsi, %r9
movq 0x158(%rsp), %rdi
imulq %rsi, %rdi
movq 0x38(%rsp), %rcx
addq %rcx, %rdi
movq %rdi, 0xd8(%rsp)
movq 0x208(%rsp), %rdi
imulq %rsi, %rdi
movq %rdi, 0x18(%rsp)
imulq 0x2d8(%rsp), %rsi
addq %rcx, %rsi
movq %rsi, 0xf8(%rsp)
addq %r9, %rcx
movq %rcx, 0xe8(%rsp)
imulq 0xf0(%rsp), %rdx
movq %rdx, 0x78(%rsp)
movq 0x230(%rsp), %rdx
xorl %edi, %edi
movq %rax, 0x70(%rsp)
movq %r9, 0xe0(%rsp)
cmpq 0x48(%rsp), %rdi
je 0xfd3ec
movq %rdx, 0x28(%rsp)
vmovdqu %ymm0, 0x260(%rsp)
vmovdqu %ymm0, 0x240(%rsp)
movslq 0x1dc(%rsp), %rsi
movq 0x1f0(%rsp), %r11
movq 0x1b0(%rsp), %rax
movq 0x1c0(%rsp), %rcx
movq %rcx, %rdx
imulq %r11, %rdx
movq %rdi, 0x30(%rsp)
imulq %rdi, %rsi
leaq (%r11,%r11,2), %r8
addq %rsi, %r8
imulq %rcx, %r8
addq %rax, %r8
movq %rdx, 0x50(%rsp)
leaq (,%rdx,4), %rbp
leaq (%rsi,%r11,2), %rbx
imulq %rcx, %rbx
addq %rax, %rbx
addq %rsi, %r11
imulq %rcx, %r11
addq %rax, %r11
imulq %rcx, %rsi
addq %rax, %rsi
movq 0x38(%rsp), %rdi
movq 0xe8(%rsp), %r10
movq 0xf8(%rsp), %r15
movq 0xd8(%rsp), %r13
xorl %eax, %eax
movq %rax, %rcx
orq $0x3, %rcx
cmpq 0x60(%rsp), %rcx
jge 0xfd378
xorl %ecx, %ecx
cmpq $0x20, %rcx
je 0xfd34d
movswl (%rsi,%rcx), %r12d
movswl (%rdi,%rcx), %r14d
imull %r12d, %r14d
addl 0x240(%rsp,%rcx,2), %r14d
movswl (%r11,%rcx), %r12d
movswl (%r10,%rcx), %edx
imull %r12d, %edx
movswl (%rbx,%rcx), %r12d
movswl (%r15,%rcx), %r9d
imull %r12d, %r9d
addl %edx, %r9d
movswl (%r8,%rcx), %edx
movswl (%r13,%rcx), %r12d
imull %edx, %r12d
addl %r9d, %r12d
addl %r14d, %r12d
movl %r12d, 0x240(%rsp,%rcx,2)
addq $0x2, %rcx
jmp 0xfd2ef
addq $0x4, %rax
movq 0x18(%rsp), %rcx
addq %rcx, %r13
addq %rbp, %r8
addq %rcx, %r15
addq %rbp, %rbx
addq %rcx, %r10
addq %rbp, %r11
addq %rcx, %rdi
addq %rbp, %rsi
movq 0x20(%rsp), %r14
jmp 0xfd2db
movq 0xe0(%rsp), %r9
movq 0x50(%rsp), %r10
cmpq 0x60(%rsp), %rax
jge 0xfd3b9
xorl %ecx, %ecx
cmpq $0x10, %rcx
je 0xfd3ae
movswl (%rsi,%rcx,2), %edx
movswl (%rdi,%rcx,2), %r8d
imull %edx, %r8d
addl %r8d, 0x240(%rsp,%rcx,4)
incq %rcx
jmp 0xfd38e
incq %rax
addq %r9, %rdi
addq %r10, %rsi
jmp 0xfd385
xorl %eax, %eax
movq 0x28(%rsp), %rdx
movq 0x30(%rsp), %rdi
cmpq $0x10, %rax
je 0xfd3da
movl 0x240(%rsp,%rax,4), %ecx
movl %ecx, (%rdx,%rax,4)
incq %rax
jmp 0xfd3c5
incq %rdi
addq 0x78(%rsp), %rdx
movq 0x70(%rsp), %rax
jmp 0xfd233
incq %rax
movq 0x38(%rsp), %rcx
addq 0x2e0(%rsp), %rcx
movq %rcx, 0x38(%rsp)
movq 0x230(%rsp), %rcx
addq 0x148(%rsp), %rcx
movq %rcx, 0x230(%rsp)
jmp 0xfd199
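# The block below is the recurring ncnn::Mat release idiom in this listing:
# atomically decrement the refcount (lock decl); if it reaches zero, free the
# payload through the allocator's virtual hook (callq *0x18(%rax)) or, with no
# allocator set, via plain deallocation (callq 0x5f3e0, likely fastFree).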
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xfd45b
lock
decl (%rax)
jne 0xfd45b
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xfd450
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfd45b
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
andq $0x0, 0x1f0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x1b0(%rsp)
vmovdqu %xmm0, 0x1bc(%rsp)
leaq 0x1d0(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
leaq 0x240(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %r9
pushq $0x4
popq %r8
movl 0x334(%rsp), %esi
movl 0x338(%rsp), %edx
movq 0x88(%rsp), %rcx
vzeroupper
callq 0x63810
movq 0x160(%rsp), %rcx
movq 0x170(%rsp), %rdx
movq 0x240(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0x250(%rsp), %rax
movq 0x280(%rsp), %rsi
movq %rax, 0x50(%rsp)
imulq %rax, %rsi
movq %rsi, 0x38(%rsp)
movslq 0x318(%rsp), %rsi
xorl %edi, %edi
movq 0x88(%rsp), %rax
testl %eax, %eax
cmovlel %edi, %eax
movq %rax, 0x88(%rsp)
movl 0x58(%rsp), %r10d
addq $0x30, %rcx
movq 0x1a0(%rsp), %rax
imulq %rdx, %rax
movq %rax, 0x48(%rsp)
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
movq %rsi, 0x58(%rsp)
cmpq 0x88(%rsp), %rdi
je 0xfd784
movslq 0x18c(%rsp), %rbx
movslq 0x26c(%rsp), %r14
movq 0x38(%rsp), %r15
movq %rdi, 0x18(%rsp)
imulq %rdi, %r15
addq 0x30(%rsp), %r15
imulq 0x50(%rsp), %r14
movq 0x28(%rsp), %r12
imulq %rbx, %r12
imulq 0x58(%rsp), %rbx
movq %rcx, 0x60(%rsp)
movq %rcx, %rax
xorl %ebp, %ebp
cmpq 0x80(%rsp), %rbp
je 0xfd76d
leaq (,%rbp,2), %rdx
imulq %r14, %rdx
addq %r15, %rdx
leaq 0x1(,%rbp,2), %rsi
imulq %r14, %rsi
addq %r15, %rsi
movq %rax, %r13
xorl %edi, %edi
cmpq %r10, %rdi
je 0xfd762
xorl %r8d, %r8d
cmpq $0x10, %r8
je 0xfd625
movl -0x30(%r13,%r8), %r11d
movl %r11d, 0x6a0(%rsp,%r8)
movl -0x20(%r13,%r8), %r11d
movl %r11d, 0x660(%rsp,%r8)
movl -0x10(%r13,%r8), %r11d
movl %r11d, 0x620(%rsp,%r8)
movl (%r13,%r8), %r11d
movl %r11d, 0x340(%rsp,%r8)
addq $0x4, %r8
jmp 0xfd5e5
xorl %r8d, %r8d
cmpq $0x10, %r8
je 0xfd66d
movl 0x660(%rsp,%r8), %ecx
movl 0x6a0(%rsp,%r8), %r11d
addl %ecx, %r11d
movl 0x620(%rsp,%r8), %r9d
addl %r9d, %r11d
movl %r11d, 0x300(%rsp,%r8)
subl %r9d, %ecx
addl 0x340(%rsp,%r8), %ecx
movl %ecx, 0x2f0(%rsp,%r8)
addq $0x4, %r8
jmp 0xfd628
movl 0x300(%rsp), %r8d
movl 0x304(%rsp), %r11d
movl %r8d, 0x228(%rsp)
movl 0x2f0(%rsp), %r8d
movl 0x2f4(%rsp), %ecx
movl %r8d, 0x22c(%rsp)
movl %r11d, 0x220(%rsp)
movl %ecx, 0x224(%rsp)
movl 0x308(%rsp), %ecx
movl %ecx, 0x218(%rsp)
movl 0x2f8(%rsp), %ecx
movl %ecx, 0x21c(%rsp)
movl 0x30c(%rsp), %ecx
movl %ecx, 0x210(%rsp)
movl 0x2fc(%rsp), %ecx
movl %ecx, 0x214(%rsp)
xorl %r8d, %r8d
cmpq $0x8, %r8
je 0xfd72b
movl 0x220(%rsp,%r8), %ecx
movl 0x228(%rsp,%r8), %r9d
addl %ecx, %r9d
movl 0x218(%rsp,%r8), %r11d
addl %r11d, %r9d
movl %r9d, 0x5c8(%rsp,%r8)
subl %r11d, %ecx
addl 0x210(%rsp,%r8), %ecx
movl %ecx, 0x5c0(%rsp,%r8)
addq $0x4, %r8
jmp 0xfd6e6
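# vpsrad $0x2 below is an arithmetic divide-by-4, apparently normalizing the
# add/sub butterfly sums computed above before they are stored.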
vmovq 0x5c8(%rsp), %xmm0
vpsrad $0x2, %xmm0, %xmm0
vmovq %xmm0, (%rdx)
vmovq 0x5c0(%rsp), %xmm0
vpsrad $0x2, %xmm0, %xmm0
vmovq %xmm0, (%rsi)
addq $0x8, %rdx
addq $0x8, %rsi
incq %rdi
addq %r12, %r13
jmp 0xfd5d9
incq %rbp
addq %rbx, %rax
jmp 0xfd5a8
movq 0x18(%rsp), %rdi
incq %rdi
movq 0x60(%rsp), %rcx
addq 0x48(%rsp), %rcx
jmp 0xfd558
leaq 0x240(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x100(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xfd7ec
lock
decl (%rax)
jne 0xfd7ec
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
je 0xfd7e4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfd7ec
movq %rsi, %rdi
callq 0x5f3e0
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfd823
lock
decl (%rax)
jne 0xfd823
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfd81b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfd823
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xfd85a
lock
decl (%rax)
jne 0xfd85a
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xfd852
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfd85a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1f0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x1b0(%rsp)
vmovdqu %xmm0, 0x1bc(%rsp)
andq $0x0, 0x280(%rsp)
leaq 0x1d0(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqa %xmm0, (%rax)
vmovdqa %xmm0, 0x240(%rsp)
vmovdqu %xmm0, 0x24c(%rsp)
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovdqa %xmm0, 0x260(%rsp)
vmovdqu %xmm0, 0x26c(%rsp)
movq 0x28(%rsp), %rdx
movl %edx, %eax
shrl %eax
xorl %r8d, %r8d
cmpl $0xc, 0x150(%rsp)
setge %cl
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
cmovgel %eax, %r8d
movl 0x78(%rsp), %esi
shll %cl, %esi
vmovdqa %xmm0, 0x20(%rdi)
subl %r8d, %edx
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rcx
movq 0xf0(%rsp), %r8
movq 0xe8(%rsp), %r9
callq 0x628f2
movslq 0x28(%rsp), %rax
movq %rax, 0x18(%rsp)
xorl %eax, %eax
cmpq $0x24, %rax
je 0xfda71
movslq 0x37c(%rsp), %rcx
movq 0x390(%rsp), %rdx
imulq %rax, %rdx
movq 0x360(%rsp), %rsi
imulq %rsi, %rdx
addq 0x350(%rsp), %rdx
imulq %rsi, %rcx
movq %rax, %rsi
imulq 0x18(%rsp), %rsi
xorl %edi, %edi
movq %rdi, %r8
orq $0x1, %r8
cmpq 0x18(%rsp), %r8
jge 0xfda62
movq %rdi, %r8
shrq %r8
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x160(%rsp), %r9
movl %r13d, %r10d
subl $0x1, %r10d
jb 0xfda0a
vmovdqu (%r9), %ymm0
vmovdqu %ymm0, (%r8)
movq 0x1a0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x20, %r8
jmp 0xfd9e5
addq $0x2, %rdi
jmp 0xfd9b3
movl %edi, %r9d
shrl %r9d
movl %edi, %r8d
andl $0x1, %r8d
addl %r9d, %r8d
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x160(%rsp), %r9
movl %r13d, %r10d
subl $0x1, %r10d
jb 0xfda5f
vmovdqu (%r9), %xmm0
vmovdqu %xmm0, (%r8)
movq 0x1a0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x10, %r8
jmp 0xfda3a
incq %rdi
cmpq 0x18(%rsp), %rdi
jl 0xfda10
incq %rax
jmp 0xfd972
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfdaae
lock
decl (%rax)
jne 0xfdaae
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfdaa3
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfdaae
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
leaq 0x180(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x240(%rsp), %rdi
pushq $0x24
popq %rdx
pushq $0x10
popq %r8
pushq $0x4
popq %r9
movq 0x28(%rsp), %rsi
movl 0xd8(%rsp), %ecx
vzeroupper
callq 0x628f2
xorl %edx, %edx
movl 0xd8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x48(%rsp)
movq 0x20(%rsp), %rax
cmpq 0x48(%rsp), %rdx
je 0xfddde
movq 0x280(%rsp), %rcx
imulq %rdx, %rcx
imulq 0x250(%rsp), %rcx
addq 0x240(%rsp), %rcx
movslq 0x11c(%rax), %rdi
movq 0x100(%rax), %rsi
imulq %rsi, %rdi
movq %rdi, 0x38(%rsp)
imulq 0x130(%rax), %rsi
movq %rdx, 0x80(%rsp)
imulq %rdx, %rsi
addq 0xf0(%rax), %rsi
xorl %edx, %edx
cmpq $0x24, %rdx
je 0xfddce
movslq 0x37c(%rsp), %r8
movq 0x350(%rsp), %r9
movq %rdx, %rax
movq 0x360(%rsp), %rdx
movq 0x390(%rsp), %rdi
movq %rdx, %r10
imulq %r8, %r10
movq %rdi, %r11
imulq %rdx, %r11
movq %rax, 0x30(%rsp)
imulq %rax, %r11
addq %r9, %r11
xorl %eax, %eax
movq %rax, %r14
orq $0x1, %r14
cmpq 0x18(%rsp), %r14
jge 0xfdcfb
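# Inner microkernel: eight xmm accumulators are zeroed, then each vpmaddwd
# multiplies packed signed 16-bit elements and sums adjacent products into
# four 32-bit lanes, building a 2x4 tile of dot-product partial sums.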
vpxor %xmm0, %xmm0, %xmm0
xorl %r14d, %r14d
movl %r13d, %r15d
vpxor %xmm1, %xmm1, %xmm1
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm5, %xmm5, %xmm5
vpxor %xmm6, %xmm6, %xmm6
vpxor %xmm7, %xmm7, %xmm7
subl $0x1, %r15d
jb 0xfdc88
vmovdqu (%r11,%r14), %xmm8
vmovdqu 0x10(%r11,%r14), %xmm9
vmovdqu (%rsi,%r14,2), %xmm10
vmovdqu 0x10(%rsi,%r14,2), %xmm11
vmovdqu 0x20(%rsi,%r14,2), %xmm12
vmovdqu 0x30(%rsi,%r14,2), %xmm13
vpmaddwd %xmm10, %xmm8, %xmm14
vpaddd %xmm7, %xmm14, %xmm7
vpmaddwd %xmm11, %xmm8, %xmm14
vpaddd %xmm6, %xmm14, %xmm6
vpmaddwd %xmm12, %xmm8, %xmm14
vpaddd %xmm5, %xmm14, %xmm5
vpmaddwd %xmm13, %xmm8, %xmm8
vpaddd %xmm4, %xmm8, %xmm4
vpmaddwd %xmm10, %xmm9, %xmm8
vpaddd %xmm3, %xmm8, %xmm3
vpmaddwd %xmm11, %xmm9, %xmm8
vpaddd %xmm2, %xmm8, %xmm2
vpmaddwd %xmm12, %xmm9, %xmm8
vpaddd %xmm1, %xmm8, %xmm1
vpmaddwd %xmm13, %xmm9, %xmm8
vpaddd %xmm0, %xmm8, %xmm0
addq $0x20, %r14
jmp 0xfdc0c
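# The vpunpck/vpaddd sequence below transposes each group of four accumulators
# and adds the rows, a horizontal reduction yielding eight 32-bit results
# stored to (%rcx) and 0x10(%rcx).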
vpunpckldq %xmm6, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
vpunpckldq %xmm4, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
vpunpckhdq %xmm6, %xmm7, %xmm6 # xmm6 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
vpunpckhdq %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
vpunpckldq %xmm2, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vpunpckldq %xmm0, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vpunpckhdq %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
vpunpckhdq %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vpunpcklqdq %xmm9, %xmm8, %xmm1 # xmm1 = xmm8[0],xmm9[0]
vpunpckhqdq %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vpaddd %xmm3, %xmm1, %xmm1
vpunpcklqdq %xmm4, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm4[0]
vpunpckhqdq %xmm4, %xmm6, %xmm4 # xmm4 = xmm6[1],xmm4[1]
vpaddd %xmm3, %xmm4, %xmm3
vpaddd %xmm3, %xmm1, %xmm1
vpunpcklqdq %xmm7, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm7[0]
vpunpckhqdq %xmm7, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm7[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm0, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm0[0]
vpunpckhqdq %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[1],xmm0[1]
vpaddd %xmm4, %xmm0, %xmm0
vpaddd %xmm0, %xmm3, %xmm0
vmovdqu %xmm1, (%rcx)
vmovdqu %xmm0, 0x10(%rcx)
addq $0x20, %rcx
addq $0x2, %rax
addq %r10, %r11
jmp 0xfdbd4
imulq 0x30(%rsp), %rdi
cmpl 0x28(%rsp), %eax
jge 0xfddb7
movl %eax, %r11d
shrl %r11d
movl %eax, %r10d
andl $0x1, %r10d
addl %r11d, %r10d
imulq %r8, %r10
addq %rdi, %r10
imulq %rdx, %r10
addq %r9, %r10
vpxor %xmm0, %xmm0, %xmm0
xorl %r11d, %r11d
movl %r13d, %r14d
vpxor %xmm1, %xmm1, %xmm1
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
subl $0x1, %r14d
jb 0xfdd7c
vmovdqu (%r10,%r11), %xmm4
vpmaddwd (%rsi,%r11,4), %xmm4, %xmm5
vpaddd %xmm3, %xmm5, %xmm3
vpmaddwd 0x10(%rsi,%r11,4), %xmm4, %xmm5
vpaddd %xmm2, %xmm5, %xmm2
vpmaddwd 0x20(%rsi,%r11,4), %xmm4, %xmm5
vpaddd %xmm1, %xmm5, %xmm1
vpmaddwd 0x30(%rsi,%r11,4), %xmm4, %xmm4
vpaddd %xmm0, %xmm4, %xmm0
addq $0x10, %r11
jmp 0xfdd3f
vpunpckldq %xmm2, %xmm3, %xmm4 # xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vpunpckldq %xmm0, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vpunpckhdq %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
vpunpckhdq %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vpunpcklqdq %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0]
vpunpckhqdq %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vpaddd %xmm3, %xmm1, %xmm1
vpunpcklqdq %xmm0, %xmm2, %xmm3 # xmm3 = xmm2[0],xmm0[0]
vpunpckhqdq %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[1],xmm0[1]
vpaddd %xmm3, %xmm0, %xmm0
vpaddd %xmm0, %xmm1, %xmm0
vmovdqu %xmm0, (%rcx)
addq $0x10, %rcx
incl %eax
jmp 0xfdd01
movq 0x30(%rsp), %rdx
incq %rdx
addq 0x38(%rsp), %rsi
movq 0x20(%rsp), %rax
jmp 0xfdb8b
movq 0x80(%rsp), %rdx
incq %rdx
jmp 0xfdb2f
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfde15
lock
decl (%rax)
jne 0xfde15
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfde0d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfde15
movq %rsi, %rdi
callq 0x5f3e0
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfde4c
lock
decl (%rax)
jne 0xfde4c
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfde44
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfde4c
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
leaq 0x180(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andq $0x0, 0x2d0(%rsp)
vmovdqa %xmm0, 0x290(%rsp)
vmovdqu %xmm0, 0x29c(%rsp)
vmovdqa %xmm0, 0x2b0(%rsp)
vmovdqu %xmm0, 0x2bc(%rsp)
vmovq 0x12c(%rsp), %xmm0
vpcmpeqd 0x2e0(%rsp), %xmm0, %xmm0
vpmovsxdq %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vtestpd %xmm1, %xmm0
jae 0xfdf1d
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0xfe8af
lock
incl (%rax)
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xfe8af
lock
decl (%rax)
jne 0xfe8af
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xfe8a7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfe8af
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x290(%rsp), %rdi
pushq $0x10
popq %r8
pushq $0x4
popq %r9
movq 0x208(%rsp), %rsi
movq 0x158(%rsp), %rdx
movl 0xd8(%rsp), %ecx
callq 0x628f2
jmp 0xfe91f
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1f0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x1b0(%rsp)
vmovdqu %xmm0, 0x1bc(%rsp)
andq $0x0, 0x280(%rsp)
leaq 0x1d0(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqa %xmm0, (%rax)
vmovdqa %xmm0, 0x240(%rsp)
vmovdqu %xmm0, 0x24c(%rsp)
leaq 0x350(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vmovdqa %xmm0, 0x260(%rsp)
vmovdqu %xmm0, 0x26c(%rsp)
movq 0x60(%rsp), %rdx
movl %edx, %eax
shrl %eax
xorl %r8d, %r8d
cmpl $0xc, 0x78(%rsp)
setge %cl
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
cmovgel %eax, %r8d
movl 0x48(%rsp), %esi
shll %cl, %esi
vmovdqa %xmm0, 0x20(%rdi)
subl %r8d, %edx
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x24
popq %rcx
movq 0x70(%rsp), %r8
movq 0x80(%rsp), %r9
callq 0x628f2
movslq 0x60(%rsp), %r13
xorl %eax, %eax
cmpq $0x24, %rax
je 0xfe123
movslq 0x37c(%rsp), %rcx
movq 0x390(%rsp), %rdx
imulq %rax, %rdx
movq 0x360(%rsp), %rsi
imulq %rsi, %rdx
addq 0x350(%rsp), %rdx
imulq %rsi, %rcx
movq %rax, %rsi
imulq %r13, %rsi
xorl %edi, %edi
movq %rdi, %r8
orq $0x1, %r8
cmpq %r13, %r8
jge 0xfe116
movq %rdi, %r8
shrq %r8
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x160(%rsp), %r9
movl %r14d, %r10d
subl $0x1, %r10d
jb 0xfe0be
vmovdqu (%r9), %ymm0
vmovdqu %ymm0, (%r8)
movq 0x1a0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x20, %r8
jmp 0xfe099
addq $0x2, %rdi
jmp 0xfe069
movl %edi, %r9d
shrl %r9d
movl %edi, %r8d
andl $0x1, %r8d
addl %r9d, %r8d
imulq %rcx, %r8
addq %rdx, %r8
leaq (%rdi,%rsi), %r9
shlq $0x4, %r9
addq 0x160(%rsp), %r9
movl %r14d, %r10d
subl $0x1, %r10d
jb 0xfe113
vmovdqu (%r9), %xmm0
vmovdqu %xmm0, (%r8)
movq 0x1a0(%rsp), %r11
shlq $0x4, %r11
addq %r11, %r9
addq $0x10, %r8
jmp 0xfe0ee
incq %rdi
cmpq %r13, %rdi
jl 0xfe0c4
incq %rax
jmp 0xfe02a
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfe160
lock
decl (%rax)
jne 0xfe160
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfe155
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfe160
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
leaq 0x180(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x240(%rsp), %rdi
pushq $0x24
popq %rdx
pushq $0x4
popq %r8
pushq $0x1
popq %r9
movq 0x60(%rsp), %rsi
movq 0x88(%rsp), %rcx
vzeroupper
callq 0x628f2
movq 0x88(%rsp), %rax
movl %eax, %ecx
sarl $0x2, %ecx
movq 0x240(%rsp), %rax
movq %rax, 0x48(%rsp)
movq 0x250(%rsp), %rax
imulq 0x280(%rsp), %rax
movq %rax, 0x80(%rsp)
movq 0x20(%rsp), %rax
movq 0xf0(%rax), %rsi
movq 0x100(%rax), %rdx
movq 0x350(%rsp), %rdi
movq %rdi, 0x78(%rsp)
movq 0x360(%rsp), %rdi
movq 0x390(%rsp), %r8
movq %rdi, 0x28(%rsp)
imulq %rdi, %r8
movq %r8, 0x30(%rsp)
xorl %r9d, %r9d
testl %ecx, %ecx
cmovlel %r9d, %ecx
movq %rcx, 0x38(%rsp)
movq 0x130(%rax), %rax
movq %rdx, 0x70(%rsp)
imulq %rdx, %rax
movq %rax, 0xd8(%rsp)
cmpq 0x38(%rsp), %r9
je 0xfe53c
leaq (,%r9,4), %r8
movq 0x80(%rsp), %rdx
imulq %rdx, %r8
movq 0x48(%rsp), %rcx
addq %rcx, %r8
leaq 0x1(,%r9,4), %rdi
imulq %rdx, %rdi
addq %rcx, %rdi
leaq 0x2(,%r9,4), %r15
imulq %rdx, %r15
addq %rcx, %r15
movq %r9, 0x50(%rsp)
leaq 0x3(,%r9,4), %rax
imulq %rdx, %rax
addq %rcx, %rax
movq 0x20(%rsp), %rcx
movslq 0x11c(%rcx), %rbp
imulq 0x70(%rsp), %rbp
movq 0x78(%rsp), %rcx
movq %rsi, 0x58(%rsp)
movq %rsi, %rdx
xorl %r12d, %r12d
cmpq $0x24, %r12
je 0xfe522
movslq 0x37c(%rsp), %r11
imulq 0x28(%rsp), %r11
movq %rcx, %rbx
xorl %r9d, %r9d
movq %r9, %rsi
orq $0x1, %rsi
cmpq %r13, %rsi
jge 0xfe507
vpxor %xmm0, %xmm0, %xmm0
xorl %r10d, %r10d
movl %r14d, %esi
vpxor %xmm1, %xmm1, %xmm1
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm5, %xmm5, %xmm5
vpxor %xmm6, %xmm6, %xmm6
vpxor %xmm7, %xmm7, %xmm7
subl $0x1, %esi
jb 0xfe3a6
vmovdqu (%rbx,%r10), %xmm8
vmovdqu 0x10(%rbx,%r10), %xmm9
vmovdqu (%rdx,%r10,2), %xmm10
vmovdqu 0x10(%rdx,%r10,2), %xmm11
vmovdqu 0x20(%rdx,%r10,2), %xmm12
vmovdqu 0x30(%rdx,%r10,2), %xmm13
vpmaddwd %xmm10, %xmm8, %xmm14
vpaddd %xmm7, %xmm14, %xmm7
vpmaddwd %xmm11, %xmm8, %xmm14
vpaddd %xmm6, %xmm14, %xmm6
vpmaddwd %xmm12, %xmm8, %xmm14
vpaddd %xmm5, %xmm14, %xmm5
vpmaddwd %xmm13, %xmm8, %xmm8
vpaddd %xmm4, %xmm8, %xmm4
vpmaddwd %xmm10, %xmm9, %xmm8
vpaddd %xmm3, %xmm8, %xmm3
vpmaddwd %xmm11, %xmm9, %xmm8
vpaddd %xmm2, %xmm8, %xmm2
vpmaddwd %xmm12, %xmm9, %xmm8
vpaddd %xmm1, %xmm8, %xmm1
vpmaddwd %xmm13, %xmm9, %xmm8
vpaddd %xmm0, %xmm8, %xmm0
addq $0x20, %r10
jmp 0xfe32b
vpunpckldq %xmm6, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
vpunpckldq %xmm4, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
vpunpckhdq %xmm6, %xmm7, %xmm6 # xmm6 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
vpunpckhdq %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
vpunpckldq %xmm2, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vpunpckldq %xmm0, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vpunpckhdq %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
vpunpckhdq %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vpunpcklqdq %xmm9, %xmm8, %xmm1 # xmm1 = xmm8[0],xmm9[0]
vpunpckhqdq %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vpaddd %xmm3, %xmm1, %xmm1
vpunpcklqdq %xmm4, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm4[0]
vpunpckhqdq %xmm4, %xmm6, %xmm4 # xmm4 = xmm6[1],xmm4[1]
vpaddd %xmm3, %xmm4, %xmm3
vpaddd %xmm3, %xmm1, %xmm1
vpunpcklqdq %xmm7, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm7[0]
vpunpckhqdq %xmm7, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm7[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm0, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm0[0]
vmovd %xmm1, (%r8)
vpextrd $0x1, %xmm1, (%rdi)
vpextrd $0x2, %xmm1, (%r15)
vpextrd $0x3, %xmm1, (%rax)
vpunpckhqdq %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[1],xmm0[1]
vpaddd %xmm4, %xmm0, %xmm0
vpaddd %xmm0, %xmm3, %xmm0
vmovd %xmm0, 0x4(%r8)
vpextrd $0x1, %xmm0, 0x4(%rdi)
vpextrd $0x2, %xmm0, 0x4(%r15)
vpextrd $0x3, %xmm0, 0x4(%rax)
addq $0x8, %r8
addq $0x8, %rdi
addq $0x8, %r15
addq $0x8, %rax
addq $0x2, %r9
addq %r11, %rbx
jmp 0xfe2f5
movl %r9d, %r10d
shrl %r10d
movl %r9d, %esi
andl $0x1, %esi
addl %r10d, %esi
imulq %r11, %rsi
addq %rcx, %rsi
vpxor %xmm0, %xmm0, %xmm0
movl %r14d, %r10d
xorl %ebx, %ebx
vpxor %xmm1, %xmm1, %xmm1
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
subl $0x1, %r10d
jb 0xfe4b1
vmovdqu (%rsi,%rbx), %xmm4
vpmaddwd (%rdx,%rbx,4), %xmm4, %xmm5
vpaddd %xmm0, %xmm5, %xmm0
vpmaddwd 0x10(%rdx,%rbx,4), %xmm4, %xmm5
vpaddd %xmm1, %xmm5, %xmm1
vpmaddwd 0x20(%rdx,%rbx,4), %xmm4, %xmm5
vpaddd %xmm2, %xmm5, %xmm2
vpmaddwd 0x30(%rdx,%rbx,4), %xmm4, %xmm4
vpaddd %xmm3, %xmm4, %xmm3
addq $0x10, %rbx
jmp 0xfe479
vpunpckldq %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vpunpckldq %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vpunpckhdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vpunpckhdq %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vpunpcklqdq %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vpunpckhqdq %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vpaddd %xmm3, %xmm2, %xmm2
vpunpcklqdq %xmm1, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm1[0]
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vpaddd %xmm3, %xmm0, %xmm0
vpaddd %xmm0, %xmm2, %xmm0
vmovd %xmm0, (%r8)
vpextrd $0x1, %xmm0, (%rdi)
vpextrd $0x2, %xmm0, (%r15)
vpextrd $0x3, %xmm0, (%rax)
addq $0x4, %r8
addq $0x4, %rdi
addq $0x4, %r15
addq $0x4, %rax
incl %r9d
cmpl 0x60(%rsp), %r9d
jl 0xfe44e
incq %r12
addq %rbp, %rdx
addq 0x30(%rsp), %rcx
jmp 0xfe2d7
movq 0x50(%rsp), %r9
incq %r9
movq 0x58(%rsp), %rsi
addq 0xd8(%rsp), %rsi
jmp 0xfe25c
movq 0x88(%rsp), %rcx
andq $-0x4, %rcx
movq 0x240(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x250(%rsp), %rax
imulq 0x280(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0x20(%rsp), %rax
movq 0xf0(%rax), %rdx
movq %rdx, 0x58(%rsp)
movq 0x100(%rax), %rdx
movq 0x130(%rax), %rax
movq %rdx, 0x50(%rsp)
imulq %rdx, %rax
movq %rax, 0x38(%rsp)
movq 0x350(%rsp), %rax
movq %rax, 0x48(%rsp)
movq 0x360(%rsp), %rbx
movq 0x390(%rsp), %r15
imulq %rbx, %r15
cmpq 0x88(%rsp), %rcx
jge 0xfe701
movl %ecx, %eax
cltd
pushq $0x4
popq %rsi
idivl %esi
movq 0x30(%rsp), %rdi
imulq %rcx, %rdi
addq 0x28(%rsp), %rdi
addl %eax, %edx
movq 0x20(%rsp), %rax
movslq 0x11c(%rax), %rax
movslq %edx, %rdx
imulq 0x38(%rsp), %rdx
addq 0x58(%rsp), %rdx
imulq 0x50(%rsp), %rax
movq 0x48(%rsp), %r12
xorl %r8d, %r8d
cmpq $0x24, %r8
je 0xfe6f9
movslq 0x37c(%rsp), %rsi
imulq %rbx, %rsi
movq %r12, %r11
xorl %r9d, %r9d
movq %r9, %r10
orq $0x1, %r10
cmpq %r13, %r10
jge 0xfe6e4
vpxor %xmm0, %xmm0, %xmm0
movl %r14d, %ebp
xorl %r10d, %r10d
vpxor %xmm1, %xmm1, %xmm1
subl $0x1, %ebp
jb 0xfe667
vmovdqu (%rdx,%r10), %xmm2
vpmaddwd (%r11,%r10,2), %xmm2, %xmm3
vpaddd %xmm0, %xmm3, %xmm0
vpmaddwd 0x10(%r11,%r10,2), %xmm2, %xmm2
vpaddd %xmm1, %xmm2, %xmm1
addq $0x10, %r10
jmp 0xfe641
vpshufd $0xee, %xmm0, %xmm2 # xmm2 = xmm0[2,3,2,3]
vpaddd %xmm0, %xmm2, %xmm0
vpshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
vpaddd %xmm1, %xmm2, %xmm1
vphaddd %xmm1, %xmm0, %xmm0
vpshufd $0xe8, %xmm0, %xmm0 # xmm0 = xmm0[0,2,2,3]
vmovq %xmm0, (%rdi)
addq $0x8, %rdi
addq $0x2, %r9
addq %rsi, %r11
jmp 0xfe623
movl %r9d, %r10d
shrl %r10d
movl %r9d, %r11d
andl $0x1, %r11d
addl %r10d, %r11d
imulq %rsi, %r11
addq %r12, %r11
vpxor %xmm0, %xmm0, %xmm0
movl %r14d, %r10d
xorl %ebp, %ebp
subl $0x1, %r10d
jb 0xfe6cf
vmovdqu (%r11,%rbp), %xmm1
vpmaddwd (%rdx,%rbp), %xmm1, %xmm1
vpaddd %xmm0, %xmm1, %xmm0
addq $0x10, %rbp
jmp 0xfe6b4
vphaddd %xmm0, %xmm0, %xmm0
vphaddd %xmm0, %xmm0, %xmm0
vmovd %xmm0, (%rdi)
addq $0x4, %rdi
incl %r9d
cmpl 0x60(%rsp), %r9d
jl 0xfe694
incq %r8
addq %r15, %r12
addq %rax, %rdx
jmp 0xfe607
incq %rcx
jmp 0xfe5b9
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xfe738
lock
decl (%rax)
jne 0xfe738
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xfe730
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfe738
movq %rsi, %rdi
callq 0x5f3e0
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfe76f
lock
decl (%rax)
jne 0xfe76f
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfe767
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfe76f
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
leaq 0x180(%rsp), %rax
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andq $0x0, 0x2d0(%rsp)
vmovdqa %xmm0, 0x290(%rsp)
vmovdqu %xmm0, 0x29c(%rsp)
vmovdqa %xmm0, 0x2b0(%rsp)
vmovdqu %xmm0, 0x2bc(%rsp)
vmovq 0x12c(%rsp), %xmm0
vpcmpeqd 0x230(%rsp), %xmm0, %xmm0
vpmovsxdq %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vtestpd %xmm1, %xmm0
jae 0xfe840
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0xfed60
lock
incl (%rax)
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xfed60
lock
decl (%rax)
jne 0xfed60
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xfed58
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfed60
movq 0x40(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x290(%rsp), %rdi
pushq $0x4
popq %r8
pushq $0x1
popq %r9
movq 0xe8(%rsp), %rsi
movq 0xf0(%rsp), %rdx
movq 0x88(%rsp), %rcx
callq 0x628f2
movq 0x290(%rsp), %rax
movq %rax, 0x148(%rsp)
movq 0x2a0(%rsp), %r8
movq 0x2d0(%rsp), %rax
movq %rax, 0x200(%rsp)
jmp 0xfedeb
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x100(%rsp), %xmm0
vmovaps %xmm0, 0x290(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0x2a0(%rsp)
movl 0x118(%rsp), %eax
movl %eax, 0x2a8(%rsp)
movq 0x120(%rsp), %rax
movq %rax, 0x2b0(%rsp)
vmovupd 0x128(%rsp), %xmm0
vmovupd %xmm0, 0x2b8(%rsp)
movl 0x138(%rsp), %eax
movl %eax, 0x2c8(%rsp)
movq 0x140(%rsp), %rax
movq %rax, 0x2d0(%rsp)
movq 0x28(%rsp), %rdx
leal (,%rdx,4), %eax
movslq %eax, %rcx
imull $0xc, %edx, %eax
movslq %eax, %rsi
imull $0x14, %edx, %eax
movslq %eax, %rdi
movl 0x230(%rsp), %eax
shll $0x4, %eax
movslq %eax, %r8
shlq $0x2, %rbx
shlq $0x2, %r8
xorl %r14d, %r14d
pushq $-0x50
popq %r10
pushq $0x50
popq %r9
vbroadcastss 0x2f3db1(%rip), %xmm0 # 0x3f2714
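# The float constant broadcast above feeds the loop below: 32-bit sums are
# converted to float (vcvtdq2ps), scaled (vmulps), and truncated back to
# int32 (vcvttps2dq), a fixed-point descale that avoids integer division.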
cmpq 0x48(%rsp), %r14
je 0xfec3b
movq 0x280(%rsp), %rdx
movslq 0x2bc(%rsp), %r15
imulq 0x250(%rsp), %rdx
movq 0x290(%rsp), %rax
imulq %r14, %rdx
addq 0x240(%rsp), %rdx
movq %rdx, 0x28(%rsp)
movq 0x2a0(%rsp), %rdx
movq 0x2d0(%rsp), %r11
imulq %rdx, %r11
imulq %rdx, %r15
shlq $0x2, %r15
movq %r15, 0x30(%rsp)
movq %r14, 0x38(%rsp)
imulq %r14, %r11
leaq (%rax,%r11), %r14
addq $0x30, %r14
xorl %r13d, %r13d
cmpq 0x58(%rsp), %r13
je 0xfec2e
movl 0x50(%rsp), %eax
imull %r13d, %eax
cltd
pushq $0x6
popq %r11
idivl %r11d
cltq
shlq $0x4, %rax
addq 0x28(%rsp), %rax
movq %r14, 0x18(%rsp)
movq %r14, %rdx
xorl %r15d, %r15d
cmpq 0x60(%rsp), %r15
je 0xfec1c
movq %rax, %r14
movq %r10, %r11
testq %r11, %r11
je 0xfeaae
vmovdqu (%r14,%rcx,4), %xmm1
vmovdqu (%r14,%r12,4), %xmm2
vmovdqu (%r14,%rsi,4), %xmm3
vmovdqu (%r14,%rbp,4), %xmm4
vmovdqu (%r14,%rdi,4), %xmm5
vpaddd %xmm1, %xmm2, %xmm6
vpsubd %xmm2, %xmm1, %xmm1
vpaddd %xmm3, %xmm4, %xmm2
vpaddd (%r14), %xmm6, %xmm7
vpsubd %xmm4, %xmm3, %xmm3
vpaddd %xmm2, %xmm7, %xmm4
vpaddd %xmm3, %xmm3, %xmm7
vpaddd %xmm1, %xmm7, %xmm7
vpslld $0x2, %xmm2, %xmm2
vpaddd %xmm6, %xmm2, %xmm2
vpslld $0x2, %xmm5, %xmm5
vpaddd %xmm1, %xmm5, %xmm1
vpslld $0x3, %xmm3, %xmm3
vpaddd %xmm3, %xmm1, %xmm1
vmovdqa %xmm4, 0x3a0(%rsp,%r11)
vmovdqa %xmm7, 0x400(%rsp,%r11)
vmovdqa %xmm2, 0x460(%rsp,%r11)
vmovdqa %xmm1, 0x4c0(%rsp,%r11)
addq $0x10, %r11
addq %rbx, %r14
jmp 0xfea17
vmovdqu (%r14,%rdi,4), %xmm1
vpslld $0x4, %xmm1, %xmm1
vmovdqu (%r14,%rcx,4), %xmm2
vmovdqu (%r14,%r12,4), %xmm3
vpsubd %xmm3, %xmm2, %xmm4
vpslld $0x2, %xmm4, %xmm4
vpaddd %xmm1, %xmm4, %xmm1
vmovdqu (%r14,%rsi,4), %xmm5
vmovdqu (%r14,%rbp,4), %xmm6
vpsubd %xmm6, %xmm5, %xmm7
vpslld $0x5, %xmm7, %xmm8
vpaddd %xmm1, %xmm8, %xmm1
vpaddd %xmm5, %xmm6, %xmm5
vpslld $0x4, %xmm5, %xmm6
vpaddd %xmm2, %xmm3, %xmm2
vpslld $0x2, %xmm2, %xmm3
vpaddd %xmm3, %xmm6, %xmm3
vpslld $0x3, %xmm7, %xmm6
vpaddd %xmm4, %xmm6, %xmm4
vpaddd (%r14), %xmm2, %xmm2
vpaddd %xmm5, %xmm2, %xmm2
vpslld $0x2, %xmm2, %xmm2
vmovdqa %xmm2, 0x3a0(%rsp)
vmovdqa %xmm4, 0x400(%rsp)
vmovdqa %xmm3, 0x460(%rsp)
vmovdqa %xmm1, 0x4c0(%rsp)
movq %rdx, %r14
movq %r9, %r11
cmpq $0x1d0, %r11 # imm = 0x1D0
je 0xfec0c
vmovdqa 0x310(%rsp,%r11), %xmm1
vmovdqa 0x320(%rsp,%r11), %xmm2
vmovdqa 0x330(%rsp,%r11), %xmm3
vmovdqa 0x340(%rsp,%r11), %xmm4
vpaddd %xmm1, %xmm2, %xmm5
vpsubd %xmm2, %xmm1, %xmm1
vpaddd %xmm3, %xmm4, %xmm2
vpaddd 0x300(%rsp,%r11), %xmm5, %xmm6
vpsubd %xmm4, %xmm3, %xmm3
vpaddd %xmm2, %xmm6, %xmm4
vpaddd %xmm3, %xmm3, %xmm6
vpaddd %xmm1, %xmm6, %xmm6
vpslld $0x2, %xmm2, %xmm2
vpaddd %xmm5, %xmm2, %xmm2
vpaddd 0x350(%rsp,%r11), %xmm1, %xmm1
vpslld $0x3, %xmm3, %xmm3
vpaddd %xmm3, %xmm1, %xmm1
vcvtdq2ps %xmm4, %xmm3
vmulps %xmm0, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm3
vcvtdq2ps %xmm6, %xmm4
vmulps %xmm0, %xmm4, %xmm4
vcvttps2dq %xmm4, %xmm4
vcvtdq2ps %xmm2, %xmm2
vmulps %xmm0, %xmm2, %xmm2
vcvttps2dq %xmm2, %xmm2
vcvtdq2ps %xmm1, %xmm1
vmulps %xmm0, %xmm1, %xmm1
vcvttps2dq %xmm1, %xmm1
vmovdqu %xmm3, -0x30(%r14)
vmovdqu %xmm4, -0x20(%r14)
vmovdqu %xmm2, -0x10(%r14)
vmovupd %xmm1, (%r14)
addq $0x60, %r11
addq %r8, %r14
jmp 0xfeb42
incq %r15
addq $0x10, %rax
addq $0x40, %rdx
jmp 0xfea06
incq %r13
movq 0x18(%rsp), %r14
addq 0x30(%rsp), %r14
jmp 0xfe9d5
movq 0x38(%rsp), %r14
incq %r14
jmp 0xfe963
leaq 0x290(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x100(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xfeca3
lock
decl (%rax)
jne 0xfeca3
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xfec9b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfeca3
movq %rsi, %rdi
callq 0x5f3e0
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xfecda
lock
decl (%rax)
jne 0xfecda
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
je 0xfecd2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfecda
movq %rsi, %rdi
callq 0x5f3e0
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xfed11
lock
decl (%rax)
jne 0xfed11
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xfed09
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfed11
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xf99e4
lock
decl (%rax)
jne 0xf99e4
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xfed4b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xf99e4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xf99e4
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x100(%rsp), %xmm0
vmovaps %xmm0, 0x290(%rsp)
movq 0x100(%rsp), %rax
movq %rax, 0x148(%rsp)
movq 0x110(%rsp), %rax
movq %rax, %r8
movq %rax, 0x2a0(%rsp)
movl 0x118(%rsp), %eax
movl %eax, 0x2a8(%rsp)
movq 0x120(%rsp), %rax
movq %rax, 0x2b0(%rsp)
vmovupd 0x128(%rsp), %xmm0
vmovupd %xmm0, 0x2b8(%rsp)
movl 0x138(%rsp), %eax
movl %eax, 0x2c8(%rsp)
movq 0x140(%rsp), %rax
movq %rax, 0x200(%rsp)
movq %rax, 0x2d0(%rsp)
movq 0x240(%rsp), %r9
movq 0x250(%rsp), %r11
movq 0x60(%rsp), %rdi
leal (%rdi,%rdi), %eax
cltq
leal (%rdi,%rdi,2), %ecx
movslq %ecx, %rcx
leal (,%rdi,4), %edx
movslq %edx, %rdx
leal (%rdi,%rdi,4), %esi
movslq %esi, %rsi
imull $0x6, %edi, %edi
movslq %edi, %r10
xorl %ebx, %ebx
movq 0x88(%rsp), %rdi
testl %edi, %edi
cmovlel %ebx, %edi
movq %rdi, 0x88(%rsp)
movslq 0xe8(%rsp), %rbx
imulq 0x280(%rsp), %r11
movq %r11, 0x320(%rsp)
shlq $0x2, %r10
leaq (%r9,%r13,4), %rdi
movq %rdi, 0x230(%rsp)
leaq (%r9,%rax,4), %rax
movq %rax, 0xf0(%rsp)
leaq (%r9,%rcx,4), %rax
movq %rax, 0x150(%rsp)
xorl %ecx, %ecx
leaq (%r9,%rdx,4), %rax
movq %rax, 0x2e0(%rsp)
movq %r9, 0xe8(%rsp)
leaq (%r9,%rsi,4), %rax
movq %rax, 0x158(%rsp)
addq $0xc, 0x148(%rsp)
movq 0x200(%rsp), %rax
imulq %r8, %rax
movq %rax, 0x200(%rsp)
shlq $0x2, %r8
movq %r8, 0x310(%rsp)
shlq $0x2, %rbx
movl $0x240, %r9d # imm = 0x240
movq %rbx, 0x70(%rsp)
cmpq 0x88(%rsp), %rcx
je 0xff1d2
movslq 0x2bc(%rsp), %rax
imulq 0x310(%rsp), %rax
movq %rax, 0x2d8(%rsp)
movq 0x148(%rsp), %rax
movq %rax, 0x78(%rsp)
xorl %edx, %edx
movq %rcx, 0x208(%rsp)
cmpq 0xf8(%rsp), %rdx
je 0xff182
movl 0xe0(%rsp), %eax
movq %rdx, 0xd8(%rsp)
imull %edx, %eax
cltd
pushq $0x6
popq %rcx
idivl %ecx
movslq %eax, %r13
movq 0x78(%rsp), %r8
movq 0x158(%rsp), %rdx
movq 0x2e0(%rsp), %rdi
movq 0x150(%rsp), %r12
movq 0xf0(%rsp), %rbp
movq 0x230(%rsp), %r15
movq 0xe8(%rsp), %rax
xorl %ecx, %ecx
cmpq 0x18(%rsp), %rcx
je 0xff158
movq %rcx, 0x80(%rsp)
movq %r8, 0x60(%rsp)
movq %rdx, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movq %r12, 0x58(%rsp)
movq %rbp, 0x50(%rsp)
movq %r15, 0x38(%rsp)
movq %rax, 0x48(%rsp)
pushq $-0x5
popq %rsi
testq %rsi, %rsi
je 0xff01c
movl (%r15,%r13,4), %r11d
movl (%rbp,%r13,4), %r8d
leal (%r8,%r11), %ebx
subl %r8d, %r11d
movl (%r12,%r13,4), %r8d
movl (%rdi,%r13,4), %ecx
leal (%rcx,%r8), %r14d
subl %ecx, %r8d
leal (%rbx,%r14,4), %ecx
addl (%rax,%r13,4), %ebx
addl %r14d, %ebx
movl %ebx, 0x364(%rsp,%rsi,4)
leal (%r11,%r8,2), %ebx
movl %ebx, 0x37c(%rsp,%rsi,4)
movl %ecx, 0x394(%rsp,%rsi,4)
movl (%rdx,%r13,4), %ecx
leal (%r11,%r8,8), %r8d
leal (%r8,%rcx,4), %ecx
movl %ecx, 0x3ac(%rsp,%rsi,4)
incq %rsi
addq %r10, %rax
addq %r10, %r15
addq %r10, %rbp
addq %r10, %r12
addq %r10, %rdi
addq %r10, %rdx
jmp 0xfefaa
movl (%r12,%r13,4), %ecx
movl (%rdi,%r13,4), %edi
movl %ecx, %esi
subl %edi, %esi
movl %esi, %r8d
shll $0x5, %r8d
movl (%r15,%r13,4), %ebx
movl (%rbp,%r13,4), %r11d
movl %ebx, %r14d
subl %r11d, %r14d
leal (%r8,%r14,4), %r8d
movl (%rdx,%r13,4), %edx
shll $0x4, %edx
addl %r8d, %edx
addl %ecx, %edi
movl %edi, %ecx
shll $0x4, %ecx
addl %ebx, %r11d
leal (%rcx,%r11,4), %ecx
shll $0x3, %esi
addl %edi, %r11d
addl (%rax,%r13,4), %r11d
leal (%rsi,%r14,4), %eax
shll $0x2, %r11d
movl %r11d, 0x364(%rsp)
movl %eax, 0x37c(%rsp)
movl %ecx, 0x394(%rsp)
movl %edx, 0x3ac(%rsp)
movq 0x60(%rsp), %r8
movq %r8, %rsi
pushq $0x14
popq %rdi
movq 0x70(%rsp), %rbx
movq 0x50(%rsp), %rbp
cmpq $0x74, %rdi
je 0xff113
movl 0x340(%rsp,%rdi), %r15d
movl 0x344(%rsp,%rdi), %eax
leal (%rax,%r15), %r11d
subl %eax, %r15d
movl 0x348(%rsp,%rdi), %r12d
movl 0x34c(%rsp,%rdi), %eax
leal (%rax,%r12), %ecx
subl %eax, %r12d
movl 0x33c(%rsp,%rdi), %eax
addl %r11d, %eax
addl %ecx, %eax
cltd
idivl %r9d
movl %eax, -0xc(%rsi)
leal (%r15,%r12,2), %eax
cltd
idivl %r9d
movl %eax, -0x8(%rsi)
leal (%r11,%rcx,4), %eax
cltd
idivl %r9d
movl %eax, -0x4(%rsi)
leal (%r15,%r12,8), %eax
addl 0x350(%rsp,%rdi), %eax
cltd
idivl %r9d
movl %eax, (%rsi)
addq $0x18, %rdi
addq %rbx, %rsi
jmp 0xff09e
movq 0x80(%rsp), %rcx
incq %rcx
movq 0x48(%rsp), %rax
addq $0x4, %rax
movq 0x38(%rsp), %r15
addq $0x4, %r15
addq $0x4, %rbp
movq 0x58(%rsp), %r12
addq $0x4, %r12
movq 0x30(%rsp), %rdi
addq $0x4, %rdi
movq 0x28(%rsp), %rdx
addq $0x4, %rdx
addq $0x10, %r8
jmp 0xfef71
movq 0xd8(%rsp), %rdx
incq %rdx
movq 0x78(%rsp), %rax
addq 0x2d8(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x208(%rsp), %rcx
jmp 0xfef11
incq %rcx
movq 0x320(%rsp), %rax
addq %rax, 0xe8(%rsp)
addq %rax, 0x230(%rsp)
addq %rax, 0xf0(%rsp)
addq %rax, 0x150(%rsp)
addq %rax, 0x2e0(%rsp)
addq %rax, 0x158(%rsp)
movq 0x200(%rsp), %rax
addq %rax, 0x148(%rsp)
jmp 0xfeed3
leaq 0x290(%rsp), %rdi
movl 0x2c(%rdi), %r9d
movl 0x30(%rdi), %ecx
leaq 0x100(%rsp), %rsi
subl 0x30(%rsi), %ecx
subl 0x2c(%rsi), %r9d
movq 0x40(%rsp), %rax
movq %rax, (%rsp)
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x6489a
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xff23a
lock
decl (%rax)
jne 0xff23a
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xff232
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xff23a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xff271
lock
decl (%rax)
jne 0xff271
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
je 0xff269
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xff271
movq %rsi, %rdi
callq 0x5f3e0
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xff2a8
lock
decl (%rax)
jne 0xff2a8
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0xff2a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xff2a8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xfbc1a
lock
decl (%rax)
jne 0xfbc1a
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xff2e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xfbc1a
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xfbc1a
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xff45c
lock
decl (%rax)
jne 0xff45c
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xff44c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xff45c
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
jmp 0xff45c
jmp 0xffaff
jmp 0xff422
movq %rax, %rbx
jmp 0xff556
jmp 0xffaff
jmp 0xff51c
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xff616
jmp 0xff39c
movq %rax, %rbx
jmp 0xff493
movq %rax, %rbx
jmp 0xff4ca
jmp 0xff3ae
movq %rax, %rbx
jmp 0xff58d
movq %rax, %rbx
jmp 0xff5c4
jmp 0xff61b
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x298(%rsp), %rax
testq %rax, %rax
je 0xff556
lock
decl (%rax)
jne 0xff556
movq 0x290(%rsp), %rsi
movq 0x2b0(%rsp), %rdi
testq %rdi, %rdi
je 0xff546
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xff556
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xff45c
lock
decl (%rax)
jne 0xff45c
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
jne 0xff456
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff45c
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xff493
lock
decl (%rax)
jne 0xff493
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
jne 0xff48d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff493
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xff4ca
lock
decl (%rax)
jne 0xff4ca
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
jne 0xff4c4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff4ca
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xff556
lock
decl (%rax)
jne 0xff556
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
jne 0xff550
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff556
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xff58d
lock
decl (%rax)
jne 0xff58d
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
jne 0xff587
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff58d
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xff5c4
lock
decl (%rax)
jne 0xff5c4
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
jne 0xff5be
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff5c4
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
jmp 0xff66f
movq %rax, %rbx
jmp 0xff6a6
movq %rax, %rbx
jmp 0xff6dd
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x248(%rsp), %rax
testq %rax, %rax
je 0xff66f
lock
decl (%rax)
jne 0xff66f
movq 0x240(%rsp), %rsi
movq 0x260(%rsp), %rdi
testq %rdi, %rdi
jne 0xff669
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff66f
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0xff6a6
lock
decl (%rax)
jne 0xff6a6
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
jne 0xff6a0
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff6a6
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1b8(%rsp), %rax
testq %rax, %rax
je 0xff6dd
lock
decl (%rax)
jne 0xff6dd
movq 0x1b0(%rsp), %rsi
movq 0x1d0(%rsp), %rdi
testq %rdi, %rdi
jne 0xff6d7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xff6dd
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
je 0xffa16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
movq %rax, %rbx
movq 0x358(%rsp), %rax
testq %rax, %rax
je 0xffa3f
lock
decl (%rax)
jne 0xffa3f
movq 0x350(%rsp), %rsi
movq 0x370(%rsp), %rdi
testq %rdi, %rdi
jne 0xffa20
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xffa3f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa3f
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
jmp 0xffaff
movq %rax, %rbx
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0xffa87
lock
decl (%rax)
jne 0xffa87
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
jne 0xffa70
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xffa87
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xffa87
jmp 0xffaff
jmp 0xffa84
movq %rax, %rbx
jmp 0xffabe
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0xffabe
lock
decl (%rax)
jne 0xffabe
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0xffab8
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xffabe
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x5d8(%rsp), %rax
testq %rax, %rax
je 0xffaf5
lock
decl (%rax)
jne 0xffaf5
movq 0x5d0(%rsp), %rsi
movq 0x5f0(%rsp), %rdi
testq %rdi, %rdi
jne 0xffaef
movq %rsi, %rdi
callq 0x5f3e0
jmp 0xffaf5
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0xffaff
movq %rax, %rdi
callq 0x61d68
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::Convolution_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const
|
int Convolution_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(21, dilation_h);
pd.set(3, stride_w);
pd.set(31, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
}
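The C++ above is self-contained: the dynamic-weight path flattens the incoming weight and bias blobs to elempack 1 and delegates to a freshly built Convolution layer. The same pattern works from user code. A minimal sketch, assuming the public ncnn headers; the helper name run_dynamic_conv and the return-code plumbing are illustrative additions, while the parameter IDs match the pd.set calls above:

#include "layer.h"      // ncnn::Layer, ncnn::create_layer
#include "modelbin.h"   // ncnn::ModelBinFromMatArray
#include "paramdict.h"  // ncnn::ParamDict

static int run_dynamic_conv(const ncnn::Mat& bottom, const ncnn::Mat& weight1d,
                            const ncnn::Mat& bias1d, int num_output,
                            int kernel_w, int kernel_h,
                            ncnn::Mat& top, const ncnn::Option& opt)
{
    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);

    ncnn::ParamDict pd;
    pd.set(0, num_output);             // num_output
    pd.set(1, kernel_w);               // kernel_w
    pd.set(11, kernel_h);              // kernel_h
    pd.set(5, bias1d.empty() ? 0 : 1); // bias_term
    pd.set(6, weight1d.w);             // weight_data_size (flattened, pack1)
    op->load_param(pd);

    ncnn::Mat weights[2];
    weights[0] = weight1d;
    weights[1] = bias1d;
    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);
    int ret = op->forward(bottom, top, opt);
    op->destroy_pipeline(opt);
    delete op;
    return ret;
}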
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rsi, %rbp
movq %rdi, %r13
movq (%rsi), %r14
leaq 0x48(%r14), %rdi
movq (%rdx), %rax
movq %rax, 0xb8(%rsp)
movl 0x60(%r14), %ebx
movl 0x74(%r14), %eax
movl %eax, 0x1c(%rsp)
movl 0x78(%r14), %eax
movl %eax, 0x18(%rsp)
imull 0x80(%r14), %ebx
leaq 0x70(%rsp), %rsi
andq $0x0, 0x40(%rsi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rsi)
vmovups %xmm0, 0xc(%rsi)
vmovaps %xmm0, 0x20(%rsi)
vmovups %xmm0, 0x2c(%rsi)
movq %rcx, %r15
movq %rcx, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x10072a
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x10072a
movslq 0x88(%rsp), %rcx
movl 0x9c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x9c(%rsp)
movq 0x80(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r13,%rax)
je 0x100290
movl $0x90, %edi
addq (%rbp), %rdi
leaq 0x20(%rsp), %rsi
movq %r15, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x20(%rsp)
je 0x1006fc
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x1006fc
movslq 0x38(%rsp), %rcx
movl 0x4c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x4c(%rsp)
movq 0x30(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
pushq $0x6
popq %rdi
callq 0x782bf
movq %rax, %r12
leaq 0x8(%rsp), %rdi
callq 0x71548
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movl %ebx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
movl 0x1c(%rsp), %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
movl 0x18(%rsp), %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x15
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x1f
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xec(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xf
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xe
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x10
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
vmovss 0xfc(%r13,%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x12
popq %rsi
callq 0x71952
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x100(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movl 0x9c(%rsp), %edx
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x108(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rdx
addq $0x110, %rdx # imm = 0x110
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%r12), %rax
leaq 0x8(%rsp), %rsi
movq %r12, %rdi
callq *0x10(%rax)
andq $0x0, 0x110(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovups %xmm0, 0xdc(%rsp)
vmovaps %xmm0, 0xf0(%rsp)
vmovups %xmm0, 0xfc(%rsp)
andq $0x0, 0x158(%rsp)
vmovups %xmm0, 0x118(%rsp)
vmovups %xmm0, 0x124(%rsp)
vmovups %xmm0, 0x138(%rsp)
vmovups %xmm0, 0x144(%rsp)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x1004e1
lock
incl (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x100518
lock
decl (%rax)
jne 0x100518
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x100510
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x100518
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0x88(%rsp), %eax
movl %eax, 0xe8(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0xf0(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x108(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x100592
lock
incl (%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x1005c9
lock
decl (%rax)
jne 0x1005c9
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x1005c1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1005c9
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
leaq 0xd0(%rsp), %rsi
vmovups %xmm0, 0x48(%rsi)
movq 0x30(%rsp), %rax
movq %rax, 0x58(%rsi)
movl 0x38(%rsp), %eax
movl %eax, 0x60(%rsi)
movq 0x40(%rsp), %rax
movq %rax, 0x68(%rsi)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsi)
movl 0x58(%rsp), %eax
movl %eax, 0x80(%rsi)
movq 0x60(%rsp), %rax
movq %rax, 0x88(%rsi)
leaq 0xc0(%rsp), %rdi
callq 0x6b00e
movq (%r12), %rax
leaq 0xc0(%rsp), %rsi
movq %r12, %rdi
callq *0x18(%rax)
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r14, %rsi
movq 0xb8(%rsp), %rdx
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x28(%rax)
movq (%r12), %rax
movq %r12, %rdi
callq *0x8(%rax)
pushq $0x48
popq %rbx
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%rbx), %rax
testq %rax, %rax
je 0x1006c4
lock
decl (%rax)
jne 0x1006c4
movq 0xd0(%rsp,%rbx), %rsi
movq 0xf0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x1006b8
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x1006c4
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x100685
leaq 0x8(%rsp), %rdi
callq 0x71614
xorl %r12d, %r12d
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x10072a
lock
decl (%rax)
jne 0x10072a
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x100722
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10072a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x10075b
lock
decl (%rax)
jne 0x10075b
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x100753
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10075b
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x100890
jmp 0x100890
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
jmp 0x10079d
jmp 0x10079a
jmp 0x100792
jmp 0x100792
movq %rax, %rbx
jmp 0x100827
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%r14), %rax
testq %rax, %rax
je 0x1007e4
lock
decl (%rax)
jne 0x1007e4
movq 0xd0(%rsp,%r14), %rsi
movq 0xf0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x1007d8
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x1007e4
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x1007a5
jmp 0x10081d
jmp 0x100890
movq %rax, %rbx
jmp 0x100855
jmp 0x100890
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x100855
lock
decl (%rax)
jne 0x100855
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x10084f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x100855
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x100886
lock
decl (%rax)
jne 0x100886
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x100880
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x100886
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x100890
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
virtual thunk to ncnn::Convolution_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const
|
movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x100126
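The three instructions above are the whole thunk: load the vtable pointer, add the this-adjustment stored at vtable offset -0x40 (a vcall offset, the signature of virtual inheritance), and tail-jump into the real forward at 0x100126. A hedged sketch of the class shape that makes compilers emit such thunks (names illustrative, not ncnn's exact hierarchy):

// Overriding a virtual inherited through a *virtual* base needs a
// this-adjusting entry point, because the base subobject's offset is only
// known at run time and therefore lives in the vtable, not in the code.
struct Conv { virtual int forward_impl() { return 0; } virtual ~Conv() {} };
struct Conv_x86 : virtual Conv {
    int forward_impl() override { return 1; } // calls via Conv* hit the thunk
};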
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
|
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(32 * maxk, inch / 8, outch / 4, (size_t)1u);
for (int q = 0; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
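The dst = 8a-4b-maxk-inch/8a-outch/4b comment pins down where every byte lands. A scalar restatement in plain C++ (hypothetical helper name; src is indexed exactly like kernel.channel(q + i).row<const signed char>(p + j)[k] above) makes the addressing explicit:

// For each block of 4 output channels and 8 input channels, the 32 weights
// of one spatial position k are stored contiguously, i-major, j-minor.
static void interleave_pack8to4(const signed char* src, // layout: outch-inch-maxk
                                signed char* dst, int inch, int outch, int maxk)
{
    signed char* g00 = dst;
    for (int q = 0; q + 3 < outch; q += 4)
        for (int p = 0; p + 7 < inch; p += 8)
            for (int k = 0; k < maxk; k++)
                for (int i = 0; i < 4; i++)
                    for (int j = 0; j < 8; j++)
                        *g00++ = src[((q + i) * inch + (p + j)) * maxk + k];
}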
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebx
movl %ecx, %ebp
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %rsi
imull %r9d, %ebx
xorl %r12d, %r12d
movq %rsp, %rdi
movl %ebx, %edx
movl %r15d, %ecx
movl %ebp, %r8d
xorl %r9d, %r9d
callq 0x63020
movl %ebx, %esi
shll $0x5, %esi
pushq $0x8
popq %rcx
movl %r15d, %eax
cltd
idivl %ecx
movl %eax, %ecx
pushq $0x4
popq %rdi
movl %ebp, %eax
cltd
idivl %edi
pushq $0x1
popq %r8
movq %r14, %rdi
movl %ecx, %edx
movl %eax, %ecx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
cmovlel %r12d, %ebx
movslq %r15d, %rax
movslq %ebp, %rcx
movq %r12, %rdx
orq $0x3, %rdx
cmpq %rcx, %rdx
jge 0x1009b5
movq %r12, %rdx
shrq $0x2, %rdx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0x1009ac
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x1009a6
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x1009a1
movq %r8, %r10
orq %r12, %r10
xorl %r9d, %r9d
cmpq $0x8, %r9
je 0x100999
movq 0x40(%rsp), %r11
imulq %r10, %r11
movq 0x10(%rsp), %r15
imulq %r15, %r11
addq (%rsp), %r11
movslq 0x2c(%rsp), %r13
leaq (%rsi,%r9), %rbp
imulq %r13, %rbp
imulq %r15, %rbp
addq %r11, %rbp
movb (%rdi,%rbp), %r11b
movb %r11b, (%rdx,%r9)
incq %r9
jmp 0x10095c
incq %r8
addq %r9, %rdx
jmp 0x10094d
incq %rdi
jmp 0x100945
addq $0x8, %rsi
jmp 0x100937
addq $0x4, %r12
jmp 0x100911
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x1009e2
lock
decl (%rax)
jne 0x1009e2
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x1009da
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1009e2
movq %rsi, %rdi
callq 0x5f3e0
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x100a2b
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x100a23
lock
decl (%rax)
jne 0x100a23
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x100a1d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x100a23
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to4_int8.h
|
ncnn::convolution_transform_kernel_packed_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int, int, int)
|
static void convolution_transform_kernel_packed_int8_sse(const Mat& weight_data, Mat& weight_data_tm, int num_input, int num_output, int kernel_w, int kernel_h, int elempack, int out_elempack)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = pa-pb-kw-kh-inch/pa-outch/pb
{
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_tm.create(maxk, num_input / elempack, num_output / out_elempack, (size_t)elempack * out_elempack, elempack * out_elempack);
for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
{
signed char* g00 = weight_data_tm.channel(q / out_elempack);
for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < out_elempack; i++)
{
for (int j = 0; j < elempack; j++)
{
const signed char* k00 = weight_data_r2.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
}
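The same index map generalizes to arbitrary pack factors; a sketch under the same assumptions (hypothetical helper, mirroring the loop structure above):

// Byte offset inside weight_data_tm.channel(q / out_elempack):
static size_t packed_dst_offset(int q, int p, int k, int maxk, int elempack, int out_elempack)
{
    size_t block = ((size_t)(p / elempack) * maxk + k) * out_elempack + (q % out_elempack);
    return block * elempack + (p % elempack);
}
// With elempack = 8 and out_elempack = 4 this reduces to the pack8to4
// offset shown earlier.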
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movl %r8d, %ebx
movl %ecx, %r12d
movl %edx, %r13d
movq %rsi, %r14
movq %rdi, %rsi
movl 0xb0(%rsp), %ebp
imull %r9d, %ebx
xorl %r15d, %r15d
leaq 0x30(%rsp), %rdi
movl %ebx, %edx
movl %r13d, %ecx
movl %r12d, %r8d
xorl %r9d, %r9d
callq 0x63020
movl %r13d, 0x10(%rsp)
movl %r13d, %eax
movl 0xb8(%rsp), %r13d
cltd
idivl %ebp
movl %eax, %ecx
movl %r12d, 0x18(%rsp)
movl %r12d, %eax
cltd
idivl %r13d
movq %r13, %r8
imulq %rbp, %r8
movl %r13d, %r9d
imull %ebp, %r9d
andq $0x0, (%rsp)
movq %r14, 0x20(%rsp)
movq %r14, %rdi
movl %ebx, %esi
movl %ecx, %edx
movl %eax, %ecx
callq 0x628f2
leal -0x1(%r13), %eax
testl %ebp, %ebp
movl $0x0, %ecx
cmovgl %ebp, %ecx
testl %r13d, %r13d
movl $0x0, %esi
cmovgl %r13d, %esi
leal -0x1(%rbp), %edx
testl %ebx, %ebx
cmovlel %r15d, %ebx
movslq %ebp, %rdi
movq %rdi, 0x28(%rsp)
movslq %edx, %rdx
movslq 0x10(%rsp), %r8
subq %rdx, %r8
movslq %r13d, %rdx
movq %rdx, 0x10(%rsp)
cltq
movslq 0x18(%rsp), %rdx
subq %rax, %rdx
movq %rdx, 0x18(%rsp)
cmpq 0x18(%rsp), %r15
jge 0x100bad
movl %r15d, %eax
cltd
idivl %r13d
cltq
movq 0x20(%rsp), %rdx
imulq 0x40(%rdx), %rax
imulq 0x10(%rdx), %rax
addq (%rdx), %rax
xorl %edx, %edx
cmpq %r8, %rdx
jge 0x100b9b
xorl %r11d, %r11d
cmpq %rbx, %r11
je 0x100b94
xorl %r12d, %r12d
cmpq %rsi, %r12
je 0x100b8f
leaq (%r12,%r15), %r13
xorl %ebp, %ebp
cmpq %rbp, %rcx
je 0x100b87
movq 0x70(%rsp), %r9
imulq %r13, %r9
movq 0x40(%rsp), %r10
imulq %r10, %r9
addq 0x30(%rsp), %r9
movslq 0x5c(%rsp), %r14
leaq (%rdx,%rbp), %rdi
imulq %r14, %rdi
imulq %r10, %rdi
addq %r9, %rdi
movb (%r11,%rdi), %dil
movb %dil, (%rax,%rbp)
incq %rbp
jmp 0x100b4a
incq %r12
addq %rbp, %rax
jmp 0x100b3f
incq %r11
jmp 0x100b37
addq 0x28(%rsp), %rdx
jmp 0x100b2f
addq 0x10(%rsp), %r15
movl 0xb8(%rsp), %r13d
jmp 0x100b07
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x100bdb
lock
decl (%rax)
jne 0x100bdb
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x100bd3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x100bdb
movq %rsi, %rdi
callq 0x5f3e0
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x100c25
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x100c1d
lock
decl (%rax)
jne 0x100c1d
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x100c17
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x100c1d
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_fma.cpp
|
ncnn::convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
|
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b (the outch%4 tail is stored one output channel per block, as 8a-maxk-inch/8a)
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (outch >= 4)
kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);
int q = 0;
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// TODO unroll 2
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
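Worked example for the channel indexing above, assuming outch = 6 (not a multiple of 4): kernel_tm is created with 6/4 + 6%4 = 3 channels; q = 0..3 interleave into channel 0 exactly as in the pack8to4 case, while the tail channels q = 4 and q = 5 land in channels 4/4 + 4%4 = 1 and 5/4 + 5%4 = 2, each storing 8*maxk bytes per group of 8 input channels.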
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebx
movl %ecx, %ebp
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %rsi
imull %r9d, %ebx
movq %rsp, %rdi
movl %ebx, %edx
movl %r15d, %ecx
movl %ebp, %r8d
xorl %r9d, %r9d
callq 0x63020
pushq $0x8
popq %rcx
movl %r15d, %eax
cltd
idivl %ecx
cmpl $0x4, %ebp
jl 0x100e79
movl %ebx, %esi
shll $0x5, %esi
movl %ebp, %ecx
shrl $0x2, %ecx
imull $-0x3, %ecx, %ecx
addl %ebp, %ecx
jmp 0x100e82
leal (,%rbx,8), %esi
movl %ebp, %ecx
xorl %r12d, %r12d
pushq $0x1
popq %r8
movq %r14, %rdi
movl %eax, %edx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
cmovlel %r12d, %ebx
movslq %r15d, %rax
movslq %ebp, %rcx
movq %r12, %rdx
orq $0x3, %rdx
cmpq %rcx, %rdx
jge 0x100fc7
movq %r12, %rdx
shrq $0x2, %rdx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0x100f3d
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x100f37
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x100f32
movq %r8, %r10
orq %r12, %r10
xorl %r9d, %r9d
cmpq $0x8, %r9
je 0x100f2a
movq 0x40(%rsp), %r11
imulq %r10, %r11
movq 0x10(%rsp), %r15
imulq %r15, %r11
addq (%rsp), %r11
movslq 0x2c(%rsp), %r13
leaq (%rsi,%r9), %rbp
imulq %r13, %rbp
imulq %r15, %rbp
addq %r11, %rbp
movb (%rdi,%rbp), %r11b
movb %r11b, (%rdx,%r9)
incq %r9
jmp 0x100eed
incq %r8
addq %r9, %rdx
jmp 0x100ede
incq %rdi
jmp 0x100ed6
addq $0x8, %rsi
jmp 0x100ec8
addq $0x4, %r12
jmp 0x100ea2
movl %r12d, %esi
shrl $0x2, %esi
movl %r12d, %edx
andl $0x3, %edx
addl %esi, %edx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0x100fc4
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x100fbe
xorl %r8d, %r8d
cmpq $0x8, %r8
je 0x100fb6
movq 0x40(%rsp), %r9
imulq %r12, %r9
movq 0x10(%rsp), %r10
imulq %r10, %r9
addq (%rsp), %r9
movslq 0x2c(%rsp), %r11
leaq (%rsi,%r8), %r15
imulq %r11, %r15
imulq %r10, %r15
addq %r9, %r15
movb (%rdi,%r15), %r9b
movb %r9b, (%rdx,%r8)
incq %r8
jmp 0x100f79
incq %rdi
addq %r8, %rdx
jmp 0x100f71
addq $0x8, %rsi
jmp 0x100f63
incq %r12
cmpq %rcx, %r12
jl 0x100f46
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x100ffd
lock
decl (%rax)
jne 0x100ffd
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x100ff5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x100ffd
movq %rsi, %rdi
callq 0x5f3e0
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x101046
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x10103e
lock
decl (%rax)
jne 0x10103e
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x101038
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10103e
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1_int8.h
|
ncnn::convolution_im2col_sgemm_transform_kernel_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
|
static void convolution_im2col_sgemm_transform_kernel_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
#if __SSE2__
// interleave
// src = maxk-inch-outch
// dst = 4a-4b-maxk-inch/4a-outch/4b (with 1a / 1b tail loops for the inch%4 and outch%4 remainders)
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (outch >= 4)
{
if (inch >= 4)
kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, (size_t)1u);
}
else
{
if (inch >= 4)
kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, (size_t)1u);
else
kernel_tm.create(1 * maxk, inch, outch, (size_t)1u);
}
int q = 0;
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
int p = 0;
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
}
// TODO unroll 2
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
int p = 0;
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
#else // __SSE2__
kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __SSE2__
}
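This variant adds remainder loops at both the inch and the outch level. A hypothetical scalar reference for the 4-wide outch blocks, assuming inch >= 4 and writing inch4 for inch rounded down to a multiple of 4:

static size_t sgemm_int8_dst_offset(int q, int p, int k, int maxk, int inch)
{
    const int inch4 = inch & ~3;
    if (p < inch4) // 4a-4b body
        return ((size_t)(p / 4) * maxk + k) * 16 + (q % 4) * 4 + (p % 4);
    const size_t base = (size_t)(inch4 / 4) * maxk * 16;
    return base + ((size_t)(p - inch4) * maxk + k) * 4 + (q % 4); // 1a-4b tail
}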
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebx
movl %ecx, %ebp
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %rsi
imull %r9d, %ebx
movq %rsp, %rdi
movl %ebx, %edx
movl %r15d, %ecx
movl %ebp, %r8d
xorl %r9d, %r9d
callq 0x63020
cmpl $0x4, %ebp
jl 0x1010a9
cmpl $0x4, %r15d
jl 0x1010cd
movl %ebx, %esi
shll $0x4, %esi
movl %r15d, %eax
shrl $0x2, %eax
imull $-0x3, %eax, %edx
addl %r15d, %edx
movl %ebp, %eax
shrl $0x2, %eax
imull $-0x3, %eax, %ecx
addl %ebp, %ecx
jmp 0x1010e1
movl %ebx, %esi
movl %r15d, %edx
movl %ebp, %ecx
cmpl $0x4, %r15d
jl 0x1010e1
leal (,%rbx,4), %esi
movl %r15d, %eax
shrl $0x2, %eax
imull $-0x3, %eax, %edx
addl %r15d, %edx
movl %ebp, %ecx
jmp 0x1010e1
leal (,%rbx,4), %esi
movl %ebp, %eax
shrl $0x2, %eax
imull $-0x3, %eax, %ecx
addl %ebp, %ecx
movl %r15d, %edx
xorl %r12d, %r12d
pushq $0x1
popq %r8
movq %r14, %rdi
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
cmovlel %r12d, %ebx
movslq %r15d, %rax
movslq %ebp, %rcx
movq %r12, %rdx
orq $0x3, %rdx
cmpq %rcx, %rdx
jge 0x1012be
movq %r12, %rdx
shrq $0x2, %rdx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x3, %rdi
cmpq %rax, %rdi
jge 0x1011ed
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x101198
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x101193
movq %r8, %r10
orq %r12, %r10
xorl %r9d, %r9d
cmpq $0x4, %r9
je 0x10118b
movq 0x40(%rsp), %r11
imulq %r10, %r11
movq 0x10(%rsp), %r15
imulq %r15, %r11
addq (%rsp), %r11
movslq 0x2c(%rsp), %r13
leaq (%rsi,%r9), %rbp
imulq %r13, %rbp
imulq %r15, %rbp
addq %r11, %rbp
movb (%rdi,%rbp), %r11b
movb %r11b, (%rdx,%r9)
incq %r9
jmp 0x10114e
incq %r8
addq %r9, %rdx
jmp 0x10113f
incq %rdi
jmp 0x101137
addq $0x4, %rsi
jmp 0x101125
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x1011ea
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x1011e2
leaq (%r12,%r8), %r9
movslq 0x2c(%rsp), %r10
imulq 0x40(%rsp), %r9
movq 0x10(%rsp), %r11
imulq %r11, %r9
addq (%rsp), %r9
imulq %rsi, %r10
imulq %r11, %r10
addq %r9, %r10
movb (%rdi,%r10), %r9b
movb %r9b, (%rdx,%r8)
incq %r8
jmp 0x1011a8
incq %rdi
addq %r8, %rdx
jmp 0x1011a0
incq %rsi
cmpq %rax, %rsi
jl 0x10119e
addq $0x4, %r12
jmp 0x1010ff
movl %r12d, %esi
shrl $0x2, %esi
movl %r12d, %edx
andl $0x3, %edx
addl %esi, %edx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x3, %rdi
cmpq %rax, %rdi
jge 0x1012b6
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x101277
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x10126f
movq 0x40(%rsp), %r9
imulq %r12, %r9
movq 0x10(%rsp), %r10
imulq %r10, %r9
addq (%rsp), %r9
movslq 0x2c(%rsp), %r11
leaq (%rsi,%r8), %r15
imulq %r11, %r15
imulq %r10, %r15
addq %r9, %r15
movb (%rdi,%r15), %r9b
movb %r9b, (%rdx,%r8)
incq %r8
jmp 0x101232
incq %rdi
addq %r8, %rdx
jmp 0x10122a
addq $0x4, %rsi
jmp 0x101218
xorl %edi, %edi
cmpq %rdi, %rbx
je 0x1012b0
movslq 0x2c(%rsp), %r8
movq 0x40(%rsp), %r9
imulq %r12, %r9
imulq %rsi, %r8
addq %r9, %r8
imulq 0x10(%rsp), %r8
addq (%rsp), %r8
movb (%rdi,%r8), %r8b
movb %r8b, (%rdx,%rdi)
incq %rdi
jmp 0x10127f
incq %rsi
addq %rdi, %rdx
cmpq %rax, %rsi
jl 0x10127d
incq %r12
cmpq %rcx, %r12
jl 0x1011fb
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x1012f4
lock
decl (%rax)
jne 0x1012f4
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x1012ec
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1012f4
movq %rsi, %rdi
callq 0x5f3e0
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x10133d
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x101335
lock
decl (%rax)
jne 0x101335
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x10132f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x101335
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_int8.h
|
ncnn::get_optimal_tile_mnk(int, int, int, int&, int&, int&, int)
|
static void get_optimal_tile_mnk(int M, int N, int K, int& TILE_M, int& TILE_N, int& TILE_K, int nT)
{
// resolve optimal tile size from cache size
const size_t l2_cache_size = get_cpu_level2_cache_size();
if (nT == 0)
nT = get_physical_big_cpu_count();
// solve M
{
int tile_size = (int)sqrt((float)l2_cache_size / sizeof(float) / 3);
#if __AVX512F__
TILE_M = std::max(16, tile_size / 16 * 16);
#elif __AVX__
TILE_M = std::max(8, tile_size / 8 * 8);
#elif __SSE2__
TILE_M = std::max(4, tile_size / 4 * 4);
#else
TILE_M = std::max(2, tile_size / 2 * 2);
#endif
TILE_M *= std::min(nT, get_physical_cpu_count());
int nn_M = (M + TILE_M - 1) / TILE_M;
#if __AVX512F__
TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 15) / 16 * 16);
#elif __AVX__
TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 7) / 8 * 8);
#elif __SSE2__
TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 3) / 4 * 4);
#else
TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 1) / 2 * 2);
#endif
if (nT > 1)
{
#if __AVX512F__
TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 15) / 16 * 16);
#elif __AVX__
TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 7) / 8 * 8);
#elif __SSE2__
TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 3) / 4 * 4);
#else
TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 1) / 2 * 2);
#endif
}
}
// solve K
{
int tile_size = (int)(sqrt((float)l2_cache_size / sizeof(float)) - TILE_M);
#if __AVX512F__
TILE_K = std::max(16, tile_size / 16 * 16);
#elif __AVX__
TILE_K = std::max(8, tile_size / 8 * 8);
#elif __SSE2__
TILE_K = std::max(4, tile_size / 4 * 4);
#else
TILE_K = std::max(2, tile_size / 2 * 2);
#endif
int nn_K = (K + TILE_K - 1) / TILE_K;
#if __AVX512F__
TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 15) / 16 * 16);
#elif __AVX__
TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 7) / 8 * 8);
#elif __SSE2__
TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 3) / 4 * 4);
#else
TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 1) / 2 * 2);
#endif
}
if (N > 0)
{
int tile_size = (int)(((float)l2_cache_size / sizeof(float) - TILE_M * TILE_K) / (TILE_M + TILE_K));
#if __AVX512F__
TILE_N = std::max(4, tile_size / 4 * 4);
#elif __AVX__
TILE_N = std::max(4, tile_size / 4 * 4);
#elif __SSE2__
TILE_N = std::max(4, tile_size / 4 * 4);
#else
TILE_N = std::max(1, tile_size);
#endif
int nn_N = (N + TILE_N - 1) / TILE_N;
#if __AVX512F__
TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4);
#elif __AVX__
TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4);
#elif __SSE2__
TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4);
#else
TILE_N = std::min(TILE_N, (N + nn_N - 1) / nn_N);
#endif
}
}
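Worked example, assuming the __AVX__ path compiled here (the assembly below rounds to multiples of 8) and a 256 KiB L2 cache: tile_size = (int)sqrt(262144 / 4.0f / 3) = 147, so the initial TILE_M = 147 / 8 * 8 = 144; it is then multiplied by min(nT, physical core count) and clamped back down by the nn_M and per-thread rounds, which only ever shrink it to a smaller multiple of 8.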
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %r9, 0x20(%rsp)
movq %r8, 0x18(%rsp)
movq %rcx, %r15
movl %edx, %r13d
movq %rsi, 0x10(%rsp)
movl %edi, %ebx
movl 0x60(%rsp), %ebp
callq 0x73479
movl %eax, %r12d
movslq %eax, %r14
testl %ebp, %ebp
jne 0x101710
callq 0x7345d
movl %eax, %ebp
testq %r14, %r14
js 0x10171c
vcvtsi2ss %r12d, %xmm0, %xmm0
jmp 0x10172f
shrq %r14
andl $0x1, %r12d
orq %r14, %r12
vcvtsi2ss %r12, %xmm0, %xmm0
vaddss %xmm0, %xmm0, %xmm0
vmulss 0x2f03d9(%rip), %xmm0, %xmm1 # 0x3f1b10
vmovss %xmm1, 0xc(%rsp)
vmulss 0x2f03cf(%rip), %xmm0, %xmm0 # 0x3f1b14
vsqrtss %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
pushq $0x8
popq %r12
cltd
idivl %r12d
shll $0x3, %eax
cmpl $0x9, %eax
cmovll %r12d, %eax
movl %eax, (%r15)
callq 0x7342d
movl %eax, %esi
cmpl %ebp, %eax
cmovgel %ebp, %esi
imull (%r15), %esi
leal (%rbx,%rsi), %eax
decl %eax
cltd
idivl %esi
movl %eax, %ecx
leal (%rbx,%rcx), %eax
decl %eax
cltd
idivl %ecx
addl $0x7, %eax
cltd
idivl %r12d
movl %eax, %ecx
shll $0x3, %ecx
cmpl %esi, %ecx
cmovgel %esi, %ecx
cmpl $0x2, %ebp
jl 0x1017b6
movl %ecx, %eax
cltd
idivl %ebp
cmpl $0x2, %eax
pushq $0x1
popq %rdx
cmovgel %eax, %edx
addl $0x7, %edx
andl $0x7ffffff8, %edx # imm = 0x7FFFFFF8
cmpl %ecx, %edx
cmovll %edx, %ecx
movq 0x10(%rsp), %rdi
movl %ecx, (%r15)
vmovss 0xc(%rsp), %xmm2
vsqrtss %xmm2, %xmm2, %xmm0
vcvtsi2ss %ecx, %xmm3, %xmm1
vsubss %xmm1, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
cltd
idivl %r12d
movl %eax, %ecx
shll $0x3, %ecx
cmpl $0x9, %ecx
cmovll %r12d, %ecx
leal (%rcx,%r13), %eax
decl %eax
cltd
idivl %ecx
movl %eax, %esi
leal (%rsi,%r13), %eax
decl %eax
cltd
idivl %esi
addl $0x7, %eax
cltd
idivl %r12d
shll $0x3, %eax
cmpl %ecx, %eax
cmovgel %ecx, %eax
movq 0x20(%rsp), %rcx
movl %eax, (%rcx)
testl %edi, %edi
jle 0x101867
movl (%r15), %ecx
movl %ecx, %edx
imull %eax, %edx
vcvtsi2ss %edx, %xmm3, %xmm0
vsubss %xmm0, %xmm2, %xmm0
addl %eax, %ecx
vcvtsi2ss %ecx, %xmm3, %xmm1
vdivss %xmm1, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
pushq $0x4
popq %rsi
cltd
idivl %esi
movl %eax, %ecx
shll $0x2, %ecx
cmpl $0x5, %ecx
cmovll %esi, %ecx
leal (%rdi,%rcx), %eax
decl %eax
xorl %edx, %edx
divl %ecx
movl %eax, %esi
leal (%rdi,%rsi), %eax
decl %eax
xorl %edx, %edx
divl %esi
addl $0x3, %eax
andl $-0x4, %eax
cmpl %ecx, %eax
cmovael %ecx, %eax
movq 0x18(%rsp), %rcx
movl %eax, (%rcx)
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_3x3_winograd.h
|
ncnn::pack_A_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int)
|
static void pack_A_tile(const Mat& A, Mat& AT, int batch, int max_ii, int max_kk)
{
const int N = max_kk * batch;
for (int b = 0; b < batch; b++)
{
float* pp = AT.row(b);
int ii = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; ii + 15 < max_ii; ii += 16)
{
const float* p0 = (const float*)A + ii * N + b;
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[N];
pp[2] = p0[2 * N];
pp[3] = p0[3 * N];
pp[4] = p0[4 * N];
pp[5] = p0[5 * N];
pp[6] = p0[6 * N];
pp[7] = p0[7 * N];
pp[8] = p0[8 * N];
pp[9] = p0[9 * N];
pp[10] = p0[10 * N];
pp[11] = p0[11 * N];
pp[12] = p0[12 * N];
pp[13] = p0[13 * N];
pp[14] = p0[14 * N];
pp[15] = p0[15 * N];
p0 += batch;
pp += 16;
}
}
#endif // __AVX512F__
for (; ii + 7 < max_ii; ii += 8)
{
const float* p0 = (const float*)A + ii * N + b;
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[N];
pp[2] = p0[2 * N];
pp[3] = p0[3 * N];
pp[4] = p0[4 * N];
pp[5] = p0[5 * N];
pp[6] = p0[6 * N];
pp[7] = p0[7 * N];
p0 += batch;
pp += 8;
}
}
#endif // __AVX__
for (; ii + 3 < max_ii; ii += 4)
{
const float* p0 = (const float*)A + ii * N + b;
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[N];
pp[2] = p0[2 * N];
pp[3] = p0[3 * N];
p0 += batch;
pp += 4;
}
}
#endif // __SSE2__
for (; ii + 1 < max_ii; ii += 2)
{
const float* p0 = (const float*)A + ii * N + b;
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[N];
p0 += batch;
pp += 2;
}
}
for (; ii < max_ii; ii++)
{
const float* p0 = (const float*)A + ii * N + b;
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
p0 += batch;
pp += 1;
}
}
}
}
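Despite the wide-looking loop bodies, this packing is all scalar gathers (the assembly below uses vmovss throughout). Since every packed row contributes exactly max_kk floats, the destination index has a simple closed form; a hypothetical helper for the 4-wide groups:

// The group starting at row ii begins at float index ii * max_kk in
// AT.row(b); inside a 4-wide group, lane w of step kk sits at kk * 4 + w.
static int packA_dst_index_4(int ii, int w, int kk, int max_kk)
{
    return ii * max_kk + kk * 4 + w;
}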
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %r8d, %r10d
movq %rdi, -0x68(%rsp)
movl %r8d, %eax
imull %edx, %eax
movq (%rsi), %r8
movq %r8, -0x30(%rsp)
movslq 0x2c(%rsi), %r8
imulq 0x10(%rsi), %r8
movq %r8, -0x38(%rsp)
movslq %eax, %r11
leal (%r11,%r11), %eax
movslq %eax, %r14
leal (%r11,%r11,2), %eax
movslq %eax, %r15
leal (,%r11,4), %eax
movslq %eax, %rdi
leal (%r11,%r11,4), %eax
movslq %eax, %r12
imull $0x6, %r11d, %eax
movslq %eax, %r9
imull $0x7, %r11d, %eax
cltq
xorl %r8d, %r8d
testl %r10d, %r10d
cmovlel %r8d, %r10d
movl %r10d, -0x7c(%rsp)
movl %edx, %ebp
testl %edx, %edx
movl $0x0, %esi
movq %rsi, -0x58(%rsp)
cmovlel %r8d, %edx
movq %rdx, -0x28(%rsp)
movslq %ecx, %rcx
movq %rcx, -0x60(%rsp)
movq %r11, %rcx
shlq $0x5, %rcx
movq %rcx, -0x8(%rsp)
shlq $0x2, %rbp
shlq $0x2, %rax
movq %rax, -0x50(%rsp)
shlq $0x2, %r9
shlq $0x2, %r12
shlq $0x2, %rdi
shlq $0x2, %r15
shlq $0x2, %r14
movq %r11, %rax
shlq $0x4, %rax
movq %rax, -0x10(%rsp)
leaq (,%r11,4), %rsi
shlq $0x3, %r11
movq %rsi, %rcx
xorl %eax, %eax
movq %r11, -0x40(%rsp)
movq %rsi, -0x48(%rsp)
cmpq -0x28(%rsp), %rax
je 0x101bb1
movq -0x38(%rsp), %r8
movq %rax, -0x20(%rsp)
imulq %rax, %r8
addq -0x30(%rsp), %r8
movq -0x68(%rsp), %rax
movq (%rax), %rax
movq %rax, -0x78(%rsp)
movq %rcx, %rdx
movq %r14, %r10
movq %r15, %r11
movq -0x58(%rsp), %r13
xorl %esi, %esi
movq %rsi, -0x70(%rsp)
movq %rsi, %rax
orq $0x7, %rax
cmpq -0x60(%rsp), %rax
jge 0x101a48
movl -0x7c(%rsp), %eax
movq -0x78(%rsp), %rsi
subl $0x1, %eax
jb 0x101a24
movq -0x58(%rsp), %rbx
vmovss (%rsi,%rbx), %xmm0
vmovss %xmm0, (%r8)
vmovss (%rsi,%rcx), %xmm0
vmovss %xmm0, 0x4(%r8)
vmovss (%rsi,%r14), %xmm0
vmovss %xmm0, 0x8(%r8)
vmovss (%rsi,%r15), %xmm0
vmovss %xmm0, 0xc(%r8)
vmovss (%rsi,%rdi), %xmm0
vmovss %xmm0, 0x10(%r8)
vmovss (%rsi,%r12), %xmm0
vmovss %xmm0, 0x14(%r8)
vmovss (%rsi,%r9), %xmm0
vmovss %xmm0, 0x18(%r8)
movq -0x50(%rsp), %rbx
vmovss (%rsi,%rbx), %xmm0
vmovss %xmm0, 0x1c(%r8)
addq $0x20, %r8
addq %rbp, %rsi
jmp 0x1019b1
movq -0x70(%rsp), %rsi
addq $0x8, %rsi
movq -0x8(%rsp), %rax
addq %rax, -0x78(%rsp)
addq %rax, %r13
addq %rax, %r11
addq %rax, %r10
addq %rax, %rdx
jmp 0x101991
movq %r14, -0x18(%rsp)
movq -0x68(%rsp), %rax
movq (%rax), %rax
leaq (%rax,%r13), %r14
addq %rax, %r11
addq %rax, %r10
addq %rdx, %rax
movq $0x0, -0x78(%rsp)
movq -0x70(%rsp), %rbx
movq %rbx, -0x70(%rsp)
movq %rbx, %rsi
orq $0x3, %rsi
cmpq -0x60(%rsp), %rsi
jge 0x101ae3
movl -0x7c(%rsp), %ebx
movq -0x78(%rsp), %rsi
subl $0x1, %ebx
jb 0x101ac8
vmovss (%r14,%rsi), %xmm0
vmovss %xmm0, (%r8)
vmovss (%rax,%rsi), %xmm0
vmovss %xmm0, 0x4(%r8)
vmovss (%r10,%rsi), %xmm0
vmovss %xmm0, 0x8(%r8)
vmovss (%r11,%rsi), %xmm0
vmovss %xmm0, 0xc(%r8)
addq $0x10, %r8
addq %rbp, %rsi
jmp 0x101a8c
movq -0x70(%rsp), %rbx
addq $0x4, %rbx
movq -0x10(%rsp), %rsi
addq %rsi, -0x78(%rsp)
addq %rsi, %r13
addq %rsi, %rdx
jmp 0x101a70
movq -0x68(%rsp), %rax
movq (%rax), %rax
movq -0x40(%rsp), %r11
movq -0x18(%rsp), %r14
movq -0x70(%rsp), %r10
movq %r10, %rsi
orq $0x1, %rsi
cmpq -0x60(%rsp), %rsi
jge 0x101b46
movq %r10, %rbx
movl -0x7c(%rsp), %esi
movq %rax, %r10
subl $0x1, %esi
jb 0x101b37
vmovss (%r10,%r13), %xmm0
vmovss %xmm0, (%r8)
vmovss (%r10,%rdx), %xmm0
vmovss %xmm0, 0x4(%r8)
addq $0x8, %r8
addq %rbp, %r10
jmp 0x101b12
movq %rbx, %r10
addq $0x2, %r10
addq %r11, %r13
addq %r11, %rdx
jmp 0x101afa
movq -0x68(%rsp), %rax
addq (%rax), %r13
movq -0x48(%rsp), %rsi
cmpq -0x60(%rsp), %r10
jge 0x101b80
movl -0x7c(%rsp), %eax
movq %r13, %rdx
subl $0x1, %eax
jb 0x101b78
vmovss (%rdx), %xmm0
vmovss %xmm0, (%r8)
addq $0x4, %r8
addq %rbp, %rdx
jmp 0x101b61
incq %r10
addq %rsi, %r13
jmp 0x101b53
movq -0x20(%rsp), %rax
incq %rax
addq $0x4, -0x58(%rsp)
addq $0x4, -0x50(%rsp)
addq $0x4, %r9
addq $0x4, %r12
addq $0x4, %rdi
addq $0x4, %r15
addq $0x4, %r14
addq $0x4, %rcx
jmp 0x101956
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_3x3_winograd.h
|
ncnn::im2col_sgemm_pack8to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
im2col_sgemm_pack8to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
im2col_sgemm_pack8to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
im2col_sgemm_pack8to4_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
im2col_sgemm_pack8to4_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
int64_t* tmpptr = tmp.channel(i / 4);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m256i _v = _mm256_loadu_si256((const __m256i*)img0);
_mm256_storeu_si256((__m256i*)tmpptr, _v);
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = size >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
int64_t* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m128i _v = _mm_loadu_si128((const __m128i*)img0);
_mm_storeu_si128((__m128i*)tmpptr, _v);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
__m256i _sum04_15 = _mm256_setzero_si256();
__m256i _sum14_05 = _mm256_setzero_si256();
__m256i _sum06_17 = _mm256_setzero_si256();
__m256i _sum16_07 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
_sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
_sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
_sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
_sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_madd_epi16(_val23_16, _w01_16));
_sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_madd_epi16(_val32_16, _w01_16));
_sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_madd_epi16(_val23_16, _w23_16));
_sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_madd_epi16(_val32_16, _w23_16));
#endif
tmpptr += 32;
kptr0 += 32;
}
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
_tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
_tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
_tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
_sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
_sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);
_mm256_storeu_si256((__m256i*)outptr0, _sum00_11);
_mm256_storeu_si256((__m256i*)(outptr0 + 8), _sum04_15);
outptr0 += 16;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
_sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
_sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
_sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
_sum00 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum00);
_sum01 = _mm_add_epi32(_mm_madd_epi16(_val0, _w1), _sum01);
_sum02 = _mm_add_epi32(_mm_madd_epi16(_val0, _w2), _sum02);
_sum03 = _mm_add_epi32(_mm_madd_epi16(_val0, _w3), _sum03);
_sum10 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum10);
_sum11 = _mm_add_epi32(_mm_madd_epi16(_val1, _w1), _sum11);
_sum12 = _mm_add_epi32(_mm_madd_epi16(_val1, _w2), _sum12);
_sum13 = _mm_add_epi32(_mm_madd_epi16(_val1, _w3), _sum13);
#endif
#endif
tmpptr += 16;
kptr0 += 32;
}
#if __AVX2__
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
_mm256_storeu_si256((__m256i*)outptr0, _sum00_11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
#endif
outptr0 += 8;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
_val = _mm_cvtepi8_epi16(_val);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);
#if __AVXVNNI__ || __AVX512VNNI__
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_madd_epi16(_valval, _w01_16));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_madd_epi16(_valval, _w23_16));
#endif
#else
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val = _mm_cvtepi8_epi16(_val);
#else
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
_sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
_sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val, _w0), _sum0);
_sum1 = _mm_add_epi32(_mm_madd_epi16(_val, _w1), _sum1);
_sum2 = _mm_add_epi32(_mm_madd_epi16(_val, _w2), _sum2);
_sum3 = _mm_add_epi32(_mm_madd_epi16(_val, _w3), _sum3);
#endif
#endif
tmpptr += 8;
kptr0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
_mm_storeu_si128((__m128i*)outptr0, _sum0);
outptr0 += 4;
}
}
}
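Two stages above: a permute that regroups im2col columns into 2-wide, then 1-wide blocks of int64 (one int64 carries 8 packed int8 values), and the GEMM itself, whose int8 dot products ride on _mm_madd_epi16 after sign-extension to int16; the assembly that follows corresponds to the non-__AVX2__ branches (128-bit ops only). A scalar model of the madd-based accumulation, assuming inputs already widened to int16:

#include <stdint.h>

// _mm_madd_epi16 multiplies eight int16 pairs and sums adjacent products
// into four int32 lanes; accumulating those lanes over the loop gives the
// dot product modeled here.
static int32_t madd_accumulate(const int16_t* a, const int16_t* b, int n /* even */)
{
    int32_t sum = 0;
    for (int i = 0; i < n; i += 2)
        sum += (int32_t)a[i] * b[i] + (int32_t)a[i + 1] * b[i + 1];
    return sum;
}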
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %rcx, %r13
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r12
callq 0x732f7
testl %eax, %eax
je 0x107af0
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x135d20
callq 0x732db
testl %eax, %eax
je 0x107b18
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x13ac96
movslq 0x2c(%r12), %rbp
movl 0x30(%r12), %ecx
movl 0x38(%r12), %r15d
movq %r14, 0x68(%rsp)
movl 0x38(%r14), %eax
movl %ecx, %r14d
movq %rax, 0x60(%rsp)
leaq 0x10(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebp, %ecx
shrl %ecx
movl %ebp, %eax
andl $0x1, %eax
addl %ecx, %eax
cmpq $0x2, %rbp
setge %cl
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
cmovll %ebp, %eax
movl %r14d, %esi
shll %cl, %esi
movq 0x10(%r13), %rcx
movq %rcx, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r15d, %edx
movl %eax, %ecx
callq 0x628f2
movl %ebp, %eax
sarl %eax
xorl %ecx, %ecx
testl %r14d, %r14d
movl %r14d, %edx
movl $0x0, %r14d
movl %edx, 0xc(%rsp)
cmovgl %edx, %r14d
testl %r15d, %r15d
movl $0x0, %edi
cmovgl %r15d, %edi
testl %eax, %eax
cmovlel %ecx, %eax
leaq (,%rbp,8), %r8
xorl %edx, %edx
cmpq %rax, %rdx
je 0x107c26
movq 0x50(%rsp), %r9
imulq %rdx, %r9
imulq 0x20(%rsp), %r9
addq 0x10(%rsp), %r9
xorl %r10d, %r10d
cmpq %rdi, %r10
je 0x107c1d
movq 0x40(%r12), %r11
movq (%r12), %rsi
imulq 0x10(%r12), %r11
addq %rcx, %rsi
imulq %r10, %r11
addq %rsi, %r11
movl %r14d, %r13d
subl $0x1, %r13d
jb 0x107c18
vmovdqu (%r11), %xmm0
vmovdqu %xmm0, (%r9)
addq $0x10, %r9
addq %r8, %r11
jmp 0x107bff
incq %r10
jmp 0x107bde
incq %rdx
addq $0x10, %rcx
jmp 0x107bc2
movl %ebp, %eax
andl $-0x2, %eax
movq 0x10(%rsp), %rcx
movq %rcx, 0x70(%rsp)
movslq %eax, %rcx
leaq (,%rcx,8), %r10
cmpq %rbp, %rcx
jge 0x107ca9
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x50(%rsp), %rax
imulq 0x20(%rsp), %rax
addq 0x70(%rsp), %rax
movq (%r12), %rdx
addq %r10, %rdx
xorl %r13d, %r13d
cmpq %rdi, %r13
je 0x107ca0
movq 0x40(%r12), %r11
imulq 0x10(%r12), %r11
imulq %r13, %r11
addq %rdx, %r11
movl %r14d, %esi
subl $0x1, %esi
jb 0x107c9b
movq (%r11), %r9
movq %r9, (%rax)
addq $0x8, %rax
addq %r8, %r11
jmp 0x107c87
incq %r13
jmp 0x107c6d
incq %rcx
addq $0x8, %r10
jmp 0x107c40
imull 0xc(%rsp), %r15d
xorl %eax, %eax
testl %r15d, %r15d
cmovlel %eax, %r15d
movq 0x60(%rsp), %r14
testl %r14d, %r14d
cmovlel %eax, %r14d
vpxor %xmm0, %xmm0, %xmm0
movq 0x68(%rsp), %r11
cmpq %r14, %rax
je 0x107f3a
movq 0x40(%r11), %rcx
imulq %rax, %rcx
imulq 0x10(%r11), %rcx
addq (%r11), %rcx
xorl %esi, %esi
xorl %edx, %edx
movq %rdx, %rdi
orq $0x1, %rdi
cmpq %rbp, %rdi
jge 0x107f2a
movq 0x40(%rbx), %rdi
imulq %rax, %rdi
imulq 0x10(%rbx), %rdi
addq (%rbx), %rdi
movq 0x50(%rsp), %r8
imulq 0x20(%rsp), %r8
imulq %rsi, %r8
addq 0x10(%rsp), %r8
vpxor %xmm1, %xmm1, %xmm1
xorl %r9d, %r9d
movl %r15d, %r10d
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm5, %xmm5, %xmm5
vpxor %xmm6, %xmm6, %xmm6
vpxor %xmm7, %xmm7, %xmm7
vpxor %xmm8, %xmm8, %xmm8
subl $0x1, %r10d
jb 0x107de1
vmovdqu (%r8,%r9), %xmm9
vpcmpgtb %xmm9, %xmm0, %xmm10
vpunpcklbw %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
vpunpckhbw %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
vmovdqu (%rdi,%r9,2), %xmm10
vmovdqu 0x10(%rdi,%r9,2), %xmm12
vpcmpgtb %xmm10, %xmm0, %xmm13
vpcmpgtb %xmm12, %xmm0, %xmm14
vpunpcklbw %xmm13, %xmm10, %xmm15 # xmm15 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
vpunpckhbw %xmm13, %xmm10, %xmm10 # xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
vpunpcklbw %xmm14, %xmm12, %xmm13 # xmm13 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
vpunpckhbw %xmm14, %xmm12, %xmm12 # xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
vpmaddwd %xmm15, %xmm11, %xmm14
vpaddd %xmm8, %xmm14, %xmm8
vpmaddwd %xmm10, %xmm11, %xmm14
vpaddd %xmm7, %xmm14, %xmm7
vpmaddwd %xmm13, %xmm11, %xmm14
vpaddd %xmm6, %xmm14, %xmm6
vpmaddwd %xmm12, %xmm11, %xmm11
vpaddd %xmm5, %xmm11, %xmm5
vpmaddwd %xmm15, %xmm9, %xmm11
vpaddd %xmm4, %xmm11, %xmm4
vpmaddwd %xmm10, %xmm9, %xmm10
vpaddd %xmm3, %xmm10, %xmm3
vpmaddwd %xmm13, %xmm9, %xmm10
vpaddd %xmm2, %xmm10, %xmm2
vpmaddwd %xmm12, %xmm9, %xmm9
vpaddd %xmm1, %xmm9, %xmm1
addq $0x10, %r9
jmp 0x107d45
vpunpckldq %xmm7, %xmm8, %xmm9 # xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
vpunpckldq %xmm5, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
vpunpckhdq %xmm7, %xmm8, %xmm7 # xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
vpunpckhdq %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
vpunpckldq %xmm3, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vpunpckldq %xmm1, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vpunpckhdq %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
vpunpckhdq %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
vpunpcklqdq %xmm10, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm10[0]
vpunpckhqdq %xmm10, %xmm9, %xmm4 # xmm4 = xmm9[1],xmm10[1]
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm5, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm5[0]
vpunpckhqdq %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[1],xmm5[1]
vpaddd %xmm4, %xmm5, %xmm4
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm8, %xmm6, %xmm4 # xmm4 = xmm6[0],xmm8[0]
vpunpckhqdq %xmm8, %xmm6, %xmm5 # xmm5 = xmm6[1],xmm8[1]
vpaddd %xmm5, %xmm4, %xmm4
vpunpcklqdq %xmm1, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm1[0]
vpunpckhqdq %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[1],xmm1[1]
vpaddd %xmm5, %xmm1, %xmm1
vpaddd %xmm1, %xmm4, %xmm1
vmovdqu %xmm2, (%rcx)
vmovdqu %xmm1, 0x10(%rcx)
addq $0x20, %rcx
addq $0x2, %rdx
incq %rsi
jmp 0x107cea
movl %edx, %esi
shrl %esi
movl %edx, %r8d
andl $0x1, %r8d
addl %esi, %r8d
movq 0x50(%rsp), %rsi
movq 0x40(%rbx), %rdi
imulq %rax, %rdi
imulq 0x10(%rbx), %rdi
addq (%rbx), %rdi
imulq 0x20(%rsp), %rsi
imulq %r8, %rsi
addq 0x10(%rsp), %rsi
vpxor %xmm1, %xmm1, %xmm1
xorl %r8d, %r8d
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
cmpl %r8d, %r15d
je 0x107ef4
vpmovsxbw (%rsi,%r8,8), %xmm5
vmovdqu (%rdi), %xmm6
vmovdqu 0x10(%rdi), %xmm7
vpcmpgtb %xmm6, %xmm0, %xmm8
vpcmpgtb %xmm7, %xmm0, %xmm9
vpunpcklbw %xmm8, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
vpunpckhbw %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
vpunpcklbw %xmm9, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
vpmaddwd %xmm5, %xmm10, %xmm9
vpaddd %xmm1, %xmm9, %xmm1
vpmaddwd %xmm6, %xmm5, %xmm6
vpaddd %xmm2, %xmm6, %xmm2
vpmaddwd %xmm5, %xmm8, %xmm6
vpaddd %xmm3, %xmm6, %xmm3
vpmaddwd %xmm7, %xmm5, %xmm5
vpaddd %xmm4, %xmm5, %xmm4
addq $0x20, %rdi
incq %r8
jmp 0x107e9b
vpunpckldq %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vpunpckldq %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vpunpckhdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vpunpckhdq %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
vpunpcklqdq %xmm6, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm6[0]
vpunpckhqdq %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0]
vpunpckhqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm2[1]
vpaddd %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm3, %xmm1
vmovdqu %xmm1, (%rcx)
addq $0x10, %rcx
incl %edx
cmpl %ebp, %edx
jl 0x107e56
incq %rax
jmp 0x107ccd
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x107f68
lock
decl (%rax)
jne 0x107f68
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x107f60
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x107f68
movq %rsi, %rdi
callq 0x5f3e0
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x107fb2
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x107faa
lock
decl (%rax)
jne 0x107faa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x107fa4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x107faa
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to4_int8.h
|
ncnn::im2col_sgemm_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
im2col_sgemm_pack8to1_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
im2col_sgemm_pack8to1_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
im2col_sgemm_pack8to1_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
im2col_sgemm_pack8to1_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
int64_t* tmpptr = tmp.channel(i / 4);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m256i _v = _mm256_loadu_si256((const __m256i*)img0);
_mm256_storeu_si256((__m256i*)tmpptr, _v);
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
int64_t* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m128i _v = _mm_loadu_si128((const __m128i*)img0);
_mm_storeu_si128((__m128i*)tmpptr, _v);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
__m256i _sum04_15 = _mm256_setzero_si256();
__m256i _sum14_05 = _mm256_setzero_si256();
__m256i _sum06_17 = _mm256_setzero_si256();
__m256i _sum16_07 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
_sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
_sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
_sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
_sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_madd_epi16(_val23_16, _w01_16));
_sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_madd_epi16(_val32_16, _w01_16));
_sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_madd_epi16(_val23_16, _w23_16));
_sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_madd_epi16(_val32_16, _w23_16));
#endif
tmpptr += 32;
kptr0 += 32;
}
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
_tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
_tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
_tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
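// with both transposes done, the adds below collapse the partial sums and
// the permute mask reorders lanes so sum[] comes out pixel-major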
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
_sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
_sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);
int sum[16];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
_mm256_storeu_si256((__m256i*)(sum + 8), _sum04_15);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0[2] = sum[8];
outptr1[2] = sum[9];
outptr2[2] = sum[10];
outptr3[2] = sum[11];
outptr0[3] = sum[12];
outptr1[3] = sum[13];
outptr2[3] = sum[14];
outptr3[3] = sum[15];
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_madd_epi16(_val10_16, _w01_16));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_madd_epi16(_val01_16, _w23_16));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_madd_epi16(_val10_16, _w23_16));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
_sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
_sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
_sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
_sum00 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum00);
_sum01 = _mm_add_epi32(_mm_madd_epi16(_val0, _w1), _sum01);
_sum02 = _mm_add_epi32(_mm_madd_epi16(_val0, _w2), _sum02);
_sum03 = _mm_add_epi32(_mm_madd_epi16(_val0, _w3), _sum03);
_sum10 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum10);
_sum11 = _mm_add_epi32(_mm_madd_epi16(_val1, _w1), _sum11);
_sum12 = _mm_add_epi32(_mm_madd_epi16(_val1, _w2), _sum12);
_sum13 = _mm_add_epi32(_mm_madd_epi16(_val1, _w3), _sum13);
#endif
#endif
tmpptr += 16;
kptr0 += 32;
}
#if __AVX2__
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
int sum[8];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
#endif
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
_val = _mm_cvtepi8_epi16(_val);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);
#if __AVXVNNI__ || __AVX512VNNI__
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_madd_epi16(_valval, _w01_16));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_madd_epi16(_valval, _w23_16));
#endif
#else
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val = _mm_cvtepi8_epi16(_val);
#else
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
_sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
_sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val, _w0), _sum0);
_sum1 = _mm_add_epi32(_mm_madd_epi16(_val, _w1), _sum1);
_sum2 = _mm_add_epi32(_mm_madd_epi16(_val, _w2), _sum2);
_sum3 = _mm_add_epi32(_mm_madd_epi16(_val, _w3), _sum3);
#endif
#endif
tmpptr += 8;
kptr0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
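// leftover output channels (outch % 4) are computed one at a time,
// reducing each pixel's dot product horizontally before the store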
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__m256i _sum01 = _mm256_setzero_si256();
__m256i _sum23 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
_w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));
#if __AVXVNNI__ || __AVX512VNNI__
_sum01 = _mm256_dpwssd_epi32(_sum01, _val01_16, _w01_16);
_sum23 = _mm256_dpwssd_epi32(_sum23, _val23_16, _w01_16);
#else
_sum01 = _mm256_add_epi32(_sum01, _mm256_madd_epi16(_val01_16, _w01_16));
_sum23 = _mm256_add_epi32(_sum23, _mm256_madd_epi16(_val23_16, _w01_16));
#endif
tmpptr += 32;
kptr0 += 8;
}
__m128i _sum0 = _mm256_extracti128_si256(_sum01, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum01, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum23, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum23, 1);
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum1);
outptr0[2] = _mm_reduce_add_epi32(_sum2);
outptr0[3] = _mm_reduce_add_epi32(_sum3);
outptr0 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum01 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
_w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));
#if __AVXVNNI__ || __AVX512VNNI__
_sum01 = _mm256_dpwssd_epi32(_sum01, _val01_16, _w01_16);
#else
_sum01 = _mm256_add_epi32(_sum01, _mm256_madd_epi16(_val01_16, _w01_16));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
#if __XOP__
_sum0 = _mm_maddd_epi16(_val0, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val1, _w0, _sum1);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum0);
_sum1 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum1);
#endif
#endif
tmpptr += 16;
kptr0 += 8;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum01, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum01, 1);
#endif
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum1);
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__m128i _sum0 = _mm_setzero_si128();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
#if __XOP__
_sum0 = _mm_maddd_epi16(_val0, _w0, _sum0);
#else
_sum0 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum0);
#endif
tmpptr += 8;
kptr0 += 8;
}
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0 += 1;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rcx, %r13
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r12
callq 0x732f7
testl %eax, %eax
je 0x1086a3
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x13366d
callq 0x732db
testl %eax, %eax
je 0x1086ce
movq %r12, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x138bf0
movq %rbx, 0x10(%rsp)
movslq 0x2c(%r12), %rbp
movl 0x30(%r12), %ebx
movl 0x38(%r12), %r15d
movq %r14, 0x28(%rsp)
movslq 0x38(%r14), %rax
movq %rax, 0x18(%rsp)
leaq 0x30(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebp, %ecx
shrl %ecx
movl %ebp, %eax
andl $0x1, %eax
addl %ecx, %eax
cmpq $0x2, %rbp
setge %cl
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
cmovll %ebp, %eax
movl %ebx, %esi
shll %cl, %esi
movq 0x10(%r13), %rcx
movq %rcx, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r15d, %edx
movl %eax, %ecx
callq 0x628f2
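# presumably a Mat::create call allocating the tmp repacking buffer
# (an assumption: %rdi points at the zero-initialized Mat on the stack)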
movl %ebp, %eax
sarl %eax
xorl %ecx, %ecx
testl %ebx, %ebx
movl %ebx, %edx
movl $0x0, %ebx
movl %edx, 0x24(%rsp)
cmovgl %edx, %ebx
testl %r15d, %r15d
movl $0x0, %edi
cmovgl %r15d, %edi
testl %eax, %eax
cmovlel %ecx, %eax
leaq (,%rbp,8), %r8
xorl %edx, %edx
cmpq %rax, %rdx
je 0x1087d9
movq 0x70(%rsp), %r9
imulq %rdx, %r9
imulq 0x40(%rsp), %r9
addq 0x30(%rsp), %r9
xorl %r10d, %r10d
cmpq %rdi, %r10
je 0x1087d0
movq 0x40(%r12), %r11
movq (%r12), %rsi
imulq 0x10(%r12), %r11
addq %rcx, %rsi
imulq %r10, %r11
addq %rsi, %r11
movl %ebx, %r14d
subl $0x1, %r14d
jb 0x1087cb
vmovdqu (%r11), %xmm0
vmovdqu %xmm0, (%r9)
addq $0x10, %r9
addq %r8, %r11
jmp 0x1087b2
incq %r10
jmp 0x108791
incq %rdx
addq $0x10, %rcx
jmp 0x108775
movl %ebp, %eax
andl $-0x2, %eax
movq 0x30(%rsp), %r9
movslq %eax, %rcx
leaq (,%rcx,8), %r10
cmpq %rbp, %rcx
jge 0x108855
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x70(%rsp), %rax
imulq 0x40(%rsp), %rax
addq %r9, %rax
movq (%r12), %rdx
addq %r10, %rdx
xorl %r14d, %r14d
cmpq %rdi, %r14
je 0x10884c
movq 0x40(%r12), %r13
imulq 0x10(%r12), %r13
imulq %r14, %r13
addq %rdx, %r13
movl %ebx, %esi
subl $0x1, %esi
jb 0x108847
movq (%r13), %r11
movq %r11, (%rax)
addq $0x8, %rax
addq %r8, %r13
jmp 0x108832
incq %r14
jmp 0x108819
incq %rcx
addq $0x8, %r10
jmp 0x1087ee
movq 0x18(%rsp), %rax
movl %eax, %ebx
sarl $0x2, %ebx
movq 0x28(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0x80(%rsp)
movq 0x40(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0x78(%rsp)
imull 0x24(%rsp), %r15d
xorl %esi, %esi
testl %r15d, %r15d
cmovlel %esi, %r15d
testl %ebx, %ebx
cmovlel %esi, %ebx
vpxor %xmm0, %xmm0, %xmm0
cmpq %rbx, %rsi
je 0x108b9e
leaq (,%rsi,4), %rdi
movq 0x78(%rsp), %rcx
imulq %rcx, %rdi
movq 0x80(%rsp), %rax
addq %rax, %rdi
leaq 0x1(,%rsi,4), %r8
imulq %rcx, %r8
addq %rax, %r8
leaq 0x2(,%rsi,4), %r9
imulq %rcx, %r9
addq %rax, %r9
leaq 0x3(,%rsi,4), %r10
imulq %rcx, %r10
addq %rax, %r10
movq 0x10(%rsp), %rax
movq 0x40(%rax), %r12
imulq 0x10(%rax), %r12
movq 0x30(%rsp), %r13
imulq %rsi, %r12
addq (%rax), %r12
movq 0x70(%rsp), %r14
imulq 0x40(%rsp), %r14
xorl %r11d, %r11d
movq %r11, %rax
orq $0x1, %rax
cmpq %rbp, %rax
jge 0x108a89
vpxor %xmm1, %xmm1, %xmm1
xorl %eax, %eax
movl %r15d, %ecx
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm5, %xmm5, %xmm5
vpxor %xmm6, %xmm6, %xmm6
vpxor %xmm7, %xmm7, %xmm7
vpxor %xmm8, %xmm8, %xmm8
subl $0x1, %ecx
jb 0x1089e1
vmovdqu (%r13,%rax), %xmm9
vpcmpgtb %xmm9, %xmm0, %xmm10
vpunpcklbw %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
vpunpckhbw %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
vmovdqu (%r12,%rax,2), %xmm10
vmovdqu 0x10(%r12,%rax,2), %xmm12
vpcmpgtb %xmm10, %xmm0, %xmm13
vpcmpgtb %xmm12, %xmm0, %xmm14
vpunpcklbw %xmm13, %xmm10, %xmm15 # xmm15 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
vpunpckhbw %xmm13, %xmm10, %xmm10 # xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
vpunpcklbw %xmm14, %xmm12, %xmm13 # xmm13 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
vpunpckhbw %xmm14, %xmm12, %xmm12 # xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
vpmaddwd %xmm15, %xmm11, %xmm14
vpaddd %xmm8, %xmm14, %xmm8
vpmaddwd %xmm10, %xmm11, %xmm14
vpaddd %xmm7, %xmm14, %xmm7
vpmaddwd %xmm13, %xmm11, %xmm14
vpaddd %xmm6, %xmm14, %xmm6
vpmaddwd %xmm12, %xmm11, %xmm11
vpaddd %xmm5, %xmm11, %xmm5
vpmaddwd %xmm15, %xmm9, %xmm11
vpaddd %xmm4, %xmm11, %xmm4
vpmaddwd %xmm10, %xmm9, %xmm10
vpaddd %xmm3, %xmm10, %xmm3
vpmaddwd %xmm13, %xmm9, %xmm10
vpaddd %xmm2, %xmm10, %xmm2
vpmaddwd %xmm12, %xmm9, %xmm9
vpaddd %xmm1, %xmm9, %xmm1
addq $0x10, %rax
jmp 0x108945
vpunpckldq %xmm7, %xmm8, %xmm9 # xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
vpunpckldq %xmm5, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
vpunpckhdq %xmm7, %xmm8, %xmm7 # xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
vpunpckhdq %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
vpunpckldq %xmm3, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vpunpckldq %xmm1, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vpunpckhdq %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
vpunpckhdq %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
vpunpcklqdq %xmm10, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm10[0]
vpunpckhqdq %xmm10, %xmm9, %xmm4 # xmm4 = xmm9[1],xmm10[1]
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm5, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm5[0]
vpunpckhqdq %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[1],xmm5[1]
vpaddd %xmm4, %xmm5, %xmm4
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm8, %xmm6, %xmm4 # xmm4 = xmm6[0],xmm8[0]
vpunpckhqdq %xmm8, %xmm6, %xmm5 # xmm5 = xmm6[1],xmm8[1]
vpaddd %xmm5, %xmm4, %xmm4
vpunpcklqdq %xmm1, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm1[0]
vmovd %xmm2, (%rdi)
vpextrd $0x1, %xmm2, (%r8)
vpextrd $0x2, %xmm2, (%r9)
vpextrd $0x3, %xmm2, (%r10)
vpunpckhqdq %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[1],xmm1[1]
vpaddd %xmm5, %xmm1, %xmm1
vpaddd %xmm1, %xmm4, %xmm1
vmovd %xmm1, 0x4(%rdi)
vpextrd $0x1, %xmm1, 0x4(%r8)
vpextrd $0x2, %xmm1, 0x4(%r9)
vpextrd $0x3, %xmm1, 0x4(%r10)
addq $0x8, %rdi
addq $0x8, %r8
addq $0x8, %r9
addq $0x8, %r10
addq $0x2, %r11
addq %r14, %r13
jmp 0x10890f
movq 0x30(%rsp), %r14
movq 0x70(%rsp), %r12
movq 0x10(%rsp), %rax
movq 0x40(%rax), %rdx
imulq %rsi, %rdx
imulq 0x10(%rax), %rdx
addq (%rax), %rdx
imulq 0x40(%rsp), %r12
cmpl %ebp, %r11d
jge 0x108b96
movl %r11d, %ecx
shrl %ecx
movl %r11d, %eax
andl $0x1, %eax
addl %ecx, %eax
imulq %r12, %rax
addq %r14, %rax
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
movq %rdx, %r13
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
cmpl %ecx, %r15d
je 0x108b3c
vpmovsxbw (%rax,%rcx,8), %xmm5
vmovdqu (%r13), %xmm6
vmovdqu 0x10(%r13), %xmm7
vpcmpgtb %xmm6, %xmm0, %xmm8
vpcmpgtb %xmm7, %xmm0, %xmm9
vpunpcklbw %xmm8, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
vpunpckhbw %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
vpunpcklbw %xmm9, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
vpmaddwd %xmm5, %xmm10, %xmm9
vpaddd %xmm1, %xmm9, %xmm1
vpmaddwd %xmm6, %xmm5, %xmm6
vpaddd %xmm2, %xmm6, %xmm2
vpmaddwd %xmm5, %xmm8, %xmm6
vpaddd %xmm3, %xmm6, %xmm3
vpmaddwd %xmm7, %xmm5, %xmm5
vpaddd %xmm4, %xmm5, %xmm4
addq $0x20, %r13
incq %rcx
jmp 0x108ae0
vpunpckldq %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vpunpckldq %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vpunpckhdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vpunpckhdq %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
vpunpcklqdq %xmm6, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm6[0]
vpunpckhqdq %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0]
vpunpckhqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm2[1]
vpaddd %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm3, %xmm1
vmovd %xmm1, (%rdi)
vpextrd $0x1, %xmm1, (%r8)
vpextrd $0x2, %xmm1, (%r9)
vpextrd $0x3, %xmm1, (%r10)
addq $0x4, %rdi
addq $0x4, %r8
addq $0x4, %r9
addq $0x4, %r10
incl %r11d
jmp 0x108aae
incq %rsi
jmp 0x108895
movq 0x18(%rsp), %rcx
andq $-0x4, %rcx
movq 0x28(%rsp), %rax
movq 0x40(%rax), %rsi
imulq 0x10(%rax), %rsi
movq (%rax), %rdi
movl %r15d, %r8d
pushq $0x4
popq %r9
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x18(%rsp), %rcx
jge 0x108d10
movq 0x30(%rsp), %r11
movq 0x70(%rsp), %r14
imulq 0x40(%rsp), %r14
movl %ecx, %eax
cltd
idivl %r9d
movq %rsi, %r10
imulq %rcx, %r10
addl %eax, %edx
movslq %edx, %rax
movq 0x10(%rsp), %rdx
movq 0x40(%rdx), %r15
imulq %rax, %r15
imulq 0x10(%rdx), %r15
addq %rdi, %r10
addq (%rdx), %r15
xorl %edx, %edx
movq %rdx, %rbx
orq $0x1, %rbx
cmpq %rbp, %rbx
jge 0x108c92
movq %rdx, %r12
shrq %r12
imulq %r14, %r12
addq %r11, %r12
vpxor %xmm1, %xmm1, %xmm1
xorl %r13d, %r13d
vpxor %xmm2, %xmm2, %xmm2
cmpl %r13d, %r8d
je 0x108c64
vmovdqu (%r12), %xmm3
vpcmpgtb %xmm3, %xmm0, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
vpunpckhbw %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
vpmovsxbw (%r15,%r13,8), %xmm4
vpmaddwd %xmm4, %xmm5, %xmm5
vpaddd %xmm1, %xmm5, %xmm1
vpmaddwd %xmm4, %xmm3, %xmm3
vpaddd %xmm2, %xmm3, %xmm2
addq $0x10, %r12
incq %r13
jmp 0x108c2e
vpshufd $0xee, %xmm1, %xmm3 # xmm3 = xmm1[2,3,2,3]
vpaddd %xmm1, %xmm3, %xmm1
vpshufd $0xee, %xmm2, %xmm3 # xmm3 = xmm2[2,3,2,3]
vpaddd %xmm2, %xmm3, %xmm2
vphaddd %xmm2, %xmm1, %xmm1
vpshufd $0xe8, %xmm1, %xmm1 # xmm1 = xmm1[0,2,2,3]
vmovq %xmm1, (%r10)
addq $0x8, %r10
addq $0x2, %rdx
jmp 0x108c0a
movq 0x30(%rsp), %r11
movq 0x70(%rsp), %r14
movq 0x10(%rsp), %rbx
imulq 0x40(%rbx), %rax
imulq 0x10(%rbx), %rax
addq (%rbx), %rax
imulq 0x40(%rsp), %r14
cmpl %ebp, %edx
jge 0x108d08
movl %edx, %ebx
shrl %ebx
movl %edx, %r15d
andl $0x1, %r15d
addl %ebx, %r15d
imulq %r14, %r15
addq %r11, %r15
vpxor %xmm1, %xmm1, %xmm1
xorl %ebx, %ebx
cmpl %ebx, %r8d
je 0x108cf1
vpmovsxbw (%r15,%rbx,8), %xmm2
vpmovsxbw (%rax,%rbx,8), %xmm3
vpmaddwd %xmm3, %xmm2, %xmm2
vpaddd %xmm1, %xmm2, %xmm1
incq %rbx
jmp 0x108cd3
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vmovd %xmm1, (%r10)
addq $0x4, %r10
incl %edx
jmp 0x108cb4
incq %rcx
jmp 0x108bc3
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x108d3e
lock
decl (%rax)
jne 0x108d3e
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x108d36
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x108d3e
movq %rsi, %rdi
callq 0x5f3e0
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x108d8b
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x108d83
lock
decl (%rax)
jne 0x108d83
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x108d7d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x108d83
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1_int8.h
|
ncnn::im2col_sgemm_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void im2col_sgemm_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
im2col_sgemm_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
im2col_sgemm_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
im2col_sgemm_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
im2col_sgemm_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#endif
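// runtime dispatch: when this translation unit lacks the newer ISA,
// probe the CPU once and tail into the variant compiled with it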
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __SSE2__
if (inch >= 4)
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
}
else
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
}
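// tile sizes: 4/2/1 pixels per tile with AVX2, 2/1 otherwise; when
// inch >= 4 the tmp Mat packs 4 bytes per element so four input
// channels of a pixel sit adjacent for the loads below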
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
signed char* tmpptr = tmp.channel(i / 4);
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr[8] = img0[2];
tmpptr[9] = img1[2];
tmpptr[10] = img2[2];
tmpptr[11] = img3[2];
tmpptr[12] = img0[3];
tmpptr[13] = img1[3];
tmpptr[14] = img2[3];
tmpptr[15] = img3[3];
tmpptr += 16;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#else // __SSE2__
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
signed char* tmpptr = tmp.channel(i);
int q = 0;
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#endif // __SSE2__
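// packing done; the gemm below processes output channels in blocks of
// four, reading weights from kernel.channel(p / 4)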
int nn_outch = 0;
int remain_outch_start = 0;
#if __SSE2__
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m256i _sum00_12 = _mm256_setzero_si256();
__m256i _sum20_32 = _mm256_setzero_si256();
if (nn4 > 0)
{
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
__m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
__m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
_sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
_sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_madd_epi16(_val10_16, _w01_16));
_sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_madd_epi16(_val23_16, _w01_16));
_sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_madd_epi16(_val32_16, _w01_16));
#endif
tmpptr += 16;
kptr0 += 16;
}
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
_sum20_32 = _mm256_permute4x64_epi64(_sum20_32, _MM_SHUFFLE(2, 1, 3, 0));
}
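// the hadd/permute above leave per-pixel sums in lane order; split the
// 256-bit accumulators into 128-bit halves so the nn1 remainder loop
// can keep accumulating into them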
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
__m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
__m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);
int j = 0;
for (; j < nn1; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val0123 = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
_val0123 = _mm_unpacklo_epi8(_val0123, _extval0123);
#endif
__m128i _val01 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(1, 1, 0, 0));
_val01 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _val23 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(3, 3, 2, 2));
_val23 = _mm_shuffle_epi32(_val23, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
__m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
__m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
_sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
_sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));
tmpptr += 4;
kptr0 += 4;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum10);
_tmp1 = _mm_unpacklo_epi32(_sum20, _sum30);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum10);
_tmp3 = _mm_unpackhi_epi32(_sum20, _sum30);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum10 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum20 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum30 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)outptr1, _sum10);
_mm_storeu_si128((__m128i*)outptr2, _sum20);
_mm_storeu_si128((__m128i*)outptr3, _sum30);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __AVX2__
__m256i _sum00_12 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
#endif
if (nn4 > 0)
{
#if __AVX2__
__m256i _sum10_02 = _mm256_setzero_si256();
#else
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn4; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
_val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_madd_epi16(_val01_16, _w01_16));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_madd_epi16(_val10_16, _w01_16));
#endif
#else
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val01 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
_val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
#else
_sum00 = _mm_add_epi32(_mm_madd_epi16(_val0, _w0), _sum00);
_sum01 = _mm_add_epi32(_mm_madd_epi16(_val0, _w1), _sum01);
_sum10 = _mm_add_epi32(_mm_madd_epi16(_val1, _w0), _sum10);
_sum11 = _mm_add_epi32(_mm_madd_epi16(_val1, _w1), _sum11);
#endif
#endif
tmpptr += 8;
kptr0 += 16;
}
#if __AVX2__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
#else
#if __SSSE3__
_sum00 = _mm_hadd_epi32(_sum00, _sum01);
_sum10 = _mm_hadd_epi32(_sum10, _sum11);
#else
__m128i _sum00_sh = _mm_shuffle_epi32(_sum00, 216);
__m128i _sum01_sh = _mm_shuffle_epi32(_sum01, 216);
__m128i _sum10_sh = _mm_shuffle_epi32(_sum10, 216);
__m128i _sum11_sh = _mm_shuffle_epi32(_sum11, 216);
_sum00 = _mm_unpacklo_epi64(_sum00_sh, _sum01_sh);
_sum01 = _mm_unpackhi_epi64(_sum00_sh, _sum01_sh);
_sum10 = _mm_unpacklo_epi64(_sum10_sh, _sum11_sh);
_sum11 = _mm_unpackhi_epi64(_sum10_sh, _sum11_sh);
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum10 = _mm_add_epi32(_sum10, _sum11);
#endif
#endif
}
#if __AVX2__
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99754
// gcc incorrectly puts the 32-bit value in the tail lanes with _mm_loadu_si32 :(
// expected: 0 1 2 3 x x x x x x x x x x x x
// actual:   x x x x x x x x x x x x 0 1 2 3
// __m128i _w0123 = _mm_loadu_si32(kptr0);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 2;
kptr0 += 4;
}
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m128i _sum0 = _mm_setzero_si128();
if (nn4 > 0)
{
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
_val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 4;
kptr0 += 16;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
}
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set1_epi16(tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
tmpptr += 1;
kptr0 += 4;
}
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#endif // __SSE2__
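// remaining output channels are handled one at a time; on SSE2 the nn4
// loop consumes four input channels per step and nn1 mops up inch % 4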
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __SSE2__
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
if (nn4 > 0)
{
__m256i _sum0_2 = _mm256_setzero_si256();
__m256i _sum1_3 = _mm256_setzero_si256();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
__m128i _w = _mm_cvtepi8_epi16(_w0123);
_w = _mm_unpacklo_epi64(_w, _w);
__m256i _ww = _mm256_inserti128_si256(_mm256_castsi128_si256(_w), _w, 1);
__m256i _sl0_1 = _mm256_mullo_epi16(_val01_16, _ww);
__m256i _sh0_1 = _mm256_mulhi_epi16(_val01_16, _ww);
_sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
_sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
tmpptr += 16;
kptr0 += 4;
}
__m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum1_3, 0);
__m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
__m128i _sum3 = _mm256_extracti128_si256(_sum1_3, 1);
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
sum2 = _mm_reduce_add_epi32(_sum2);
sum3 = _mm_reduce_add_epi32(_sum3);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
sum2 += val2 * w;
sum3 += val3 * w;
tmpptr += 4;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
if (nn4 > 0)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val);
__m128i _val01 = _mm_unpacklo_epi8(_val, _extval);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
_w = _mm_shuffle_epi32(_w, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl01 = _mm_mullo_epi16(_val01, _w);
__m128i _sh01 = _mm_mulhi_epi16(_val01, _w);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 8;
kptr0 += 4;
}
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
tmpptr += 2;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum = 0;
if (nn4 > 0)
{
__m128i _sum = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
__m128i _val = _mm_unpacklo_epi8(_val0123, _extval);
#endif
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
_sum = _mm_add_epi32(_sum, _mm_unpacklo_epi16(_sl, _sh));
tmpptr += 4;
kptr0 += 4;
}
sum = _mm_reduce_add_epi32(_sum);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#else // __SSE2__
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i);
const signed char* kptr0 = kernel.channel(p);
int nn1 = inch * maxk;
int sum = 0;
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#endif // __SSE2__
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rcx, %r13
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
callq 0x732f7
testl %eax, %eax
je 0x108ddb
movq %r15, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq %r13, %rcx
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x131d68
movq %r14, 0x30(%rsp)
movq %rbx, 0x38(%rsp)
callq 0x732db
testl %eax, %eax
je 0x108e14
movq %r15, %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %rdx
movq %r13, %rcx
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x137be8
movl 0x2c(%r15), %ecx
movl 0x30(%r15), %eax
movq %rax, 0x10(%rsp)
movslq 0x38(%r15), %r12
movq 0x30(%rsp), %rax
movslq 0x38(%rax), %rax
movq %rax, 0x48(%rsp)
andq $0x0, 0x90(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x50(%rsp)
vmovdqu %xmm0, 0x5c(%rsp)
vmovdqa %xmm0, 0x70(%rsp)
vmovdqu %xmm0, 0x7c(%rsp)
cmpq $0x4, %r12
movl %ecx, 0x24(%rsp)
jl 0x108e8c
pushq $0x4
popq %r8
cmpl $0x2, %ecx
jl 0x108eac
movq 0x10(%rsp), %rax
leal (%rax,%rax), %esi
movl %r12d, %eax
shrl $0x2, %eax
movl %r12d, %edx
andl $0x3, %edx
addl %eax, %edx
movl %ecx, %eax
shrl %eax
andl $0x1, %ecx
addl %eax, %ecx
jmp 0x108ec1
pushq $0x1
popq %r8
pushq $0x1
popq %r9
cmpl $0x2, %ecx
jl 0x108ec7
movq 0x10(%rsp), %rax
leal (%rax,%rax), %esi
movl %ecx, %eax
shrl %eax
andl $0x1, %ecx
addl %eax, %ecx
jmp 0x108ece
movl %r12d, %eax
shrl $0x2, %eax
movl %r12d, %edx
andl $0x3, %edx
addl %eax, %edx
movq 0x10(%rsp), %rax
movl %eax, %esi
pushq $0x4
popq %r9
jmp 0x108ed1
movq 0x10(%rsp), %rax
movl %eax, %esi
movl %r12d, %edx
movq 0x10(%r13), %rax
movq %rax, (%rsp)
leaq 0x50(%rsp), %rdi
callq 0x628f2
movl 0x24(%rsp), %eax
movl %eax, %edi
sarl %edi
xorl %ecx, %ecx
movq 0x10(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movl %esi, 0xc(%rsp)
movslq %eax, %r8
testl %edi, %edi
cmovlel %ecx, %edi
movq %rdi, 0x28(%rsp)
xorl %eax, %eax
movq %r12, 0x18(%rsp)
cmpq 0x28(%rsp), %rax
je 0x10903a
movq 0x90(%rsp), %r11
movq %rax, 0x40(%rsp)
imulq %rax, %r11
imulq 0x60(%rsp), %r11
addq 0x50(%rsp), %r11
pushq $0x1
popq %rbx
pushq $0x2
popq %r13
pushq $0x3
popq %rdx
xorl %r14d, %r14d
movq %r14, %rax
orq $0x3, %rax
cmpq %r12, %rax
jge 0x109024
movq 0x40(%r15), %rax
imulq 0x10(%r15), %rax
movq %rax, %r10
imulq %rdx, %r10
movq (%r15), %r9
addq %rcx, %r9
movq %rax, %rdi
imulq %r13, %rdi
movq %rax, %rbp
imulq %r14, %rbp
imulq %rbx, %rax
movl 0xc(%rsp), %esi
subl $0x1, %esi
jb 0x108fd5
movb (%r9,%rbp), %r12b
movb %r12b, (%r11)
movb (%r9,%rax), %r12b
movb %r12b, 0x1(%r11)
movb (%r9,%rdi), %r12b
movb %r12b, 0x2(%r11)
movb (%r9,%r10), %r12b
movb %r12b, 0x3(%r11)
movb 0x1(%r9,%rbp), %r12b
movb %r12b, 0x4(%r11)
movb 0x1(%r9,%rax), %r12b
movb %r12b, 0x5(%r11)
movb 0x1(%r9,%rdi), %r12b
movb %r12b, 0x6(%r11)
movb 0x1(%r9,%r10), %r12b
movb %r12b, 0x7(%r11)
addq $0x8, %r11
addq %r8, %r9
jmp 0x108f84
addq $0x4, %r14
addq $0x4, %rdx
addq $0x4, %r13
addq $0x4, %rbx
movq 0x18(%rsp), %r12
jmp 0x108f48
movq 0x40(%r15), %rax
imulq 0x10(%r15), %rax
imulq %r14, %rax
addq (%r15), %rax
movl 0xc(%rsp), %edx
subl $0x1, %edx
jb 0x109021
movb (%rax,%rcx), %sil
movb %sil, (%r11)
movb 0x1(%rax,%rcx), %sil
movb %sil, 0x1(%r11)
addq $0x2, %r11
addq %r8, %rax
jmp 0x109003
incq %r14
cmpq %r12, %r14
jl 0x108fef
movq 0x40(%rsp), %rax
incq %rax
addq $0x2, %rcx
jmp 0x108f14
movq %r8, %rcx
andq $-0x2, %rcx
cmpq %r8, %rcx
jge 0x109137
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x90(%rsp), %rax
imulq 0x60(%rsp), %rax
addq 0x50(%rsp), %rax
pushq $0x1
popq %rbx
pushq $0x2
popq %r14
pushq $0x3
popq %r13
xorl %edx, %edx
movq %rdx, %rsi
orq $0x3, %rsi
cmpq %r12, %rsi
jge 0x10912a
movq 0x40(%r15), %rdi
imulq 0x10(%r15), %rdi
movq %rdi, %r10
imulq %r13, %r10
movq (%r15), %r9
addq %rcx, %r9
movq %rdi, %rsi
imulq %r14, %rsi
movq %rdi, %rbp
imulq %rbx, %rbp
imulq %rdx, %rdi
movl 0xc(%rsp), %r12d
subl $0x1, %r12d
jb 0x1090e3
movb (%r9,%rdi), %r11b
movb %r11b, (%rax)
movb (%r9,%rbp), %r11b
movb %r11b, 0x1(%rax)
movb (%r9,%rsi), %r11b
movb %r11b, 0x2(%rax)
movb (%r9,%r10), %r11b
movb %r11b, 0x3(%rax)
addq $0x4, %rax
addq %r8, %r9
jmp 0x1090b5
addq $0x4, %rdx
addq $0x4, %r13
addq $0x4, %r14
addq $0x4, %rbx
movq 0x18(%rsp), %r12
jmp 0x109078
movq 0x40(%r15), %rsi
imulq %rdx, %rsi
imulq 0x10(%r15), %rsi
addq (%r15), %rsi
addq %rcx, %rsi
movl 0xc(%rsp), %edi
subl $0x1, %edi
jb 0x109127
movb (%rsi), %r9b
movb %r9b, (%rax)
incq %rax
addq %r8, %rsi
jmp 0x109114
incq %rdx
cmpq %r12, %rdx
jl 0x1090fd
incq %rcx
jmp 0x109041
movq 0x30(%rsp), %rdi
movq (%rdi), %rbp
movq 0x40(%rdi), %rax
imulq 0x10(%rdi), %rax
movq %rax, 0x28(%rsp)
movl %r12d, %eax
cltd
pushq $0x4
popq %rcx
idivl %ecx
movl %eax, %ecx
movl %edx, %esi
movq 0x48(%rsp), %rax
movl %eax, %edx
sarl $0x2, %edx
movq 0x10(%rsp), %rax
imull %eax, %ecx
imull %eax, %esi
xorl %r9d, %r9d
testl %esi, %esi
cmovlel %r9d, %esi
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x40(%rsp)
vpxor %xmm0, %xmm0, %xmm0
movl %ecx, 0xc(%rsp)
cmpq 0x40(%rsp), %r9
je 0x10949e
leaq (,%r9,4), %r10
movq 0x28(%rsp), %rax
imulq %rax, %r10
addq %rbp, %r10
leaq 0x1(,%r9,4), %r11
imulq %rax, %r11
addq %rbp, %r11
leaq 0x2(,%r9,4), %r15
imulq %rax, %r15
addq %rbp, %r15
leaq 0x3(,%r9,4), %r12
imulq %rax, %r12
addq %rbp, %r12
movq 0x90(%rsp), %r14
imulq 0x60(%rsp), %r14
movq 0x38(%rsp), %rcx
movq 0x40(%rcx), %rax
movq %r9, 0x18(%rsp)
imulq %r9, %rax
imulq 0x10(%rcx), %rax
movq 0x50(%rsp), %rdi
addq (%rcx), %rax
xorl %r9d, %r9d
movq %r9, %rcx
orq $0x1, %rcx
cmpq %r8, %rcx
jge 0x109331
movq %r9, %r13
shrq %r13
imulq %r14, %r13
addq %rdi, %r13
movl 0xc(%rsp), %ecx
vpxor %xmm1, %xmm1, %xmm1
vpxor %xmm2, %xmm2, %xmm2
testl %ecx, %ecx
jle 0x10929a
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
movq %rax, %rbx
subl $0x1, %ecx
jb 0x10928e
vpmovsxbw (%r13), %xmm5
vmovdqu (%rbx), %xmm6
vpcmpgtb %xmm6, %xmm0, %xmm7
vpunpcklbw %xmm7, %xmm6, %xmm8 # xmm8 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
vpunpckhbw %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
vpshufd $0x44, %xmm5, %xmm7 # xmm7 = xmm5[0,1,0,1]
vpmaddwd %xmm7, %xmm8, %xmm9
vpaddd %xmm4, %xmm9, %xmm4
vpmaddwd %xmm6, %xmm7, %xmm7
vpaddd %xmm2, %xmm7, %xmm2
vpshufd $0xee, %xmm5, %xmm5 # xmm5 = xmm5[2,3,2,3]
vpmaddwd %xmm5, %xmm8, %xmm7
vpaddd %xmm3, %xmm7, %xmm3
vpmaddwd %xmm6, %xmm5, %xmm5
vpaddd %xmm1, %xmm5, %xmm1
addq $0x8, %r13
addq $0x10, %rbx
jmp 0x10923f
vphaddd %xmm2, %xmm4, %xmm2
vphaddd %xmm1, %xmm3, %xmm1
jmp 0x10929d
movq %rax, %rbx
xorl %ecx, %ecx
cmpl %ecx, %esi
je 0x1092e4
movzwl (%r13,%rcx,2), %edx
vmovd %edx, %xmm3
vpmovsxbw %xmm3, %xmm3
vpshuflw $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1,4,5,6,7]
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpmovsxbw (%rbx,%rcx,4), %xmm4
vpshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
vpmullw %xmm4, %xmm3, %xmm5
vpmulhw %xmm4, %xmm3, %xmm3
vpunpcklwd %xmm3, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
vpaddd %xmm4, %xmm2, %xmm2
vpunpckhwd %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
vpaddd %xmm3, %xmm1, %xmm1
incq %rcx
jmp 0x10929f
vmovd %xmm2, (%r10)
vpextrd $0x1, %xmm2, (%r11)
vpextrd $0x2, %xmm2, (%r15)
vpextrd $0x3, %xmm2, (%r12)
vmovd %xmm1, 0x4(%r10)
vpextrd $0x1, %xmm1, 0x4(%r11)
vpextrd $0x2, %xmm1, 0x4(%r15)
vpextrd $0x3, %xmm1, 0x4(%r12)
addq $0x8, %r10
addq $0x8, %r11
addq $0x8, %r15
addq $0x8, %r12
addq $0x2, %r9
jmp 0x109207
movq 0x50(%rsp), %r14
movq 0x90(%rsp), %rax
imulq 0x60(%rsp), %rax
movq 0x38(%rsp), %rcx
movq 0x40(%rcx), %rbx
imulq 0x18(%rsp), %rbx
imulq 0x10(%rcx), %rbx
addq (%rcx), %rbx
cmpl 0x24(%rsp), %r9d
jge 0x109488
movl %r9d, %ecx
shrl %ecx
movl %r9d, %r13d
andl $0x1, %r13d
addl %ecx, %r13d
imulq %rax, %r13
addq %r14, %r13
movl 0xc(%rsp), %ecx
vpxor %xmm1, %xmm1, %xmm1
movq %rbx, %rdi
testl %ecx, %ecx
jle 0x109423
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
subl $0x1, %ecx
jb 0x1093f7
vpmovsxbw (%r13), %xmm5
vmovdqu (%rdi), %xmm6
vpcmpgtb %xmm6, %xmm0, %xmm7
vpunpcklbw %xmm7, %xmm6, %xmm8 # xmm8 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
vpunpckhbw %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
vpshufd $0x44, %xmm5, %xmm5 # xmm5 = xmm5[0,1,0,1]
vpmullw %xmm5, %xmm8, %xmm7
vpmulhw %xmm5, %xmm8, %xmm8
vpmullw %xmm6, %xmm5, %xmm9
vpmulhw %xmm6, %xmm5, %xmm5
vpunpcklwd %xmm8, %xmm7, %xmm6 # xmm6 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
vpaddd %xmm6, %xmm1, %xmm1
vpunpckhwd %xmm8, %xmm7, %xmm6 # xmm6 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
vpaddd %xmm6, %xmm2, %xmm2
vpunpcklwd %xmm5, %xmm9, %xmm6 # xmm6 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
vpaddd %xmm6, %xmm3, %xmm3
vpunpckhwd %xmm5, %xmm9, %xmm5 # xmm5 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
vpaddd %xmm5, %xmm4, %xmm4
addq $0x4, %r13
addq $0x10, %rdi
jmp 0x10939b
vpunpckldq %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vpunpckldq %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vpunpckhdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vpunpckhdq %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
vpunpcklqdq %xmm6, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm6[0]
vpunpckhqdq %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0]
vpunpckhqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm2[1]
vpaddd %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm3, %xmm1
xorl %ecx, %ecx
cmpl %ecx, %esi
je 0x109458
movsbl (%r13,%rcx), %edx
vmovd %edx, %xmm2
vpshuflw $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0,4,5,6,7]
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vpmovsxbw (%rdi,%rcx,4), %xmm3
vpmullw %xmm3, %xmm2, %xmm4
vpmulhw %xmm3, %xmm2, %xmm2
vpunpcklwd %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
vpaddd %xmm2, %xmm1, %xmm1
incq %rcx
jmp 0x109425
vmovd %xmm1, (%r10)
vpextrd $0x1, %xmm1, (%r11)
vpextrd $0x2, %xmm1, (%r15)
vpextrd $0x3, %xmm1, (%r12)
addq $0x4, %r10
addq $0x4, %r11
addq $0x4, %r15
addq $0x4, %r12
incl %r9d
jmp 0x10935b
movq 0x18(%rsp), %r9
incq %r9
movq 0x30(%rsp), %rdi
movl 0xc(%rsp), %ecx
jmp 0x10918b
movq 0x48(%rsp), %r9
andq $-0x4, %r9
movq 0x40(%rdi), %rax
imulq 0x10(%rdi), %rax
movq %rax, 0xa0(%rsp)
movq (%rdi), %rax
movq %rax, 0x98(%rsp)
movl %ecx, %r11d
leaq (,%r11,4), %rax
movq %rax, 0x28(%rsp)
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x48(%rsp), %r9
movq 0x38(%rsp), %rcx
jge 0x1096f6
movq 0x50(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x90(%rsp), %rax
imulq 0x60(%rsp), %rax
movq %rax, 0x40(%rsp)
movl %r9d, %eax
cltd
pushq $0x4
popq %rdi
idivl %edi
movq 0xa0(%rsp), %r12
movq %r9, 0x10(%rsp)
imulq %r9, %r12
addq 0x98(%rsp), %r12
addl %eax, %edx
movslq %edx, %rax
movq 0x40(%rcx), %r10
imulq %rax, %r10
imulq 0x10(%rcx), %r10
addq (%rcx), %r10
movq 0x28(%rsp), %rcx
addq %r10, %rcx
movq %rcx, 0x30(%rsp)
xorl %r15d, %r15d
movq %r15, %rcx
orq $0x1, %rcx
cmpq %r8, %rcx
jge 0x109623
movq %r15, %rdx
shrq %rdx
imulq 0x40(%rsp), %rdx
addq 0x18(%rsp), %rdx
cmpl $0x0, 0xc(%rsp)
jle 0x1095dd
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
vpxor %xmm2, %xmm2, %xmm2
cmpl %ecx, %r11d
je 0x1095ba
vmovq (%rdx), %xmm3
vpcmpgtb %xmm3, %xmm0, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
vpmovsxbw (%r10,%rcx,4), %xmm4
vpshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
vpmullw %xmm3, %xmm4, %xmm5
vpmulhw %xmm4, %xmm3, %xmm3
vpunpcklwd %xmm3, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
vpaddd %xmm4, %xmm1, %xmm1
vpunpckhwd %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
vpaddd %xmm3, %xmm2, %xmm2
addq $0x8, %rdx
incq %rcx
jmp 0x10957d
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm2, %xmm2, %xmm2
vphaddd %xmm2, %xmm2, %xmm2
vmovd %xmm1, %ecx
vmovd %xmm2, %ebx
movq 0x30(%rsp), %r9
jmp 0x1095e4
xorl %ecx, %ecx
movq %r10, %r9
xorl %ebx, %ebx
xorl %edi, %edi
cmpl %edi, %esi
je 0x10960d
movsbl (%rdx,%rdi,2), %r14d
movsbl 0x1(%rdx,%rdi,2), %r13d
movsbl (%r9,%rdi), %ebp
imull %ebp, %r14d
addl %r14d, %ecx
imull %ebp, %r13d
addl %r13d, %ebx
incq %rdi
jmp 0x1095e6
movl %ecx, (%r12)
movl %ebx, 0x4(%r12)
addq $0x8, %r12
addq $0x2, %r15
jmp 0x10954b
movq 0x90(%rsp), %r9
imulq 0x60(%rsp), %r9
movq 0x38(%rsp), %rcx
imulq 0x40(%rcx), %rax
imulq 0x10(%rcx), %rax
movq 0x50(%rsp), %r10
addq (%rcx), %rax
movq 0x28(%rsp), %rcx
leaq (%rax,%rcx), %r14
cmpl 0x24(%rsp), %r15d
jge 0x1096e9
movl %r15d, %ecx
shrl %ecx
movl %r15d, %edx
andl $0x1, %edx
addl %ecx, %edx
imulq %r9, %rdx
addq %r10, %rdx
cmpl $0x0, 0xc(%rsp)
jle 0x1096b9
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
cmpl %ecx, %r11d
je 0x1096a6
vpmovsxbw (%rdx), %xmm2
vpmovsxbw (%rax,%rcx,4), %xmm3
vpmullw %xmm2, %xmm3, %xmm4
vpmulhw %xmm3, %xmm2, %xmm2
vpunpcklwd %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
vpaddd %xmm2, %xmm1, %xmm1
addq $0x4, %rdx
incq %rcx
jmp 0x10967d
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vmovd %xmm1, %ecx
movq %r14, %rdi
jmp 0x1096be
xorl %ecx, %ecx
movq %rax, %rdi
xorl %ebx, %ebx
cmpl %ebx, %esi
je 0x1096d9
movsbl (%rdx,%rbx), %ebp
movsbl (%rdi,%rbx), %r13d
imull %ebp, %r13d
addl %r13d, %ecx
incq %rbx
jmp 0x1096c0
movl %ecx, (%r12)
addq $0x4, %r12
incl %r15d
jmp 0x109651
movq 0x10(%rsp), %r9
incq %r9
jmp 0x1094d7
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x109724
lock
decl (%rax)
jne 0x109724
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x10971c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x109724
movq %rsi, %rdi
callq 0x5f3e0
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x109771
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x109769
lock
decl (%rax)
jne 0x109769
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x109763
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x109769
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nopl (%rax)
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_int8.h
|
ncnn::Convolution_x86_avx::Convolution_x86_avx()
|
Convolution_x86_avx::Convolution_x86_avx()
{
#if __SSE2__
    support_packing = true;
#endif // __SSE2__

    activation = 0;
    nT = 0;
    convolution_dilation1 = 0;
    gemm = 0;
}
|
movq (%rsi), %rax
movq %rax, (%rdi)
movq 0x8(%rsi), %rcx
movq -0x18(%rax), %rax
movq %rcx, (%rdi,%rax)
andq $0x0, 0x178(%rdi)
andq $0x0, 0x1d0(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x18(%rdi)
vmovups %xmm0, 0x24(%rdi)
vmovups %xmm0, 0x38(%rdi)
vmovups %xmm0, 0x44(%rdi)
vxorps %xmm1, %xmm1, %xmm1
andl $0x0, 0x78(%rdi)
vmovups %ymm1, 0x58(%rdi)
vmovups %xmm0, 0x80(%rdi)
vmovups %xmm0, 0x8c(%rdi)
vmovups %ymm1, 0xa0(%rdi)
andl $0x0, 0xc0(%rdi)
vmovups %xmm0, 0xc8(%rdi)
vmovups %xmm0, 0xd4(%rdi)
andl $0x0, 0x108(%rdi)
vmovups %ymm1, 0xe8(%rdi)
vmovups %xmm0, 0x11c(%rdi)
vmovups %xmm0, 0x110(%rdi)
vmovups %ymm1, 0x130(%rdi)
andl $0x0, 0x150(%rdi)
vmovups %xmm0, 0x164(%rdi)
vmovups %xmm0, 0x158(%rdi)
vmovups %xmm0, 0x19c(%rdi)
vmovups %xmm0, 0x190(%rdi)
vmovups %xmm0, 0x1bc(%rdi)
vmovups %xmm0, 0x1b0(%rdi)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movb $0x1, 0xb(%rdi,%rax)
andq $0x0, 0x8(%rdi)
andl $0x0, 0x10(%rdi)
vmovups %xmm0, 0x180(%rdi)
vzeroupper
retq
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_avx.cpp
|
ncnn::Convolution_x86_avx::Convolution_x86_avx()
|
Convolution_x86_avx::Convolution_x86_avx()
{
#if __SSE2__
    support_packing = true;
#endif // __SSE2__

    activation = 0;
    nT = 0;
    convolution_dilation1 = 0;
    gemm = 0;
}
|
pushq %rbx
movq %rdi, %rbx
addq $0x1d8, %rdi # imm = 0x1D8
callq 0x94b00
leaq 0x377a8f(%rip), %rax # 0x481318
movq %rax, (%rbx)
leaq 0x377b0d(%rip), %rax # 0x4813a0
movq %rax, 0x1d8(%rbx)
andq $0x0, 0x178(%rbx)
andq $0x0, 0x1d0(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x18(%rbx)
vmovups %xmm0, 0x24(%rbx)
vmovups %xmm0, 0x38(%rbx)
vmovups %xmm0, 0x44(%rbx)
vxorps %xmm1, %xmm1, %xmm1
vmovups %ymm1, 0x58(%rbx)
andl $0x0, 0x78(%rbx)
vmovups %xmm0, 0x80(%rbx)
vmovups %xmm0, 0x8c(%rbx)
andl $0x0, 0xc0(%rbx)
vmovups %ymm1, 0xa0(%rbx)
vmovups %xmm0, 0xc8(%rbx)
vmovups %xmm0, 0xd4(%rbx)
vmovups %ymm1, 0xe8(%rbx)
andl $0x0, 0x108(%rbx)
vmovups %xmm0, 0x11c(%rbx)
vmovups %xmm0, 0x110(%rbx)
andl $0x0, 0x150(%rbx)
vmovups %ymm1, 0x130(%rbx)
vmovups %xmm0, 0x164(%rbx)
vmovups %xmm0, 0x158(%rbx)
vmovups %xmm0, 0x19c(%rbx)
vmovups %xmm0, 0x190(%rbx)
vmovups %xmm0, 0x1bc(%rbx)
vmovups %xmm0, 0x1b0(%rbx)
movb $0x1, 0x1e3(%rbx)
andq $0x0, 0x8(%rbx)
andl $0x0, 0x10(%rbx)
vmovups %xmm0, 0x180(%rbx)
popq %rbx
vzeroupper
retq
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_avx.cpp
|
ncnn::Convolution_x86_avx::create_pipeline(ncnn::Option const&)
|
int Convolution_x86_avx::create_pipeline(const Option& opt)
{
    if (dynamic_weight)
        return 0;

    activation = create_activation_layer(activation_type, activation_params, opt);
    nT = opt.num_threads;

#if NCNN_INT8
    if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
    {
        return create_pipeline_int8_x86(opt);
    }
#endif

    int kernel_size = kernel_w * kernel_h;
    int num_input = weight_data_size / kernel_size / num_output;
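    // Illustrative arithmetic (values hypothetical): for a 3x3 kernel with
    // num_output = 16 and a flat weight blob of weight_data_size = 1152 floats,
    // kernel_size = 9 and num_input = 1152 / 9 / 16 = 8, recovering the input
    // channel count from the blob size.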
    if (!opt.use_packing_layout && kernel_w == kernel_h && dilation_w != 1 && dilation_h == dilation_w && stride_w == 1 && stride_h == 1)
    {
        convolution_dilation1 = ncnn::create_layer(ncnn::LayerType::Convolution);

        // set param
        ncnn::ParamDict pd;
        pd.set(0, num_output); // num_output
        pd.set(1, kernel_w);
        pd.set(11, kernel_h);
        pd.set(2, 1);
        pd.set(12, 1);
        pd.set(3, 1);  // stride_w
        pd.set(13, 1); // stride_h
        pd.set(4, 0);  // pad_w
        pd.set(14, 0); // pad_h
        pd.set(5, bias_term);
        pd.set(6, weight_data_size);

        convolution_dilation1->load_param(pd);

        // set weights
        if (bias_term)
        {
            ncnn::Mat weights[2];
            weights[0] = weight_data;
            weights[1] = bias_data;

            convolution_dilation1->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[1];
            weights[0] = weight_data;

            convolution_dilation1->load_model(ModelBinFromMatArray(weights));
        }

        convolution_dilation1->create_pipeline(opt);

        if (opt.lightmode)
        {
            weight_data.release();
        }

        return 0;
    }
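    // The inner layer above is configured as a plain dilation-1, stride-1
    // convolution: a dilated convolution with dilation d can be evaluated with
    // such a dense kernel by convolving each of the d*d interleaved sub-images
    // of the input and re-interleaving the results, which is the standard trick
    // this convolution_dilation1 path relies on.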
    int elempack = 1;
    int out_elempack = 1;

#if __SSE2__
    if (opt.use_packing_layout)
    {
#if __AVX512F__
        elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
        elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
        elempack = num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
    }
#endif // __SSE2__
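    // Illustrative example (values hypothetical): on an AVX build with
    // num_input = 24 and num_output = 30, 24 % 8 == 0 so elempack = 8, while
    // 30 % 8 != 0 and 30 % 4 != 0 so out_elempack = 1: each channel count is
    // packed to the widest SIMD-friendly factor that divides it evenly.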
    bool prefer_winograd = (opt.use_winograd23_convolution || opt.use_winograd43_convolution || opt.use_winograd63_convolution) && (num_input > 8 || num_output > 8);

    if (opt.use_winograd_convolution && prefer_winograd && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
    {
        if ((bottom_shapes.empty() || bottom_shapes[0].w == 0 || bottom_shapes[0].h == 0) && (top_shapes.empty() || top_shapes[0].w == 0 || top_shapes[0].h == 0))
        {
            // dynamic shape
            if ((opt.use_winograd63_convolution) && (num_input <= 32 && num_output <= 32))
                conv3x3s1_winograd63_transform_kernel(weight_data, weight_winograd63_data, num_input, num_output, opt);
            else if (opt.use_winograd43_convolution)
                conv3x3s1_winograd43_transform_kernel(weight_data, weight_winograd43_data, num_input, num_output, opt);
            else
                conv3x3s1_winograd23_transform_kernel(weight_data, weight_winograd23_data, num_input, num_output, opt);
        }
        else
        {
            int w;
            int h;
            if (top_shapes.empty() || top_shapes[0].w == 0 || top_shapes[0].h == 0)
            {
                w = bottom_shapes[0].w;
                h = bottom_shapes[0].h;

                // make padding
                if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0)
                {
                    w += pad_left + pad_right;
                    h += pad_top + pad_bottom;
                }
                else if ((pad_left == -233 && pad_right == -233 && pad_top == -233 && pad_bottom == -233)
                         || (pad_left == -234 && pad_right == -234 && pad_top == -234 && pad_bottom == -234))
                {
                    // tensorflow padding=SAME or onnx padding=SAME_UPPER/SAME_LOWER
                    w += 2;
                    h += 2;
                }
            }
            else
            {
                w = top_shapes[0].w + 2;
                h = top_shapes[0].h + 2;
            }

            bool prefer_winograd63 = test_prefer_winograd63(num_input, num_output, w, h);
            bool prefer_winograd23 = test_prefer_winograd23(num_input, num_output, w, h);
            bool prefer_winograd43 = !prefer_winograd63 && !prefer_winograd23;

            if (prefer_winograd23 && !opt.use_winograd23_convolution)
            {
                // f23 fallback to f43
                prefer_winograd23 = false;
                prefer_winograd43 = true;
            }
            if (prefer_winograd63 && !opt.use_winograd63_convolution)
            {
                // f63 fallback to f43
                prefer_winograd63 = false;
                prefer_winograd43 = true;
            }
            if (prefer_winograd43 && !opt.use_winograd43_convolution)
            {
                // f43 fallback to f63 or f23
                prefer_winograd43 = false;
                if (opt.use_winograd63_convolution)
                {
                    prefer_winograd63 = true;
                }
                else
                {
                    prefer_winograd23 = true;
                }
            }

            if (prefer_winograd23)
            {
                conv3x3s1_winograd23_transform_kernel(weight_data, weight_winograd23_data, num_input, num_output, opt);
            }
            else if (prefer_winograd43)
            {
                conv3x3s1_winograd43_transform_kernel(weight_data, weight_winograd43_data, num_input, num_output, opt);
            }
            else if (prefer_winograd63)
            {
                conv3x3s1_winograd63_transform_kernel(weight_data, weight_winograd63_data, num_input, num_output, opt);
            }
            else
            {
                // should never reach here
            }
        }

        if (opt.lightmode)
        {
            weight_data.release();
        }

        return 0;
    }
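    // Worked numbers for the winograd variants above: a 2D F(m x m, 3x3) tile
    // computes m*m outputs with (m+2)*(m+2) elementwise multiplies, so f23
    // spends 16 multiplies per 4 outputs versus 36 for direct 3x3 convolution
    // (2.25x fewer), f43 spends 36 per 16 (4x fewer), and f63 spends 64 per 36
    // (about 5.06x fewer), at the cost of larger transforms and worse numerical
    // behaviour as the tile grows; hence the per-shape preference tests.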
    int l2_cache_size = get_cpu_level2_cache_size();
    bool prefer_sgemm = num_input * num_output * kernel_w * kernel_h * dilation_w * dilation_h * stride_w * stride_h * (int)sizeof(float) * 2 > l2_cache_size || (num_input > 16 || num_output > 16);
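    // Illustrative arithmetic (values hypothetical): with num_input = 64,
    // num_output = 64, a 3x3 kernel and unit dilation/stride, the left side is
    // 64 * 64 * 3 * 3 * 4 * 2 = 294912 bytes; against a 256 KiB (262144 byte)
    // L2 that already overflows, and num_input > 16 holds anyway, so the
    // sgemm path is preferred.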
    if ((opt.use_sgemm_convolution && prefer_sgemm) || (kernel_w == 1 && kernel_h == 1))
    {
        const int maxk = kernel_w * kernel_h;

        gemm = ncnn::create_layer(ncnn::LayerType::Gemm);

        ncnn::ParamDict pd;
        pd.set(2, 0);                   // transA
        pd.set(3, 0);                   // transB
        pd.set(4, 1);                   // constantA
        pd.set(5, 0);                   // constantB
        pd.set(6, 1);                   // constantC
        pd.set(7, num_output);          // M = outch
        pd.set(8, 0);                   // N = size
        pd.set(9, maxk * num_input);    // K = maxk*inch
        pd.set(10, bias_term ? 1 : -1); // constant_broadcast_type_C = (M)
        pd.set(11, 1);                  // output_N1M

        gemm->load_param(pd);
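        // The parameters cast convolution as im2col + GEMM: top[M x N] =
        // A[M x K] * B[K x N] with M = num_output and K = maxk * num_input,
        // while N = output width * height is only known at inference time,
        // hence N is left as 0 here; A is the constant repacked weight matrix
        // built below.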
        // maxk-inch-outch to pa-maxk-inch/pa-outch
        Mat tmp;
        {
            Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

            tmp.create(maxk * num_input, num_output);

            for (int q = 0; q < num_output; q += 1)
            {
                float* g00 = tmp.row(q);

                for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
                {
                    for (int k = 0; k < maxk; k++)
                    {
                        for (int i = 0; i < elempack; i++)
                        {
                            const float* k00 = weight_data_r2.channel(q).row(p + i);

                            g00[0] = k00[k];
                            g00++;
                        }
                    }
                }
            }
        }
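        // Resulting row layout per output channel q, sketched for elempack = 4:
        //   row(q) = w(q,p+0,k0) w(q,p+1,k0) w(q,p+2,k0) w(q,p+3,k0)
        //            w(q,p+0,k1) w(q,p+1,k1) ... for each 4-channel group p,
        // i.e. the elempack lanes are interleaved innermost so the gemm kernel
        // can load one SIMD register of weights per k step.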
        if (bias_term)
        {
            ncnn::Mat weights[2];
            weights[0] = tmp;
            weights[1] = bias_data;

            gemm->load_model(ModelBinFromMatArray(weights));
        }
        else
        {
            ncnn::Mat weights[1];
            weights[0] = tmp;

            gemm->load_model(ModelBinFromMatArray(weights));
        }

        gemm->create_pipeline(opt);
    }
    else
    {
        if ((elempack == 16 && out_elempack == 1 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 8 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 8 && out_elempack == 8 && kernel_w == 2 && kernel_h == 2 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 8 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
            || (elempack == 8 && out_elempack == 1 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 4 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
            || (elempack == 1 && out_elempack == 4 && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2))
        {
            convolution_transform_kernel_packed_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
        }
        else
        {
            convolution_transform_kernel_packed(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h);
        }
    }

    if (opt.lightmode)
    {
        weight_data.release();
    }

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x228, %rsp # imm = 0x228
movq (%rdi), %rax
movq -0x18(%rax), %rbx
cmpl $0x0, 0x158(%rdi,%rbx)          # presumably the dynamic_weight test: zero jumps into the body, nonzero falls through to return 0
je 0x1099b0
xorl %eax, %eax                      # return value 0
addq $0x228, %rsp # imm = 0x228
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, 0x30(%rsp)
movq %rdi, 0x8(%rsp)
movl 0x10c(%rdi,%rbx), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x10a304
leaq 0x2e901d(%rip), %rax # 0x3f29f0
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x109b80
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x109b80
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd 0x4(%rax), %xmm0
leaq 0x90(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x109b80
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x109b80
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x109b80
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x90(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x8(%rsp), %rax
movq 0x110(%rax,%rbx), %rax
vmovd 0x4(%rax), %xmm0
leaq 0x90(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x90(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x90(%rsp), %rdi
callq 0x71614
movq (%r15), %rax
movq %r15, %rdi
movq 0x30(%rsp), %rbx
movq %rbx, %rsi
callq *0x20(%rax)
movq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq %r15, 0x8(%rdi)
movl 0x4(%rbx), %ecx
movl %ecx, 0x10(%rdi)
movq -0x18(%rax), %rax
cmpb $0x1, 0x1e(%rbx)
jne 0x109bd4
cmpq $0x1, 0x170(%rdi,%rax)
jne 0x109bd4
movq 0x30(%rsp), %rsi
callq 0x10c7dc
jmp 0x10999c
leaq (%rdi,%rax), %r12
movl 0xd0(%rdi,%rax), %ebp
movl 0xd4(%rdi,%rax), %ecx
movl 0xd8(%rdi,%rax), %esi
movl %esi, %r8d
imull %ecx, %r8d
movl 0x104(%rdi,%rax), %eax
cltd
idivl %r8d
cltd
idivl %ebp
movq %rax, 0x18(%rsp)
movq 0x30(%rsp), %rdi
movb 0x27(%rdi), %al
cmpl %esi, %ecx
setne %dl
orb %al, %dl
jne 0x109e6c
movl 0xdc(%r12), %edx
cmpl $0x1, %edx
je 0x109e6c
cmpl %edx, 0xe0(%r12)
jne 0x109e6c
cmpl $0x1, 0xe4(%r12)
jne 0x109e6c
cmpl $0x1, 0xe8(%r12)
jne 0x109e6c
pushq $0x6
popq %rdi
callq 0x782bf                        # ncnn::create_layer, LayerType::Convolution (0x6)
movq 0x8(%rsp), %rbx
movq %rax, 0x180(%rbx)
leaq 0x160(%rsp), %r15
movq %r15, %rdi
callq 0x71548
movq (%rbx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbx,%rax), %edx
movq %r15, %rdi
xorl %esi, %esi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd8(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0xb
popq %rsi
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x2
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xc
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x3
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xd
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x160(%rsp), %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0x100(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0x104(%rcx,%rax), %edx
leaq 0x160(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq 0x8(%rsp), %rax
movq 0x180(%rax), %rdi
movq (%rdi), %rax
leaq 0x160(%rsp), %rsi
callq *0x10(%rax)
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0x100(%rdx,%rcx)
je 0x10acc9
leaq 0x90(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rcx)
vmovdqu %xmm0, 0xc(%rcx)
vmovdqa %xmm0, 0x20(%rcx)
vmovdqu %xmm0, 0x2c(%rcx)
andq $0x0, 0x88(%rcx)
vmovdqu %xmm0, 0x48(%rcx)
vmovdqu %xmm0, 0x54(%rcx)
vmovdqu %xmm0, 0x68(%rcx)
vmovdqu %xmm0, 0x74(%rcx)
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x160, %rax # imm = 0x160
cmpq %rax, %rcx
je 0x10c16c
addq %rdx, %rbx
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0x109e2e
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c0f7
lock
decl (%rax)
jne 0x10c0f7
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10c0ef
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c0f7
testb %al, %al
je 0x109ea4
pushq $0x8
popq %rax
xorl %edx, %edx
movq 0x18(%rsp), %r8
testb $0x3, %r8b
sete %dl
testb $0x7, %r8b
leal 0x1(%rdx,%rdx,2), %ebx
cmovel %eax, %ebx
xorl %edx, %edx
testb $0x3, %bpl
sete %dl
testb $0x7, %bpl
leal 0x1(%rdx,%rdx,2), %r8d
cmovel %eax, %r8d
jmp 0x109eab
pushq $0x1
popq %r8
movl %r8d, %ebx
movb 0x37(%rdi), %r15b
testb %r15b, %r15b
jne 0x109ec4
cmpb $0x0, 0x38(%rdi)
jne 0x109ec4
cmpb $0x1, 0x39(%rdi)
jne 0x10a316
cmpl $0x9, 0x18(%rsp)
setge %dl
cmpl $0x9, %ebp
setge %al
orb %dl, %al
cmpl $0x3, %esi
jne 0x109f53
cmpl $0x3, %ecx
jne 0x109f53
cmpb $0x0, 0x1c(%rdi)
je 0x109f53
testb %al, %al
je 0x109f53
cmpl $0x1, 0xdc(%r12)
jne 0x109f53
cmpl $0x1, 0xe0(%r12)
jne 0x109f53
cmpl $0x1, 0xe4(%r12)
jne 0x109f53
cmpl $0x1, 0xe8(%r12)
jne 0x109f53
movq 0xa0(%r12), %rax
cmpq 0xa8(%r12), %rax
je 0x10a9ab
cmpl $0x0, 0x2c(%rax)
je 0x10a9ab
cmpl $0x0, 0x30(%rax)
je 0x10a9ab
movq 0xb8(%r12), %rcx
movq 0xc0(%r12), %rdx
jmp 0x10a9d8
movq %r8, 0x28(%rsp)
movq %rdi, %r14
callq 0x73479
movq 0x8(%rsp), %rdx
movq (%rdx), %rcx
movq -0x18(%rcx), %rcx
movl 0xd0(%rdx,%rcx), %r12d
movl 0xd4(%rdx,%rcx), %r13d
vmovdqu 0xd8(%rdx,%rcx), %xmm0
movq %rcx, 0x128(%rsp)
movl 0xe8(%rdx,%rcx), %ecx
movl %r12d, %edx
imull %r13d, %edx
vmovd %xmm0, %esi
movq %rsi, 0x10(%rsp)
imull %esi, %edx
vpextrd $0x1, %xmm0, %edi
imull %edx, %edi
vpextrd $0x2, %xmm0, %esi
imull %esi, %edi
vpextrd $0x3, %xmm0, %edx
imull %edx, %edi
imull %ecx, %edi
movq 0x18(%rsp), %r9
imull %r9d, %edi
shll $0x3, %edi
cmpl %eax, %edi
setg %al
cmpl $0x11, %r9d
setge %dil
cmpl $0x11, %r12d
setge %r8b
cmpb $0x1, 0x1d(%r14)
movl %ebx, %r15d
movslq %r9d, %r9
movq %r9, 0x20(%rsp)
jne 0x10a000
orb %r8b, %dil
orb %dil, %al
jne 0x10a016
movl %r13d, %eax
xorl $0x1, %eax
movq 0x10(%rsp), %rdi
xorl $0x1, %edi
orl %eax, %edi
jne 0x10a2bc
pushq $0x4a
popq %rdi
callq 0x782bf                        # ncnn::create_layer, presumably LayerType::Gemm (0x4a), stored to the gemm member below
movq 0x8(%rsp), %rcx
movq %rax, 0x188(%rcx)
leaq 0x70(%rsp), %r12
movq %r12, %rdi
callq 0x71548
pushq $0x2
popq %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x3
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x4
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x5
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x6
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %edx
leaq 0x70(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0x8
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq 0x10(%rsp), %rcx
imull %r13d, %ecx
movq 0x18(%rsp), %rax
movl %eax, %r12d
movq %rcx, %rbp
imull %ecx, %r12d
leaq 0x70(%rsp), %rdi
pushq $0x9
popq %rsi
movl %r12d, %edx
callq 0x7193a
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
xorl %edx, %edx
cmpl $0x1, 0x100(%rcx,%rax)
sbbl %edx, %edx
orl $0x1, %edx
leaq 0x70(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7193a
leaq 0x70(%rsp), %rdi
pushq $0xb
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x8(%rsp), %rax
movq 0x188(%rax), %rdi
movq (%rdi), %rax
leaq 0x70(%rsp), %rsi
callq *0x10(%rax)
andq $0x0, 0x1a0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqu %xmm0, 0x16c(%rsp)
vmovdqa %xmm0, 0x180(%rsp)
vmovdqu %xmm0, 0x18c(%rsp)
movq 0x8(%rsp), %r13
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq 0x160(%r13,%rax), %rsi
movl -0x90(%rsi), %r8d
leaq 0x90(%rsp), %rdi
movl %ebp, %edx
movq 0x18(%rsp), %rcx
xorl %r9d, %r9d
callq 0x63020
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r13,%rax), %edx
xorl %r14d, %r14d
leaq 0x160(%rsp), %rdi
pushq $0x4
popq %rcx
movl %r12d, %esi
xorl %r8d, %r8d
callq 0x636fa
movq (%r13), %rax
movq 0x160(%rsp), %rcx
movslq 0x18c(%rsp), %rdx
imulq 0x170(%rsp), %rdx
decl %ebx
testl %ebp, %ebp
cmovlel %r14d, %ebp
subq %rbx, 0x20(%rsp)
movq -0x18(%rax), %rsi
movq 0x8(%rsp), %rdi
movslq 0xd0(%rdi,%rsi), %rsi
cmpq %rsi, %r14
jge 0x10a27e
movq %rdx, %rsi
imulq %r14, %rsi
addq %rcx, %rsi
xorl %edi, %edi
cmpq 0x20(%rsp), %rdi
jge 0x10a276
movslq 0xbc(%rsp), %r8
movq 0xa0(%rsp), %r10
movq %r10, %r9
imulq %r8, %r9
movq 0xd0(%rsp), %r11
imulq %r14, %r11
imulq %rdi, %r8
addq %r11, %r8
imulq %r10, %r8
addq 0x90(%rsp), %r8
xorl %r10d, %r10d
cmpq %rbp, %r10
je 0x10a271
movq %r8, %r11
movq %r15, %rbx
subq $0x1, %rbx
jb 0x10a268
vmovd (%r11), %xmm0
vmovd %xmm0, (%rsi)
addq $0x4, %rsi
addq %r9, %r11
jmp 0x10a250
incq %r10
addq $0x4, %r8
jmp 0x10a245
addq %r15, %rdi
jmp 0x10a205
incq %r14
jmp 0x10a1df
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10a355
lock
decl (%rax)
jne 0x10a355
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10a34d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a355
movq 0x8(%rsp), %rax
addq %rax, 0x128(%rsp)
movslq %r12d, %rdi
movq %rdi, 0x80(%rsp)
movq 0x28(%rsp), %rdi
cmpl $0x8, %edi
jne 0x10a31d
cmpl $0x8, %ebx
jne 0x10a31d
cmpl $0x2, %r13d
je 0x10a58d
cmpl $0x3, %r13d
jne 0x10ab39
vpxor 0x2e8581(%rip), %xmm0, %xmm0 # 0x3f2880
jmp 0x10a595
xorl %r15d, %r15d
movq 0x8(%rsp), %rdi
movq 0x30(%rsp), %rbx
jmp 0x109ba6
xorl %eax, %eax
jmp 0x109ed4
vpinsrd $0x3, %r13d, %xmm0, %xmm1
cmpl $0x8, %edi
jne 0x10a49e
cmpl $0x1, %ebx
jne 0x10a49e
vpxor 0x2e8523(%rip), %xmm1, %xmm0 # 0x3f2860
vptest %xmm0, %xmm0
jne 0x10ab39
jmp 0x10a7d9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%rcx,%rax)
je 0x10a41b
andq $0x0, 0xd0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vmovdqu %xmm0, 0x9c(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqu %xmm0, 0xbc(%rsp)
andq $0x0, 0x118(%rsp)
vmovdqu %xmm0, 0xd8(%rsp)
vmovdqu %xmm0, 0xe4(%rsp)
vmovdqu %xmm0, 0xf8(%rsp)
vmovdqu %xmm0, 0x104(%rsp)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0x10a3dd
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10a4d0
lock
decl (%rax)
jne 0x10a4d0
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10a4c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a4d0
andq $0x0, 0xd0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vmovdqu %xmm0, 0x9c(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqu %xmm0, 0xbc(%rsp)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0x10aa0d
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10aa0d
lock
decl (%rax)
jne 0x10aa0d
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10aa05
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10aa0d
cmpl $0x1, %edi
jne 0x10a7a3
cmpl $0x8, %ebx
jne 0x10a7a3
vpxor 0x2e83a8(%rip), %xmm1, %xmm0 # 0x3f2860
vptest %xmm0, %xmm0
je 0x10a7e2
jmp 0x10ab39
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x160(%rsp), %xmm0
leaq 0xd8(%rsp), %rax
vmovaps %xmm0, -0x48(%rax)
movq 0x170(%rsp), %rcx
movq %rcx, -0x38(%rax)
movl 0x178(%rsp), %ecx
movl %ecx, -0x30(%rax)
movq 0x180(%rsp), %rcx
movq %rcx, -0x28(%rax)
vmovdqu 0x188(%rsp), %xmm0
vmovdqu %xmm0, -0x20(%rax)
movl 0x198(%rsp), %ecx
movl %ecx, -0x10(%rax)
movq 0x1a0(%rsp), %rcx
movq %rcx, -0x8(%rax)
movq 0x8(%rsp), %rdx
movq (%rdx), %rcx
movq -0x18(%rcx), %rbx
leaq (%rdx,%rbx), %rcx
addq $0x1a8, %rcx # imm = 0x1A8
cmpq %rcx, %rax
je 0x10a61b
addq %rdx, %rbx
movq 0x1b0(%rbx), %rax
testq %rax, %rax
je 0x10a55e
lock
incl (%rax)
movq 0xe0(%rsp), %rax
testq %rax, %rax
je 0x10a5ad
lock
decl (%rax)
jne 0x10a5ad
movq 0xd8(%rsp), %rsi
movq 0xf8(%rsp), %rdi
testq %rdi, %rdi
je 0x10a5a5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a5ad
vpxor 0x2e82db(%rip), %xmm0, %xmm0 # 0x3f2870
vptest %xmm0, %xmm0
je 0x10a7eb
jmp 0x10ab39
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x1a8(%rbx), %xmm0
vmovups %xmm0, 0xd8(%rsp)
movq 0x1b8(%rbx), %rax
movq %rax, 0xe8(%rsp)
movl 0x1c0(%rbx), %eax
movl %eax, 0xf0(%rsp)
movq 0x1c8(%rbx), %rax
movq %rax, 0xf8(%rsp)
vmovdqu 0x1d0(%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movl 0x1e0(%rbx), %eax
movl %eax, 0x110(%rsp)
movq 0x1e8(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rsp), %rdx
movq 0x188(%rdx), %r15
leaq 0x1f8(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x1f8(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%rbx), %rax
testq %rax, %rax
je 0x10a69b
lock
decl (%rax)
jne 0x10a69b
movq 0x90(%rsp,%rbx), %rsi
movq 0xb0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x10a68f
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0x10a69b
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x10a65c
movq 0x8(%rsp), %rax
movq 0x188(%rax), %rdi
movq (%rdi), %rax
movq 0x30(%rsp), %rsi
callq *0x20(%rax)
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0x10a714
lock
decl (%rax)
jne 0x10a714
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
je 0x10a70c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a714
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x70(%rsp), %rdi
callq 0x71614
movq 0x30(%rsp), %rax
cmpb $0x1, (%rax)
movq 0x8(%rsp), %rcx
jne 0x10999c
movq (%rcx), %rax
movq -0x18(%rax), %rax
leaq (%rcx,%rax), %rbx
leaq (%rcx,%rax), %r14
addq $0x160, %r14 # imm = 0x160
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x10a778
lock
decl (%rax)
jne 0x10a778
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x10a770
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a778
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rbx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%r14)
vmovdqu %xmm0, (%r14)
vmovdqu %xmm0, 0x188(%rbx)
andl $0x0, 0x198(%rbx)
jmp 0x10999c
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vpinsrd $0x0, %edi, %xmm0, %xmm0
vpinsrd $0x1, %r13d, %xmm0, %xmm0
vpxor 0x2e8094(%rip), %xmm0, %xmm0 # 0x3f2850
vptest %xmm0, %xmm0
jne 0x10ab39
cmpl $0x1, %esi
jne 0x10ab39
cmpl $0x1, %ebx
jne 0x10ab39
cmpl $0x2, %edx
je 0x10ab30
cmpl $0x1, %edx
jne 0x10ab39
cmpl $0x1, %ecx
jne 0x10ab39
movq 0x128(%rsp), %rsi
addq $0x160, %rsi # imm = 0x160
leaq 0x18(%rax), %rcx
movq %rcx, 0x38(%rsp)
movq 0x10(%rsp), %rbp
imull %r13d, %ebp
leaq 0x90(%rsp), %rdi
movl %ebp, %edx
movq 0x18(%rsp), %r14
movl %r14d, %ecx
movq 0x80(%rsp), %r13
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movl %r14d, %eax
movq 0x28(%rsp), %r14
cltd
idivl %ebx
movl %eax, %ecx
movl %r12d, %eax
cltd
idivl %r14d
movl %ebx, %r9d
imull %r14d, %r9d
leal (,%r9,4), %r8d
andq $0x0, (%rsp)
movq 0x38(%rsp), %rdi
movl %ebp, %esi
movl %ecx, %edx
movl %eax, %ecx
callq 0x628f2
movq %r14, %rax
movl %eax, %r14d
decl %eax
movq 0x8(%rsp), %rdx
movq 0x18(%rdx), %rcx
movq %rcx, 0x38(%rsp)
movq 0x28(%rdx), %rcx
imulq 0x58(%rdx), %rcx
movq %rcx, 0x40(%rsp)
decl %ebx
subq %rax, %r13
movq %r13, 0x80(%rsp)
subq %rbx, 0x20(%rsp)
movl %ebp, %r9d
xorl %r8d, %r8d
cmpq 0x80(%rsp), %r8
jge 0x10a96d
movl %r8d, %eax
cltd
idivl 0x28(%rsp)
cltq
imulq 0x40(%rsp), %rax
addq 0x38(%rsp), %rax
xorl %edx, %edx
cmpq 0x20(%rsp), %rdx
jge 0x10a965
xorl %r10d, %r10d
xorl %r11d, %r11d
cmpq %r9, %r11
je 0x10a95d
movq %rdx, %rbx
xorl %r12d, %r12d
cmpq %r15, %r12
je 0x10a954
movslq 0xbc(%rsp), %r13
movq 0xa0(%rsp), %rbp
movq 0xd0(%rsp), %rsi
movq 0x90(%rsp), %rdi
addq %r10, %rdi
movq %r8, %rcx
imulq %rsi, %rcx
imulq %rbx, %r13
addq %rcx, %r13
imulq %rbp, %r13
addq %rdi, %r13
imulq %rsi, %rbp
movq %r14, %rsi
subq $0x1, %rsi
jb 0x10a94c
vmovd (%r13), %xmm0
vmovd %xmm0, (%rax)
addq $0x4, %rax
addq %rbp, %r13
jmp 0x10a933
incq %r12
incq %rbx
jmp 0x10a8ef
incq %r11
addq $0x4, %r10
jmp 0x10a8e4
addq %r15, %rdx
jmp 0x10a8d3
addq %r14, %r8
jmp 0x10a8ae
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10a71e
lock
decl (%rax)
jne 0x10a71e
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10aef8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a71e
movq 0xb8(%r12), %rcx
movq 0xc0(%r12), %rdx
cmpq %rdx, %rcx
je 0x10aae6
cmpl $0x0, 0x2c(%rcx)
je 0x10aae6
cmpl $0x0, 0x30(%rcx)
je 0x10aae6
cmpq %rdx, %rcx
je 0x10ae11
movl 0x2c(%rcx), %r13d
testl %r13d, %r13d
je 0x10ae11
movl 0x30(%rcx), %ecx
testl %ecx, %ecx
je 0x10ae11
addl $0x2, %r13d
addl $0x2, %ecx
jmp 0x10ae71
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x160(%rsp), %xmm0
leaq 0x90(%rsp), %rsi
vmovaps %xmm0, (%rsi)
movq 0x170(%rsp), %rax
movq %rax, 0x10(%rsi)
movl 0x178(%rsp), %eax
movl %eax, 0x18(%rsi)
movq 0x180(%rsp), %rax
movq %rax, 0x20(%rsi)
vmovdqu 0x188(%rsp), %xmm0
vmovdqu %xmm0, 0x28(%rsi)
movl 0x198(%rsp), %eax
movl %eax, 0x38(%rsi)
movq 0x1a0(%rsp), %rax
movq %rax, 0x40(%rsi)
movq 0x8(%rsp), %rax
movq 0x188(%rax), %r15
leaq 0x1f8(%rsp), %rdi
callq 0x6b00e
movq (%r15), %rax
leaq 0x1f8(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10a6c6
lock
decl (%rax)
jne 0x10a6c6
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10aad9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10a6c6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10a6c6
cmpl $0x20, %ebp
jg 0x10aec1
cmpb $0x0, 0x39(%rdi)
je 0x10aec1
cmpl $0x20, 0x18(%rsp)
jg 0x10aec1
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0x138(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0x10dc83
jmp 0x10c0ac
cmpl $0x2, %ecx
je 0x10a7f4
leaq 0x18(%rax), %rdi
movq 0x10(%rsp), %rax
imull %r13d, %eax
movq %rax, 0x10(%rsp)
cmpl $0x8, %r12d
jl 0x10ab8d
movq 0x18(%rsp), %rbx
cmpl $0x8, %ebx
movq 0x80(%rsp), %r8
jl 0x10abd7
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x6, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0x10ac71
cmpl $0x4, %r12d
movq 0x18(%rsp), %rbx
movq 0x80(%rsp), %rcx
jl 0x10abf5
cmpl $0x8, %ebx
jl 0x10ac31
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x5, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %r8d
shrl $0x2, %r8d
andl $0x1, %r8d
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %r8d, %edx
jmp 0x10ada5
cmpl $0x4, %ebx
jl 0x10ac56
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x5, %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
jmp 0x10ac71
cmpl $0x2, %ecx
jl 0x10ac94
cmpl $0x8, %ebx
jl 0x10ad58
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0x10af45
cmpl $0x4, %ebx
jl 0x10ad88
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
jmp 0x10ada5
cmpl $0x2, %ebx
jl 0x10adb6
movq 0x10(%rsp), %r14
movl %r14d, %esi
shll $0x4, %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
movl %r8d, %eax
shrl $0x3, %eax
movl %r12d, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
andl $0x1, %r12d
addl %eax, %r12d
btl $0x1, %r8d
adcl %ecx, %r12d
jmp 0x10af4c
cmpl $0x8, %ebx
jl 0x10ade6
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %ebx, %eax
shrl $0x3, %eax
movl %ebx, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
movl %ebx, %edx
andl $0x1, %edx
addl %eax, %edx
btl $0x1, %ebx
adcl %ecx, %edx
jmp 0x10af4c
leaq 0x90(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rcx)
vmovdqu %xmm0, 0xc(%rcx)
vmovdqa %xmm0, 0x20(%rcx)
vmovdqu %xmm0, 0x2c(%rcx)
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x160, %rax # imm = 0x160
cmpq %rax, %rcx
je 0x10c445
addq %rdx, %rbx
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0x10c3d7
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c3d7
lock
decl (%rax)
jne 0x10c3d7
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10c3cf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c3d7
cmpl $0x4, %ebx
jl 0x10af28
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
andl $0x1, %ecx
incl %ecx
pushq $0x4
popq %r8
jmp 0x10af53
cmpl $0x2, %ebx
jl 0x10bfe5
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
andl $0x1, %r12d
btl $0x1, %ecx
adcl $0x1, %r12d
jmp 0x10af4c
movq 0x10(%rsp), %r14
leal (,%r14,8), %esi
movl %r8d, %eax
shrl $0x3, %eax
movl %r12d, %ecx
shrl $0x2, %ecx
andl $0x1, %ecx
andl $0x1, %r12d
addl %eax, %r12d
btl $0x1, %r8d
adcl %ecx, %r12d
jmp 0x10bffe
cmpl $0x4, %ebx
jl 0x10c009
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
movl %ebx, %edx
andl $0x1, %edx
btl $0x1, %ebx
adcl $0x1, %edx
pushq $0x4
popq %r8
jmp 0x10c0e0
movl 0x2c(%rax), %r13d
movl 0x30(%rax), %ecx
vmovq 0xec(%r12), %xmm0
vpxor %xmm1, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm1
vpmovsxdq %xmm1, %xmm1
vtestpd %xmm1, %xmm1
jne 0x10ae52
movl 0xf4(%r12), %eax
testl %eax, %eax
jg 0x10ae52
movl 0xf8(%r12), %edx
testl %edx, %edx
jle 0x10c38a
vmovd %xmm0, %eax
vpextrd $0x1, %xmm0, %edx
addl %eax, %edx
addl %edx, %r13d
addl 0xf4(%r12), %ecx
addl 0xf8(%r12), %ecx
movl %ecx, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl %ebx, %edi
movl %ebp, %esi
movl %r13d, %edx
callq 0x10ebf9
movl %eax, %r14d
movl %ebx, %edi
movl %ebp, %esi
movl %r13d, %edx
movl 0x28(%rsp), %ecx
callq 0x10edf1
movl %eax, %ecx
andb %r15b, %cl
movzbl %r14b, %esi
movzbl %r15b, %edx
testb %al, %al
cmovel %esi, %edx
testb %sil, %sil
je 0x10af05
movq 0x30(%rsp), %rbx
movb 0x39(%rbx), %al
testb %al, %dl
jne 0x10af12
jmp 0x10c044
addq $0x160, %r12 # imm = 0x160
cmpb $0x1, 0x38(%rdi)
movq 0x8(%rsp), %rax
movq %rdi, %rbx
jne 0x10c027
leaq 0xf0(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x10e20b
jmp 0x10c0b1
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10a71e
testb %dl, %dl
movq 0x30(%rsp), %rbx
je 0x10c042
testb %cl, %cl
jne 0x10c085
testb %r14b, %r14b
jne 0x10ab04
jmp 0x10c0b1
cmpl $0x2, %ebx
jl 0x10c0b9
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
andl $0x1, %r12d
incl %r12d
pushq $0x4
popq %r8
movl %r12d, %ecx
xorl %r9d, %r9d
callq 0x63810
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq %rax, 0x1d8(%rsp)
movq 0x8(%rsp), %rax
movq 0x28(%rax), %rcx
imulq 0x58(%rax), %rcx
movq %rcx, 0x220(%rsp)
movq 0x18(%rax), %rax
movq %rax, 0x218(%rsp)
movslq %r14d, %rdi
xorl %edx, %edx
testl %r14d, %r14d
movl $0x0, %ecx
cmovgl %r14d, %ecx
movl %r14d, %r8d
imull %ebx, %r8d
leal (,%r8,8), %eax
movl %eax, 0x20c(%rsp)
leal (%r8,%r8), %r11d
leal (%r8,%r8,2), %ebx
imull $0x7, %r8d, %r10d
movq %rdi, %rax
shlq $0x5, %rax
movq %rax, 0x1c8(%rsp)
leaq (,%rdi,4), %rsi
imull $0x6, %r8d, %ebp
movq %rdi, %rax
shlq $0x4, %rax
movq %rax, 0x1c0(%rsp)
leal (%r8,%r8,4), %r12d
leal (,%r8,4), %r13d
movq %rdi, 0x210(%rsp)
leaq (,%rdi,8), %rax
movq %r8, %rdi
movq %rax, 0x1b8(%rsp)
leaq (,%rcx,4), %rax
movq %rax, 0x1d0(%rsp)
xorl %r9d, %r9d
movq %r9, %rax
orq $0x7, %rax
movq 0x80(%rsp), %r8
cmpq %r8, %rax
jge 0x10b5d2
movl %r10d, 0x8c(%rsp)
movslq %r10d, %rax
movq %rdx, %r8
movq 0x1d8(%rsp), %r14
leaq (%r14,%rax,4), %rdx
movl %ebp, 0x1f0(%rsp)
movslq %ebp, %rax
movq %r9, %r10
leaq (%r14,%rax,4), %r9
movl %r12d, 0x1e8(%rsp)
movslq %r12d, %rax
movl %ebx, %ebp
leaq (%r14,%rax,4), %rbx
movl %r13d, 0x1e0(%rsp)
movslq %r13d, %rax
leaq (%r14,%rax,4), %r12
movl %ebp, 0x130(%rsp)
movslq %ebp, %rax
leaq (%r14,%rax,4), %rax
movl %r11d, %ebp
movq %rdi, %r11
movl %ebp, 0x138(%rsp)
movslq %ebp, %rdi
leaq (%r14,%rdi,4), %r13
movq %r11, 0x140(%rsp)
movslq %r11d, %rdi
leaq (%r14,%rdi,4), %r15
movq %r8, 0x150(%rsp)
movslq %r8d, %rdi
leaq (%r14,%rdi,4), %r11
movq %r10, 0x148(%rsp)
movq %r10, %rdi
shrq $0x3, %rdi
imulq 0x220(%rsp), %rdi
addq 0x218(%rsp), %rdi
movq %r11, 0x28(%rsp)
movq %r15, 0x38(%rsp)
movq %r13, 0x40(%rsp)
movq %rax, 0x60(%rsp)
movq %r12, 0x58(%rsp)
movq %rbx, 0x50(%rsp)
movq %r9, 0x48(%rsp)
movq %rdx, 0x68(%rsp)
xorl %r14d, %r14d
xorl %r10d, %r10d
movq %r10, 0x158(%rsp)
movq %r10, %r8
orq $0x7, %r8
cmpq 0x20(%rsp), %r8
jge 0x10b222
movq %r14, 0x1b0(%rsp)
xorl %r8d, %r8d
cmpq %rcx, %r8
je 0x10b1d6
movq %r14, %rbp
xorl %r10d, %r10d
cmpl $0x100, %r10d # imm = 0x100
je 0x10b1c7
vmovss (%r11,%rbp), %xmm0
vmovss %xmm0, (%rdi,%r10)
vmovss (%r15,%rbp), %xmm0
vmovss %xmm0, 0x4(%rdi,%r10)
vmovss (%r13,%rbp), %xmm0
vmovss %xmm0, 0x8(%rdi,%r10)
vmovss (%rax,%rbp), %xmm0
vmovss %xmm0, 0xc(%rdi,%r10)
vmovss (%r12,%rbp), %xmm0
vmovss %xmm0, 0x10(%rdi,%r10)
vmovss (%rbx,%rbp), %xmm0
vmovss %xmm0, 0x14(%rdi,%r10)
vmovss (%r9,%rbp), %xmm0
vmovss %xmm0, 0x18(%rdi,%r10)
vmovd (%rdx,%rbp), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r10)
addq $0x20, %r10
addq %rsi, %rbp
jmp 0x10b150
incq %r8
addq $0x4, %r14
addq %r10, %rdi
jmp 0x10b141
movq 0x158(%rsp), %r10
addq $0x8, %r10
movq 0x1c8(%rsp), %r8
movq 0x1b0(%rsp), %r14
addq %r8, %r14
addq %r8, 0x68(%rsp)
addq %r8, 0x48(%rsp)
addq %r8, 0x50(%rsp)
addq %r8, 0x58(%rsp)
addq %r8, 0x60(%rsp)
addq %r8, 0x40(%rsp)
addq %r8, 0x38(%rsp)
addq %r8, 0x28(%rsp)
jmp 0x10b11c
movq 0x158(%rsp), %r15
movq %r15, %rax
orq $0x3, %rax
cmpq 0x20(%rsp), %rax
jge 0x10b46e
movq %r15, %r14
movq 0x28(%rsp), %rax
movq 0x38(%rsp), %rdx
movq 0x40(%rsp), %r9
movq 0x60(%rsp), %r11
movq 0x58(%rsp), %rbx
movq 0x50(%rsp), %r15
movq 0x48(%rsp), %r12
movq 0x68(%rsp), %r13
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0x10b31d
xorl %r10d, %r10d
xorl %r8d, %r8d
cmpl $0x80, %r8d
je 0x10b2f2
vmovss (%rax,%r10), %xmm0
vmovss %xmm0, (%rdi,%r8)
vmovss (%rdx,%r10), %xmm0
vmovss %xmm0, 0x4(%rdi,%r8)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x8(%rdi,%r8)
vmovss (%r11,%r10), %xmm0
vmovss %xmm0, 0xc(%rdi,%r8)
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, 0x10(%rdi,%r8)
vmovss (%r15,%r10), %xmm0
vmovss %xmm0, 0x14(%rdi,%r8)
vmovss (%r12,%r10), %xmm0
vmovss %xmm0, 0x18(%rdi,%r8)
vmovd (%r13,%r10), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r8)
addq $0x20, %r8
addq %rsi, %r10
jmp 0x10b278
incq %rbp
addq $0x4, %r13
addq $0x4, %r12
addq $0x4, %r15
addq $0x4, %rbx
addq $0x4, %r11
addq $0x4, %r9
addq $0x4, %rdx
addq $0x4, %rax
addq %r8, %rdi
jmp 0x10b269
movq %r14, %r15
addq $0x4, %r15
movq 0x1c0(%rsp), %rax
addq %rax, 0x68(%rsp)
addq %rax, 0x48(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, 0x58(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x38(%rsp)
addq %rax, 0x28(%rsp)
jmp 0x10b22a
movq %r15, %r14
movq 0x28(%rsp), %rax
movq 0x68(%rsp), %rdx
movq 0x48(%rsp), %r9
movq 0x50(%rsp), %r11
movq 0x58(%rsp), %rbx
movq 0x60(%rsp), %r15
movq 0x40(%rsp), %r12
movq 0x38(%rsp), %r13
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0x10b437
xorl %r8d, %r8d
xorl %r10d, %r10d
cmpl $0x40, %r8d
je 0x10b40c
vmovss (%rax,%r10), %xmm0
vmovss %xmm0, (%rdi,%r8)
vmovss (%r13,%r10), %xmm0
vmovss %xmm0, 0x4(%rdi,%r8)
vmovss (%r12,%r10), %xmm0
vmovss %xmm0, 0x8(%rdi,%r8)
vmovss (%r15,%r10), %xmm0
vmovss %xmm0, 0xc(%rdi,%r8)
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, 0x10(%rdi,%r8)
vmovss (%r11,%r10), %xmm0
vmovss %xmm0, 0x14(%rdi,%r8)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x18(%rdi,%r8)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0x1c(%rdi,%r8)
addq %rsi, %r10
addq $0x20, %r8
jmp 0x10b395
incq %rbp
addq $0x4, %r13
addq $0x4, %r12
addq $0x4, %r15
addq $0x4, %rbx
addq $0x4, %r11
addq $0x4, %r9
addq $0x4, %rdx
addq $0x4, %rax
addq %r8, %rdi
jmp 0x10b386
movq %r14, %r15
addq $0x2, %r15
movq 0x1b8(%rsp), %rax
addq %rax, 0x38(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, 0x58(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, 0x48(%rsp)
addq %rax, 0x68(%rsp)
addq %rax, 0x28(%rsp)
movq %r15, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jl 0x10b359
movq 0x1d0(%rsp), %r8
movq 0x148(%rsp), %r9
movl 0x138(%rsp), %r11d
movl 0x130(%rsp), %ebx
movl 0x8c(%rsp), %r10d
movl 0x1f0(%rsp), %ebp
movl 0x1e8(%rsp), %r12d
movl 0x1e0(%rsp), %r13d
cmpq 0x20(%rsp), %r15
jge 0x10b59e
xorl %edx, %edx
movq %rdi, %rax
cmpq %rdx, %r8
je 0x10b56b
movq 0x28(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, (%rdi,%rdx,8)
movq 0x38(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x4(%rdi,%rdx,8)
movq 0x40(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x8(%rdi,%rdx,8)
movq 0x60(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0xc(%rdi,%rdx,8)
movq 0x58(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x10(%rdi,%rdx,8)
movq 0x50(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x14(%rdi,%rdx,8)
movq 0x48(%rsp), %r14
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x18(%rdi,%rdx,8)
movq 0x68(%rsp), %r14
vmovd (%r14,%rdx), %xmm0
vmovd %xmm0, 0x1c(%rdi,%rdx,8)
addq $0x20, %rax
addq $0x4, %rdx
jmp 0x10b4ce
incq %r15
addq %rsi, 0x68(%rsp)
addq %rsi, 0x48(%rsp)
addq %rsi, 0x50(%rsp)
addq %rsi, 0x58(%rsp)
addq %rsi, 0x60(%rsp)
addq %rsi, 0x40(%rsp)
addq %rsi, 0x38(%rsp)
addq %rsi, 0x28(%rsp)
movq %rax, %rdi
jmp 0x10b4be
addq $0x8, %r9
movq 0x150(%rsp), %rdx
movl 0x20c(%rsp), %eax
addl %eax, %edx
movq 0x140(%rsp), %rdi
addl %eax, %edi
addl %eax, %r11d
addl %eax, %ebx
addl %eax, %r10d
addl %eax, %ebp
addl %eax, %r12d
addl %eax, %r13d
jmp 0x10b02b
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq %rax, 0x40(%rsp)
movq 0x8(%rsp), %rax
movq 0x18(%rax), %r10
movq %r10, 0x60(%rsp)
movq 0x28(%rax), %r10
imulq 0x58(%rax), %r10
movq %r10, 0x58(%rsp)
movq 0x18(%rsp), %rax
movq 0x10(%rsp), %r10
imull %r10d, %eax
movq %rax, 0x18(%rsp)
leal (,%rax,4), %eax
movl %eax, 0x50(%rsp)
movq %r9, %rax
orq $0x3, %rax
cmpq %r8, %rax
movq %r9, %r12
jge 0x10b929
movq %rdx, 0x150(%rsp)
movslq %edx, %rax
movq 0x40(%rsp), %r8
leaq (%r8,%rax,4), %rdx
movq %rdi, 0x140(%rsp)
movslq %edi, %rax
leaq (%r8,%rax,4), %r9
movl %r11d, 0x138(%rsp)
movslq %r11d, %rax
leaq (%r8,%rax,4), %rbp
movl %ebx, 0x130(%rsp)
movslq %ebx, %rax
leaq (%r8,%rax,4), %rbx
movl %r12d, %r11d
shrl $0x3, %r11d
movq %r12, 0x148(%rsp)
btl $0x2, %r12d
adcl $0x0, %r11d
imulq 0x58(%rsp), %r11
addq 0x60(%rsp), %r11
movq %rbx, %rdi
movq %rbp, %r14
movq %r9, %r15
movq %rdx, %r12
xorl %r8d, %r8d
xorl %r13d, %r13d
movq %r13, %rax
orq $0x7, %rax
cmpq 0x20(%rsp), %rax
jge 0x10b7d7
movq %r13, 0x28(%rsp)
movq %r8, 0x38(%rsp)
movq %r8, %r13
xorl %r10d, %r10d
cmpq %rcx, %r10
je 0x10b724
movq %r13, %r8
xorl %eax, %eax
cmpl $0x80, %eax
je 0x10b718
vmovss (%rdx,%r8), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r8), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%rbp,%r8), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rbx,%r8), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r8
jmp 0x10b6d4
incq %r10
addq %rax, %r11
addq $0x4, %r13
jmp 0x10b6ca
movq 0x28(%rsp), %r13
addq $0x8, %r13
movq 0x1c8(%rsp), %rax
movq 0x38(%rsp), %r8
addq %rax, %r8
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
jmp 0x10b6a8
movq %rdi, %rdx
movq %r14, %r8
movq %r15, %r9
movq %r12, %rbx
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0x10b7bf
xorl %r10d, %r10d
xorl %eax, %eax
cmpl $0x40, %eax
je 0x10b7a7
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%r8,%r10), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r10
jmp 0x10b766
incq %rbp
addq %rax, %r11
addq $0x4, %rbx
addq $0x4, %r9
addq $0x4, %r8
addq $0x4, %rdx
jmp 0x10b75c
addq $0x4, %r13
movq 0x1c0(%rsp), %rax
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
movq %r13, %rax
orq $0x3, %rax
cmpq 0x20(%rsp), %rax
jl 0x10b74e
jmp 0x10b877
movq %rdi, %rdx
movq %r14, %r8
movq %r15, %r9
movq %r12, %rbx
xorl %ebp, %ebp
cmpq %rcx, %rbp
je 0x10b85f
xorl %r10d, %r10d
xorl %eax, %eax
cmpl $0x20, %eax
je 0x10b847
vmovss (%rbx,%r10), %xmm0
vmovss %xmm0, (%r11,%rax)
vmovss (%r9,%r10), %xmm0
vmovss %xmm0, 0x4(%r11,%rax)
vmovss (%r8,%r10), %xmm0
vmovss %xmm0, 0x8(%r11,%rax)
vmovd (%rdx,%r10), %xmm0
vmovd %xmm0, 0xc(%r11,%rax)
addq $0x10, %rax
addq %rsi, %r10
jmp 0x10b806
incq %rbp
addq %rax, %r11
addq $0x4, %rbx
addq $0x4, %r9
addq $0x4, %r8
addq $0x4, %rdx
jmp 0x10b7fc
addq $0x2, %r13
movq 0x1b8(%rsp), %rax
addq %rax, %r12
addq %rax, %r15
addq %rax, %r14
addq %rax, %rdi
movq %r13, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jl 0x10b7ee
movq 0x150(%rsp), %rdx
movq 0x148(%rsp), %r9
movl 0x130(%rsp), %ebx
cmpq 0x20(%rsp), %r13
jge 0x10b8f6
xorl %eax, %eax
cmpq %rax, %rcx
je 0x10b8e5
vmovss (%r12,%rax,4), %xmm0
vmovss %xmm0, (%r11)
vmovss (%r15,%rax,4), %xmm0
vmovss %xmm0, 0x4(%r11)
vmovss (%r14,%rax,4), %xmm0
vmovss %xmm0, 0x8(%r11)
vmovd (%rdi,%rax,4), %xmm0
vmovd %xmm0, 0xc(%r11)
addq $0x10, %r11
incq %rax
jmp 0x10b8a9
incq %r13
addq %rsi, %rdi
addq %rsi, %r14
addq %rsi, %r15
addq %rsi, %r12
jmp 0x10b8a0
addq $0x4, %r9
movl 0x50(%rsp), %eax
addl %eax, %edx
movq 0x140(%rsp), %rdi
addl %eax, %edi
movl 0x138(%rsp), %r11d
addl %eax, %r11d
addl %eax, %ebx
movq 0x80(%rsp), %r8
movq 0x10(%rsp), %r10
jmp 0x10b620
movq 0x128(%rsp), %rax
movq 0x160(%rax), %r14
movq 0x8(%rsp), %rax
movq 0x18(%rax), %r9
movq %r9, 0x138(%rsp)
movq 0x28(%rax), %r9
imulq 0x58(%rax), %r9
movq %r9, 0x130(%rsp)
leal (%r10,%r10), %eax
cltq
movq %rdx, %rbx
leal (%r10,%r10,2), %edx
movslq %edx, %rdx
movq %r8, %r15
movq %rdi, %r13
leal (,%r10,4), %edi
movslq %edi, %rdi
leal (%r10,%r10,4), %r8d
movslq %r8d, %r8
imull $0x6, %r10d, %r9d
movslq %r9d, %r9
imull $0x7, %r10d, %r10d
movslq %r10d, %r10
movq 0x18(%rsp), %r11
addl %r11d, %r11d
movl %r11d, 0x8c(%rsp)
leaq (%r14,%r10,4), %r10
movq %r10, 0x1f0(%rsp)
leaq (%r14,%r9,4), %r9
movq %r9, 0x1e8(%rsp)
leaq (%r14,%r8,4), %r8
movq %r8, 0x1e0(%rsp)
leaq (%r14,%rdi,4), %rdi
movq %rdi, 0x158(%rsp)
movq %r13, %rdi
leaq (%r14,%rdx,4), %rdx
movq %rdx, 0x10(%rsp)
movq %rbx, %r9
leaq (%r14,%rax,4), %rax
movq %rax, 0x1d8(%rsp)
movq 0x210(%rsp), %rax
movq %r14, 0x68(%rsp)
leaq (%r14,%rax,4), %rax
movq %rax, 0x1b0(%rsp)
movq %r12, %rax
orq $0x1, %rax
cmpq %r15, %rax
jge 0x10be1b
movq %rdi, 0x140(%rsp)
movslq %edi, %rax
movq 0x1f0(%rsp), %r8
leaq (%r8,%rax,4), %rdi
movq %rdi, 0x28(%rsp)
movq 0x1e8(%rsp), %r10
leaq (%r10,%rax,4), %rdi
movq %rdi, 0x38(%rsp)
movq 0x1e0(%rsp), %r11
leaq (%r11,%rax,4), %rdi
movq %rdi, 0x40(%rsp)
movq 0x158(%rsp), %rdx
leaq (%rdx,%rax,4), %rdi
movq %rdi, 0x60(%rsp)
movq 0x10(%rsp), %r13
leaq (%r13,%rax,4), %rbx
movq 0x1d8(%rsp), %rbp
leaq (%rbp,%rax,4), %r14
movq 0x1b0(%rsp), %rdx
leaq (%rdx,%rax,4), %r15
movq 0x68(%rsp), %rdx
leaq (%rdx,%rax,4), %rdi
movq %r9, 0x150(%rsp)
movslq %r9d, %rax
leaq (%r8,%rax,4), %rdx
movq %rdx, 0x58(%rsp)
leaq (%r10,%rax,4), %rdx
movq %rdx, 0x50(%rsp)
leaq (%r11,%rax,4), %r11
movl %r12d, %r8d
shrl $0x3, %r8d
movl %r12d, %r10d
shrl $0x2, %r10d
andl $0x1, %r10d
movq %r12, 0x148(%rsp)
btl $0x1, %r12d
adcl %r8d, %r10d
movq 0x158(%rsp), %rdx
leaq (%rdx,%rax,4), %r9
leaq (,%rax,4), %r12
addq %r13, %r12
leaq (%rbp,%rax,4), %r13
movq 0x1b0(%rsp), %rdx
leaq (%rdx,%rax,4), %rbp
movq 0x68(%rsp), %rdx
leaq (%rdx,%rax,4), %r8
imulq 0x130(%rsp), %r10
addq 0x138(%rsp), %r10
xorl %edx, %edx
movq %rdx, 0x48(%rsp)
movq %rdx, %rax
orq $0x7, %rax
cmpq 0x20(%rsp), %rax
jge 0x10bc72
xorl %eax, %eax
cmpq %rax, 0x1d0(%rsp)
je 0x10bc20
vmovss (%r8,%rax), %xmm0
vmovss %xmm0, (%r10)
vmovss (%rbp,%rax), %xmm0
vmovss %xmm0, 0x4(%r10)
vmovss (%r13,%rax), %xmm0
vmovss %xmm0, 0x8(%r10)
vmovss (%r12,%rax), %xmm0
vmovss %xmm0, 0xc(%r10)
vmovss (%r9,%rax), %xmm0
vmovss %xmm0, 0x10(%r10)
vmovss (%r11,%rax), %xmm0
vmovss %xmm0, 0x14(%r10)
movq 0x50(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x18(%r10)
movq 0x58(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x1c(%r10)
vmovss (%rdi,%rax), %xmm0
vmovss %xmm0, 0x20(%r10)
vmovss (%r15,%rax), %xmm0
vmovss %xmm0, 0x24(%r10)
vmovss (%r14,%rax), %xmm0
vmovss %xmm0, 0x28(%r10)
vmovss (%rbx,%rax), %xmm0
vmovss %xmm0, 0x2c(%r10)
movq 0x60(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x30(%r10)
movq 0x40(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x34(%r10)
movq 0x38(%rsp), %rdx
vmovss (%rdx,%rax), %xmm0
vmovss %xmm0, 0x38(%r10)
movq 0x28(%rsp), %rdx
vmovd (%rdx,%rax), %xmm0
vmovd %xmm0, 0x3c(%r10)
addq $0x40, %r10
addq $0x4, %rax
jmp 0x10bb2f
movq 0x48(%rsp), %rdx
addq $0x8, %rdx
movq 0x1c8(%rsp), %rax
addq %rax, 0x28(%rsp)
addq %rax, 0x38(%rsp)
addq %rax, 0x40(%rsp)
addq %rax, 0x60(%rsp)
addq %rax, %rbx
addq %rax, %r14
addq %rax, %r15
addq %rax, %rdi
addq %rax, 0x58(%rsp)
addq %rax, 0x50(%rsp)
addq %rax, %r11
addq %rax, %r9
addq %rax, %r12
addq %rax, %r13
addq %rax, %rbp
addq %rax, %r8
jmp 0x10bb16
movq 0x1d0(%rsp), %r9
movq 0x48(%rsp), %r11
movq %r11, 0x48(%rsp)
orq $0x3, %r11
cmpq 0x20(%rsp), %r11
jge 0x10bd3e
xorl %edx, %edx
movq %r10, %rax
cmpq %rdx, %r9
je 0x10bd0d
vmovss (%r8,%rdx), %xmm0
vmovss %xmm0, (%r10,%rdx,8)
vmovss (%rbp,%rdx), %xmm0
vmovss %xmm0, 0x4(%r10,%rdx,8)
vmovss (%r13,%rdx), %xmm0
vmovss %xmm0, 0x8(%r10,%rdx,8)
vmovss (%r12,%rdx), %xmm0
vmovss %xmm0, 0xc(%r10,%rdx,8)
vmovss (%rdi,%rdx), %xmm0
vmovss %xmm0, 0x10(%r10,%rdx,8)
vmovss (%r15,%rdx), %xmm0
vmovss %xmm0, 0x14(%r10,%rdx,8)
vmovss (%r14,%rdx), %xmm0
vmovss %xmm0, 0x18(%r10,%rdx,8)
vmovd (%rbx,%rdx), %xmm0
vmovd %xmm0, 0x1c(%r10,%rdx,8)
addq $0x20, %rax
addq $0x4, %rdx
jmp 0x10bc98
movq 0x48(%rsp), %r11
addq $0x4, %r11
movq 0x1c0(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r14
addq %rdx, %r15
addq %rdx, %rdi
addq %rdx, %r12
addq %rdx, %r13
addq %rdx, %rbp
addq %rdx, %r8
movq %rax, %r10
jmp 0x10bc7f
movq 0x80(%rsp), %r15
movq 0x48(%rsp), %r14
movq %r14, %rax
orq $0x1, %rax
cmpq 0x20(%rsp), %rax
jge 0x10bdb6
movq %rdi, %rax
movq %r8, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0x10bda2
xorl %ebx, %ebx
xorl %r11d, %r11d
cmpl $0x10, %r11d
je 0x10bd92
vmovss (%rdx,%rbx), %xmm0
vmovss %xmm0, (%r10,%r11)
vmovd (%rax,%rbx), %xmm0
vmovd %xmm0, 0x4(%r10,%r11)
addq $0x8, %r11
addq %rsi, %rbx
jmp 0x10bd6c
incq %r9
addq %r11, %r10
addq $0x4, %rdx
addq $0x4, %rax
jmp 0x10bd62
addq $0x2, %r14
movq 0x1b8(%rsp), %rax
addq %rax, %r8
addq %rax, %rdi
jmp 0x10bd4b
movq 0x150(%rsp), %r9
movq 0x148(%rsp), %r12
cmpq 0x20(%rsp), %r14
jge 0x10bdfe
xorl %eax, %eax
cmpq %rax, %rcx
je 0x10bdf3
vmovss (%r8,%rax,4), %xmm0
vmovss %xmm0, (%r10)
vmovd (%rdi,%rax,4), %xmm0
vmovd %xmm0, 0x4(%r10)
addq $0x8, %r10
incq %rax
jmp 0x10bdcf
incq %r14
addq %rsi, %rdi
addq %rsi, %r8
jmp 0x10bdc6
addq $0x2, %r12
movl 0x8c(%rsp), %eax
addl %eax, %r9d
movq 0x140(%rsp), %rdi
addl %eax, %edi
jmp 0x10ba04
movq 0x128(%rsp), %rax
movq 0x160(%rax), %rax
movq 0x8(%rsp), %rdx
movq 0x18(%rdx), %rdi
movq %rdi, 0x28(%rsp)
movq 0x28(%rdx), %r8
imulq 0x58(%rdx), %r8
movq %r9, %rdx
movq %r12, %r14
movq 0x210(%rsp), %rdi
leaq (%rax,%rdi,4), %r10
cmpq %r15, %r14
jge 0x10a71e
movq %rdx, %rbp
movslq %edx, %rdx
leaq (%r10,%rdx,4), %r15
movl %r14d, %r9d
shrl $0x3, %r9d
movl %r14d, %ebx
shrl $0x2, %ebx
andl $0x1, %ebx
movl %r14d, %r11d
andl $0x1, %r11d
addl %r9d, %r11d
movq %r14, %rdi
btl $0x1, %r14d
adcl %ebx, %r11d
leaq (%rax,%rdx,4), %rbx
imulq %r8, %r11
addq 0x28(%rsp), %r11
xorl %r14d, %r14d
movq %r14, %rdx
orq $0x7, %rdx
cmpq 0x20(%rsp), %rdx
jge 0x10bf43
movq %rbx, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0x10bee5
movq %rdx, %r13
xorl %r12d, %r12d
cmpl $0x20, %r12d
je 0x10bed9
vmovd (%r13), %xmm0
vmovd %xmm0, (%r11,%r12)
addq $0x4, %r12
addq %rsi, %r13
jmp 0x10bebe
incq %r9
addq %r12, %r11
addq $0x4, %rdx
jmp 0x10beb3
addq $0x8, %r14
movq 0x1c8(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r15
jmp 0x10be9b
movq %rbx, %rdx
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0x10bf31
movq %rdx, %r13
xorl %r12d, %r12d
cmpl $0x10, %r12d
je 0x10bf25
vmovd (%r13), %xmm0
vmovd %xmm0, (%r11,%r12)
addq $0x4, %r12
addq %rsi, %r13
jmp 0x10bf0a
incq %r9
addq %r12, %r11
addq $0x4, %rdx
jmp 0x10beff
addq $0x4, %r14
movq 0x1c0(%rsp), %rdx
addq %rdx, %rbx
addq %rdx, %r15
movq %r14, %rdx
orq $0x3, %rdx
cmpq 0x20(%rsp), %rdx
jl 0x10bef9
movq 0x1d0(%rsp), %r9
movq %r14, %rdx
orq $0x1, %rdx
cmpq 0x20(%rsp), %rdx
jge 0x10bfa1
xorl %edx, %edx
cmpq %rdx, %rcx
je 0x10bf8d
vmovss (%rbx,%rdx,4), %xmm0
vmovss %xmm0, (%r11)
vmovd (%r15,%rdx,4), %xmm0
vmovd %xmm0, 0x4(%r11)
addq $0x8, %r11
incq %rdx
jmp 0x10bf69
addq $0x2, %r14
movq 0x1b8(%rsp), %rdx
addq %rdx, %r15
addq %rdx, %rbx
jmp 0x10bf59
movq 0x80(%rsp), %r15
cmpq 0x20(%rsp), %r14
jge 0x10bfd3
xorl %edx, %edx
cmpq %rdx, %r9
je 0x10bfc8
vmovd (%rbx,%rdx), %xmm0
vmovd %xmm0, (%r11,%rdx)
addq $0x4, %rdx
jmp 0x10bfb2
incq %r14
addq %rsi, %rbx
addq %rdx, %r11
jmp 0x10bfa9
movq %rdi, %r14
incq %r14
movq %rbp, %rdx
addl 0x18(%rsp), %edx
jmp 0x10be53
movq 0x10(%rsp), %r14
leal (,%r14,4), %esi
andl $0x1, %r12d
btl $0x1, %ecx
adcl $0x1, %r12d
pushq $0x4
popq %r8
movl %ebx, %edx
jmp 0x10af50
cmpl $0x2, %ebx
jl 0x10c0d2
movq 0x10(%rsp), %r14
leal (%r14,%r14), %esi
movl %ebx, %edx
andl $0x1, %edx
incl %edx
jmp 0x10af4c
leaq 0xa8(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x10e72d
jmp 0x10c0b1
xorl %eax, %eax
cmpb $0x0, 0x38(%rbx)
je 0x10c077
testb %cl, %cl
jne 0x10c085
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0xf0(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0x10e20b
jmp 0x10c0ac
movb 0x39(%rbx), %dl
movl %edx, %esi
xorb $0x1, %sil
orb %sil, %cl
je 0x10c0e5
addq $0x160, %r12 # imm = 0x160
movq 0x8(%rsp), %rax
leaq 0xa8(%rax), %rsi
movq %r12, %rdi
movq 0x18(%rsp), %rdx
movl %ebp, %ecx
movq 0x30(%rsp), %r8
callq 0x10e72d
movq 0x30(%rsp), %rbx
cmpb $0x1, (%rbx)
jmp 0x10a726
movq 0x10(%rsp), %r14
leal (%r14,%r14), %esi
andl $0x1, %ecx
incl %ecx
pushq $0x4
popq %r8
movl %ebx, %edx
jmp 0x10af53
pushq $0x4
popq %r8
movq 0x10(%rsp), %r14
movl %r14d, %esi
movl %ebx, %edx
jmp 0x10af53
orb %dl, %al
jne 0x10ab04
jmp 0x10c0b1
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x160(%rbx), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x170(%rbx), %rax
movq %rax, 0xa0(%rsp)
movl 0x178(%rbx), %eax
movl %eax, 0xa8(%rsp)
movq 0x180(%rbx), %rax
movq %rax, 0xb0(%rsp)
vmovdqu 0x188(%rbx), %xmm0
vmovdqu %xmm0, 0xb8(%rsp)
movl 0x198(%rbx), %eax
movl %eax, 0xc8(%rsp)
movq 0x1a0(%rbx), %rax
movq %rax, 0xd0(%rsp)
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rbx
leaq (%rdx,%rbx), %rax
addq $0x1a8, %rax # imm = 0x1A8
leaq 0xd8(%rsp), %rcx
cmpq %rax, %rcx
je 0x10c23e
addq %rdx, %rbx
movq 0x1b0(%rbx), %rax
testq %rax, %rax
je 0x10c199
lock
incl (%rax)
movq 0xe0(%rsp), %rax
testq %rax, %rax
je 0x10c1d0
lock
decl (%rax)
jne 0x10c1d0
movq 0xd8(%rsp), %rsi
movq 0xf8(%rsp), %rdi
testq %rdi, %rdi
je 0x10c1c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c1d0
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x1a8(%rbx), %xmm0
vmovups %xmm0, 0xd8(%rsp)
movq 0x1b8(%rbx), %rax
movq %rax, 0xe8(%rsp)
movl 0x1c0(%rbx), %eax
movl %eax, 0xf0(%rsp)
movq 0x1c8(%rbx), %rax
movq %rax, 0xf8(%rsp)
vmovdqu 0x1d0(%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movl 0x1e0(%rbx), %eax
movl %eax, 0x110(%rsp)
movq 0x1e8(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rsp), %rdx
movq 0x180(%rdx), %r15
leaq 0x70(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x70(%rsp), %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%rbx), %rax
testq %rax, %rax
je 0x10c2b5
lock
decl (%rax)
jne 0x10c2b5
movq 0x90(%rsp,%rbx), %rsi
movq 0xb0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x10c2a9
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0x10c2b5
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x10c276
movq 0x8(%rsp), %rax
movq 0x180(%rax), %rdi
movq (%rdi), %rax
movq 0x30(%rsp), %rsi
callq *0x20(%rax)
movq 0x30(%rsp), %rax
cmpb $0x1, (%rax)
jne 0x10c378
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %r14
leaq (%rcx,%r14), %rbx
movq 0x168(%rcx,%r14), %rax
testq %rax, %rax
je 0x10c346
lock
decl (%rax)
jne 0x10c346
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x10c33e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c346
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rbx)
movq 0x8(%rsp), %rax
addq %r14, %rax
addq $0x160, %rax # imm = 0x160
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
vmovdqu %xmm0, 0x188(%rbx)
andl $0x0, 0x198(%rbx)
leaq 0x160(%rsp), %rdi
callq 0x71614
jmp 0x10999c
vpinsrd $0x2, %eax, %xmm0, %xmm0
vpinsrd $0x3, %edx, %xmm0, %xmm0
vbroadcastss 0x2e6521(%rip), %xmm1 # 0x3f28c0
vpxor %xmm1, %xmm0, %xmm1
vptest %xmm1, %xmm1
je 0x10a9f9
vbroadcastss 0x2e650d(%rip), %xmm1 # 0x3f28c4
vpcmpeqd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorl $0xf, %eax
testb %al, %al
jne 0x10ae71
jmp 0x10a9f9
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x160(%rbx), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x170(%rbx), %rax
movq %rax, 0xa0(%rsp)
movl 0x178(%rbx), %eax
movl %eax, 0xa8(%rsp)
movq 0x180(%rbx), %rax
movq %rax, 0xb0(%rsp)
vmovdqu 0x188(%rbx), %xmm0
vmovdqu %xmm0, 0xb8(%rsp)
movl 0x198(%rbx), %eax
movl %eax, 0xc8(%rsp)
movq 0x1a0(%rbx), %rax
movq %rax, 0xd0(%rsp)
movq 0x8(%rsp), %rdx
movq 0x180(%rdx), %r15
leaq 0x70(%rsp), %rdi
leaq 0x90(%rsp), %rsi
callq 0x6b00e
movq (%r15), %rax
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x70(%rsp), %rdi
callq 0x6b03a
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c2e0
lock
decl (%rax)
jne 0x10c2e0
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10c4b0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c2e0
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10c2e0
jmp 0x10c4d5
jmp 0x10c7be
jmp 0x10c538
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x6b03a
jmp 0x10c4d8
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c693
lock
decl (%rax)
jne 0x10c693
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x10c514
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10c693
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c693
jmp 0x10c7be
jmp 0x10c7be
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x6b03a
jmp 0x10c53b
movq %rax, %rbx
pushq $0x48
popq %r14
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%r14), %rax
testq %rax, %rax
je 0x10c582
lock
decl (%rax)
jne 0x10c582
movq 0x90(%rsp,%r14), %rsi
movq 0xb0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x10c576
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0x10c582
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x10c543
jmp 0x10c693
jmp 0x10c7be
jmp 0x10c645
jmp 0x10c7be
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c7d3
lock
decl (%rax)
jne 0x10c7d3
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x10c600
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10c7d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c7d3
jmp 0x10c7be
jmp 0x10c7be
jmp 0x10c7be
jmp 0x10c7be
jmp 0x10c6b2
jmp 0x10c730
jmp 0x10c730
jmp 0x10c730
movq %rax, %rbx
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
jmp 0x10c648
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c783
lock
decl (%rax)
jne 0x10c783
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x10c76c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c783
jmp 0x10c7be
jmp 0x10c730
movq %rax, %rbx
leaq 0x160(%rsp), %rdi
jmp 0x10c7ce
movq %rax, %rbx
leaq 0x1f8(%rsp), %rdi
callq 0x6b03a
jmp 0x10c6b5
movq %rax, %rbx
pushq $0x48
popq %r14
vpxor %xmm0, %xmm0, %xmm0
movq 0x98(%rsp,%r14), %rax
testq %rax, %rax
je 0x10c6fc
lock
decl (%rax)
jne 0x10c6fc
movq 0x90(%rsp,%r14), %rsi
movq 0xb0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x10c6f0
movq (%rdi), %rax
callq *0x18(%rax)
vpxor %xmm0, %xmm0, %xmm0
jmp 0x10c6fc
movq %rsi, %rdi
callq 0x5f3e0
vpxor %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x90, %rax
andq $0x0, 0x40(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqu %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovdqu %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x10c6bd
jmp 0x10c783
jmp 0x10c7be
jmp 0x10c730
movq %rax, %rbx
leaq 0x90(%rsp), %rdi
jmp 0x10c7ce
jmp 0x10c780
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x10c783
lock
decl (%rax)
jne 0x10c783
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x10c776
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10c783
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c783
jmp 0x10c7be
movq %rax, %rbx
movq 0x168(%rsp), %rax
testq %rax, %rax
je 0x10c7c9
lock
decl (%rax)
jne 0x10c7c9
movq 0x160(%rsp), %rsi
movq 0x180(%rsp), %rdi
testq %rdi, %rdi
jne 0x10c7b4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10c7c9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10c7c9
jmp 0x10c7be
movq %rax, %rdi
callq 0x61d68
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_avx.cpp
|
ncnn::Convolution_x86_avx::create_pipeline_int8_x86(ncnn::Option const&)
|
int Convolution_x86_avx::create_pipeline_int8_x86(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
const int num_input = weight_data_size / maxk / num_output;
int elempack = 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
elempack = num_input % 8 == 0 ? 8 : 1;
out_elempack = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
#if __SSE2__
if (elempack == 8 && out_elempack == 4)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv3x3s1_winograd43_transform_kernel_pack8to4_int8_sse(weight_data, weight_winograd43_data, num_input, num_output, opt);
}
else if (opt.use_sgemm_convolution)
{
convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else
{
convolution_transform_kernel_packed_int8_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
}
}
if (elempack == 1 && out_elempack == 4)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 7 && kernel_h == 7 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
        else if (opt.use_sgemm_convolution) // TODO: better condition, e.g. (num_input >= 8 && num_output >= 8)
{
convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else
{
convolution_transform_kernel_packed_int8_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
}
}
if (elempack == 8 && out_elempack == 1)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (opt.use_winograd_convolution && opt.use_winograd43_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse(weight_data, weight_winograd43_data, num_input, num_output, opt);
}
        else if (opt.use_sgemm_convolution) // TODO: better condition, e.g. (num_input >= 8 && num_output >= 8)
{
convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else
{
convolution_transform_kernel_packed_int8_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
}
}
#endif // __SSE2__
if (elempack == 1 && out_elempack == 1)
{
if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convolution_im2col_sgemm_transform_kernel_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convolution_im2col_sgemm_transform_kernel_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else if (opt.use_winograd_convolution && opt.use_winograd23_convolution && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1 && num_input >= 16 && num_output >= 16)
{
conv3x3s1_winograd23_transform_kernel_int8_sse(weight_data, weight_winograd23_data, num_input, num_output, opt);
// conv3x3s1_winograd43_transform_kernel_int8_sse(weight_data, weight_winograd43_data, num_input, num_output, opt);
}
else if (opt.use_sgemm_convolution)
{
convolution_im2col_sgemm_transform_kernel_int8_sse(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h);
}
else
{
weight_data_tm = weight_data;
}
}
scale_in_data.create(num_output);
for (int p = 0; p < num_output; p++)
{
// requantize and relu
float scale_in;
if (weight_data_int8_scales[p] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[0] * weight_data_int8_scales[p]);
scale_in_data[p] = scale_in;
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
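
A note on the scale_in loop near the end of the source above: int8 inference accumulates in int32, and converting back to real values needs the reciprocal of the product of the input (bottom blob) scale and the per-output-channel weight scale; a weight scale of zero marks a degenerate channel and must not be divided by. A minimal standalone sketch of just that step (generic names, not the ncnn API):

#include <vector>

// Per-output-channel dequantization scale: 1 / (input_scale * weight_scale[p]),
// with a zero weight scale mapped to zero to avoid division by zero.
static std::vector<float> make_scale_in(float bottom_scale, const std::vector<float>& weight_scales)
{
    std::vector<float> scale_in(weight_scales.size());
    for (size_t p = 0; p < weight_scales.size(); p++)
    {
        scale_in[p] = (weight_scales[p] == 0.f) ? 0.f : 1.f / (bottom_scale * weight_scales[p]);
    }
    return scale_in;
}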
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x108, %rsp # imm = 0x108
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %r12
movl 0xd4(%rdi,%r12), %r8d
movl 0xd8(%rdi,%r12), %r9d
movl %r9d, %ecx
imull %r8d, %ecx
movl 0x104(%rdi,%r12), %eax
cltd
idivl %ecx
movslq 0xd0(%rdi,%r12), %rbx
cltd
idivl %ebx
movq %rsi, 0x10(%rsp)
movb 0x27(%rsi), %r15b
movslq %eax, %rcx
movq %rcx, 0x68(%rsp)
cmpb $0x1, %r15b
movq %rdi, 0x40(%rsp)
movq %rax, 0x8(%rsp)
jne 0x10c929
movl %eax, %edi
andl $0x7, %edi
sete %dl
movl %ebx, %ecx
andl $0x3, %ecx
setne %sil
movl %edi, %eax
orl %ecx, %eax
je 0x10cebe
movl %edx, %eax
testl %ecx, %ecx
jne 0x10c937
testl %edi, %edi
je 0x10c937
movq (%r14), %rax
movq -0x18(%rax), %rax
leaq (%r14,%rax), %rdi
movl 0xd4(%r14,%rax), %r8d
cmpl $0x7, %r8d
je 0x10d09f
cmpl $0x3, %r8d
movq 0x8(%rsp), %rdx
je 0x10d035
cmpl $0x1, %r8d
jne 0x10d0eb
cmpl $0x1, 0xd8(%rdi)
jne 0x10d0eb
cmpl $0x1, 0xdc(%rdi)
jne 0x10c8dd
cmpl $0x1, 0xe0(%rdi)
jne 0x10c8dd
cmpl $0x1, 0xe4(%rdi)
jne 0x10c8dd
cmpl $0x1, 0xe8(%rdi)
je 0x10c911
cmpl $0x1, 0xdc(%rdi)
jne 0x10d0eb
cmpl $0x1, 0xe0(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe4(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe8(%rdi)
jne 0x10d0eb
leaq 0x60(%r14), %rsi
movl 0xd0(%rdi), %ecx
addq $0x160, %rdi # imm = 0x160
pushq $0x1
jmp 0x10d0e4
xorl %ebx, %ebx
movl %ebx, %eax
movl %r15d, %esi
xorb $0x1, %sil
movl %r15d, %edx
testb %sil, %sil
je 0x10caf2
testb %al, %al
je 0x10caf2
movq (%r14), %rax
movq -0x18(%rax), %rax
leaq (%r14,%rax), %r12
movl 0xd4(%r14,%rax), %r8d
cmpl $0x1, %r8d
jne 0x10ca00
cmpl $0x1, 0xd8(%r12)
jne 0x10ca00
cmpl $0x1, 0xdc(%r12)
jne 0x10c9a0
cmpl $0x1, 0xe0(%r12)
jne 0x10c9a0
cmpl $0x1, 0xe4(%r12)
jne 0x10c9a0
cmpl $0x1, 0xe8(%r12)
je 0x10c9cc
cmpl $0x1, 0xdc(%r12)
jne 0x10ca00
cmpl $0x1, 0xe0(%r12)
jne 0x10ca00
cmpl $0x2, 0xe4(%r12)
jne 0x10ca00
cmpl $0x2, 0xe8(%r12)
jne 0x10ca00
movl %edx, %ebx
movl %esi, %ebp
leaq 0x60(%r14), %rsi
movl 0xd0(%r12), %ecx
addq $0x160, %r12 # imm = 0x160
pushq $0x1
popq %r8
movq %r12, %rdi
movq 0x8(%rsp), %rdx
movl %r8d, %r9d
callq 0x129165
movl %ebp, %esi
movl %ebx, %edx
jmp 0x10caf2
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1c(%rax)
jne 0x10caa3
cmpl $0x3, %r8d
jne 0x10caa3
movq 0x10(%rsp), %rax
cmpb $0x0, 0x38(%rax)
je 0x10caa3
cmpl $0x3, 0xd8(%r12)
jne 0x10caa3
cmpl $0x1, 0xdc(%r12)
jne 0x10caa3
cmpl $0x1, 0xe0(%r12)
jne 0x10caa3
cmpl $0x1, 0xe4(%r12)
jne 0x10caa3
cmpl $0x1, 0xe8(%r12)
jne 0x10caa3
movb %sil, 0x7(%rsp)
movl %edx, 0x18(%rsp)
leaq 0xf0(%r14), %r13
movslq 0xd0(%r12), %rbx
addq $0x160, %r12 # imm = 0x160
callq 0x732f7
testl %eax, %eax
je 0x10d2a0
movq %r12, %rdi
movq %r13, %rsi
movq 0x8(%rsp), %rdx
movl %ebx, %ecx
movq 0x10(%rsp), %r8
callq 0x134022
jmp 0x10d2c8
movl %esi, %ebp
movl %edx, %ebx
leaq 0x160(%r12), %rdi
movl 0xd0(%r12), %ecx
movl 0xd8(%r12), %r9d
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1d(%rax)
jne 0x10cada
leaq 0x60(%r14), %rsi
movq 0x8(%rsp), %rdx
callq 0x129165
jmp 0x10caee
leaq 0x18(%r14), %rsi
movq 0x8(%rsp), %rdx
pushq $0x1
pushq $0x8
callq 0x128d71
popq %rax
popq %rcx
movl %ebx, %edx
movl %ebp, %esi
xorb $0x1, %dl
andb %dl, %sil
cmpb $0x1, %sil
jne 0x10d1a6
movq (%r14), %rax
movq -0x18(%rax), %rbx
leaq (%r14,%rbx), %r15
movl 0xd4(%r14,%rbx), %r8d
cmpl $0x1, %r8d
jne 0x10cb9f
cmpl $0x1, 0xd8(%r15)
jne 0x10cb9f
cmpl $0x1, 0xdc(%r15)
jne 0x10cb51
cmpl $0x1, 0xe0(%r15)
jne 0x10cb51
cmpl $0x1, 0xe4(%r15)
jne 0x10cb51
cmpl $0x1, 0xe8(%r15)
je 0x10cb79
cmpl $0x1, 0xdc(%r15)
jne 0x10cb9f
cmpl $0x1, 0xe0(%r15)
jne 0x10cb9f
cmpl $0x2, 0xe4(%r15)
jne 0x10cb9f
cmpl $0x2, 0xe8(%r15)
jne 0x10cb9f
leaq 0x60(%r14), %rsi
movl 0xd0(%r15), %ecx
addq $0x160, %r15 # imm = 0x160
pushq $0x1
popq %r8
movq %r15, %rdi
movq 0x8(%rsp), %rdx
movl %r8d, %r9d
jmp 0x10ce60
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1c(%rax)
jne 0x10ce37
cmpl $0x3, %r8d
jne 0x10ce37
movq 0x10(%rsp), %rax
cmpb $0x0, 0x37(%rax)
je 0x10ce37
cmpl $0x3, 0xd8(%r15)
jne 0x10ce37
cmpl $0x1, 0xdc(%r15)
jne 0x10ce37
cmpl $0x1, 0xe0(%r15)
jne 0x10ce37
cmpl $0x1, 0xe4(%r15)
jne 0x10ce37
cmpl $0x1, 0xe8(%r15)
jne 0x10ce37
cmpl $0x10, 0x8(%rsp)
jl 0x10ce37
movslq 0xd0(%r15), %rcx
cmpq $0x10, %rcx
jl 0x10ce37
leaq 0xa8(%r14), %rdi
xorl %r12d, %r12d
pushq $0x10
popq %rsi
pushq $0x2
popq %r8
movq 0x8(%rsp), %rbx
movl %ebx, %edx
movq %rcx, 0x58(%rsp)
xorl %r9d, %r9d
callq 0x63810
leal (%rbx,%rbx,8), %eax
movl %eax, 0x50(%rsp)
movl %ebx, %eax
movq %rax, 0x30(%rsp)
movq %r15, 0x60(%rsp)
cmpq 0x58(%rsp), %r12
je 0x10d1a6
movl 0x50(%rsp), %eax
imull %r12d, %eax
cltq
addq 0x160(%r15), %rax
movq %rax, 0x20(%rsp)
movslq 0xd4(%r14), %rax
movq 0xb8(%r14), %r11
imulq %r11, %rax
movq %rax, 0x38(%rsp)
imulq 0xe8(%r14), %r11
imulq %r12, %r11
addq 0xa8(%r14), %r11
xorl %eax, %eax
movq %r12, 0x28(%rsp)
cmpq 0x30(%rsp), %rax
je 0x10ce2f
movq %r11, 0x68(%rsp)
movq %rax, 0x8(%rsp)
leaq (%rax,%rax,8), %rax
movq 0x20(%rsp), %rdx
movsbl (%rdx,%rax), %r11d
movsbl 0x1(%rdx,%rax), %ebp
movsbl 0x2(%rdx,%rax), %r9d
movsbl 0x3(%rdx,%rax), %r10d
movsbl 0x4(%rdx,%rax), %ebx
movsbl 0x5(%rdx,%rax), %r14d
movsbl 0x6(%rdx,%rax), %r12d
movsbl 0x7(%rdx,%rax), %ecx
movsbl 0x8(%rdx,%rax), %r8d
movswl %r11w, %r11d
movswl %bp, %ebp
movswl %r9w, %eax
movswl %r10w, %r13d
movswl %bx, %r15d
movswl %r14w, %ebx
movswl %r12w, %r12d
movswl %cx, %ecx
movswl %r8w, %r8d
pushq $0x4
popq %r9
cmpq $0x1c, %r9
je 0x10cda4
leaq 0x2e5d8a(%rip), %rsi # 0x3f2ac0
movzwl -0x4(%r9,%rsi), %edi
movl %edi, %r14d
imull %r11d, %r14d
movzwl -0x2(%r9,%rsi), %edx
movl %edx, %r10d
imull %ebp, %r10d
addl %r14d, %r10d
movzwl (%r9,%rsi), %esi
movl %esi, %r14d
imull %eax, %r14d
addl %r10d, %r14d
movw %r14w, 0x6c(%rsp,%r9)
movl %edi, %r10d
imull %r13d, %r10d
movl %edx, %r14d
imull %r15d, %r14d
addl %r10d, %r14d
movl %esi, %r10d
imull %ebx, %r10d
addl %r14d, %r10d
movw %r10w, 0x6e(%rsp,%r9)
imull %r12d, %edi
imull %ecx, %edx
addl %edi, %edx
imull %r8d, %esi
addl %edx, %esi
movw %si, 0x70(%rsp,%r9)
addq $0x6, %r9
jmp 0x10cd29
movq 0x68(%rsp), %r11
movq %r11, %rax
xorl %ecx, %ecx
movq 0x40(%rsp), %r14
movq 0x60(%rsp), %r15
movq 0x28(%rsp), %r12
leaq 0x2e5cfe(%rip), %rdi # 0x3f2ac2
cmpq $0x4, %rcx
je 0x10ce1d
imulq $0x6, %rcx, %rdx
movzwl 0x70(%rsp,%rdx), %r8d
vmovd 0x72(%rsp,%rdx), %xmm0
movq %rdi, %r9
xorl %r10d, %r10d
cmpq $0x4, %r10
je 0x10ce14
movzwl -0x2(%r9), %edx
imulw %r8w, %dx
vmovd (%r9), %xmm1
vpmullw %xmm0, %xmm1, %xmm1
vmovd %xmm1, %esi
addl %edx, %esi
vpextrw $0x1, %xmm1, %edx
addl %esi, %edx
movw %dx, (%rax,%r10,2)
incq %r10
addq $0x6, %r9
jmp 0x10cde0
incq %rcx
addq $0x8, %rax
jmp 0x10cdc4
movq 0x8(%rsp), %rax
incq %rax
addq 0x38(%rsp), %r11
jmp 0x10ccb3
incq %r12
jmp 0x10cc61
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1d(%rax)
jne 0x10ce6a
leaq 0x160(%r15), %rdi
leaq 0x60(%r14), %rsi
movl 0xd0(%r15), %ecx
movl 0xd8(%r15), %r9d
movq 0x8(%rsp), %rdx
callq 0x12938c
jmp 0x10d1a6
addq $0x160, %rbx # imm = 0x160
cmpq $0x18, %rbx
je 0x10d1a6
movq 0x8(%r14,%rbx), %rax
testq %rax, %rax
je 0x10ce88
lock
incl (%rax)
leaq 0x18(%r14), %r15
movq 0x20(%r14), %rax
testq %rax, %rax
je 0x10d141
lock
decl (%rax)
jne 0x10d141
movq 0x18(%r14), %rsi
movq 0x38(%r14), %rdi
testq %rdi, %rdi
je 0x10d139
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10d141
movl %ecx, 0x48(%rsp)
addq %r14, %r12
movl %r8d, %eax
xorl $0x1, %eax
movl %r9d, %ecx
xorl $0x1, %ecx
orl %eax, %ecx
jne 0x10cf46
cmpl $0x1, 0xdc(%r12)
jne 0x10cf46
cmpl $0x1, 0xe0(%r12)
jne 0x10cf01
cmpl $0x1, 0xe4(%r12)
jne 0x10cf01
cmpl $0x1, 0xe8(%r12)
je 0x10cf22
cmpl $0x1, 0xe0(%r12)
jne 0x10cf46
cmpl $0x2, 0xe4(%r12)
jne 0x10cf46
cmpl $0x2, 0xe8(%r12)
jne 0x10cf46
addq $0x160, %r12 # imm = 0x160
movq %rbx, %rcx
movl %edx, %ebx
leaq 0x60(%r14), %rsi
pushq $0x1
popq %r8
movq %r12, %rdi
movq 0x8(%rsp), %rdx
movl %r8d, %r9d
jmp 0x10d00a
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1c(%rax)
movq %rbx, 0x20(%rsp)
jne 0x10cfe5
cmpl $0x3, %r9d
jne 0x10cfe5
cmpl $0x3, %r8d
jne 0x10cfe5
movq 0x10(%rsp), %rax
cmpb $0x0, 0x38(%rax)
je 0x10cfe5
cmpl $0x1, 0xdc(%r12)
jne 0x10cfe5
cmpl $0x1, 0xe0(%r12)
jne 0x10cfe5
cmpl $0x1, 0xe4(%r12)
jne 0x10cfe5
cmpl $0x1, 0xe8(%r12)
jne 0x10cfe5
movl %edi, 0x1c(%rsp)
movb %sil, 0x7(%rsp)
movl %edx, 0x18(%rsp)
addq $0x160, %r12 # imm = 0x160
leaq 0xf0(%r14), %r13
callq 0x732f7
testl %eax, %eax
je 0x10d2d6
movq %r12, %rdi
movq %r13, %rsi
movq 0x8(%rsp), %rdx
movl %ebx, %ecx
movq 0x10(%rsp), %r8
callq 0x136404
jmp 0x10d2fa
movl %edx, %ebx
addq $0x160, %r12 # imm = 0x160
movq 0x10(%rsp), %rax
cmpb $0x1, 0x1d(%rax)
jne 0x10d014
leaq 0x60(%r14), %rsi
movq %r12, %rdi
movq 0x8(%rsp), %rdx
movq 0x20(%rsp), %rcx
callq 0x128be2
jmp 0x10c92b
leaq 0x18(%r14), %rsi
movq %r12, %rdi
movq 0x8(%rsp), %rdx
movq 0x20(%rsp), %rcx
pushq $0x4
pushq $0x8
callq 0x128d71
popq %rax
popq %rcx
jmp 0x10c92b
cmpl $0x3, 0xd8(%rdi)
jne 0x10d0eb
cmpl $0x1, 0xdc(%rdi)
jne 0x10d066
cmpl $0x1, 0xe0(%rdi)
jne 0x10d066
cmpl $0x1, 0xe4(%rdi)
jne 0x10d066
cmpl $0x1, 0xe8(%rdi)
je 0x10d08a
cmpl $0x1, 0xdc(%rdi)
jne 0x10d0eb
cmpl $0x1, 0xe0(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe4(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe8(%rdi)
jne 0x10d0eb
leaq 0x60(%r14), %rsi
movl 0xd0(%rdi), %ecx
addq $0x160, %rdi # imm = 0x160
pushq $0x3
jmp 0x10d0e4
cmpl $0x7, 0xd8(%rdi)
movq 0x8(%rsp), %rdx
jne 0x10d0eb
cmpl $0x1, 0xdc(%rdi)
jne 0x10d0eb
cmpl $0x1, 0xe0(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe4(%rdi)
jne 0x10d0eb
cmpl $0x2, 0xe8(%rdi)
jne 0x10d0eb
leaq 0x60(%r14), %rsi
movl 0xd0(%rdi), %ecx
addq $0x160, %rdi # imm = 0x160
pushq $0x7
popq %r8
movl %r8d, %r9d
jmp 0x10d116
leaq 0x160(%rdi), %rax
movl 0xd0(%rdi), %ecx
movl 0xd8(%rdi), %r9d
movq 0x10(%rsp), %rdx
cmpb $0x1, 0x1d(%rdx)
jne 0x10d120
leaq 0x60(%r14), %rsi
movq %rax, %rdi
movq 0x8(%rsp), %rdx
callq 0x128f6b
jmp 0x10d1a6
leaq 0x18(%r14), %rsi
movq %rax, %rdi
movq 0x8(%rsp), %rdx
pushq $0x4
pushq $0x1
callq 0x128d71
popq %rax
popq %rcx
jmp 0x10d1a6
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x58(%r14)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%r15)
vmovdqu %xmm0, (%r15)
andl $0x0, 0x50(%r14)
vmovdqu %xmm0, 0x40(%r14)
vmovups (%r14,%rbx), %xmm0
vmovups %xmm0, 0x18(%r14)
movq 0x10(%r14,%rbx), %rax
movq %rax, 0x28(%r14)
movl 0x18(%r14,%rbx), %eax
movl %eax, 0x30(%r14)
movq 0x20(%r14,%rbx), %rax
movq %rax, 0x38(%r14)
vmovdqu 0x28(%r14,%rbx), %xmm0
vmovdqu %xmm0, 0x40(%r14)
movl 0x38(%r14,%rbx), %eax
movl %eax, 0x50(%r14)
movq 0x40(%r14,%rbx), %rax
movq %rax, 0x58(%r14)
leaq 0x190(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %esi
xorl %r15d, %r15d
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq 0x190(%r14), %rcx
vmovss 0x2e1aad(%rip), %xmm0 # 0x3eec88
movq -0x18(%rax), %rdx
leaq (%r14,%rdx), %rbx
movslq 0xd0(%r14,%rdx), %rdx
cmpq %rdx, %r15
jge 0x10d221
movq 0x1f0(%rbx), %rdx
vmovss (%rdx,%r15,4), %xmm2
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm2, %xmm1
je 0x10d216
movq 0x238(%rbx), %rdx
vmulss (%rdx), %xmm2, %xmm1
vdivss %xmm1, %xmm0, %xmm1
vmovss %xmm1, (%rcx,%r15,4)
incq %r15
jmp 0x10d1db
movq 0x10(%rsp), %rax
cmpb $0x0, (%rax)
je 0x10d28c
leaq 0x160(%rbx), %r14
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0x10d266
lock
decl (%rax)
jne 0x10d266
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x10d25e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10d266
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a0(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r14)
vmovups %xmm0, (%r14)
vmovups %xmm0, 0x188(%rbx)
andl $0x0, 0x198(%rbx)
xorl %eax, %eax
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rbx, 0x20(%rsp)
callq 0x732db
testl %eax, %eax
je 0x10d303
movq %r12, %rdi
movq %r13, %rsi
movq 0x8(%rsp), %rdx
movq 0x20(%rsp), %rcx
movq 0x10(%rsp), %r8
callq 0x139296
movl 0x18(%rsp), %edx
movb 0x7(%rsp), %sil
jmp 0x10caf2
callq 0x732db
testl %eax, %eax
je 0x10d7e7
movq %r12, %rdi
movq %r13, %rsi
movq 0x8(%rsp), %rdx
movl %ebx, %ecx
movq 0x10(%rsp), %r8
callq 0x13b11f
movl 0x18(%rsp), %ebx
jmp 0x10c92b
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %ebp, %ebp
pushq $0x24
popq %rsi
pushq $0x2
popq %r8
movq 0x8(%rsp), %rbx
movl %ebx, %edx
movq 0x20(%rsp), %r15
movl %r15d, %ecx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
movl $0x0, %ecx
cmovgl %ebx, %ecx
movq %rcx, 0x38(%rsp)
testl %r15d, %r15d
movl $0x0, %ecx
cmovgl %r15d, %ecx
movq %rcx, 0x48(%rsp)
leal (%rbx,%rbx,8), %ecx
movl %ecx, 0x1c(%rsp)
movq %r12, 0xc0(%rsp)
movq %r13, 0xb8(%rsp)
cmpq 0x48(%rsp), %rbp
je 0x10d574
movl 0x1c(%rsp), %ecx
imull %ebp, %ecx
movslq %ecx, %rcx
addq (%r12), %rcx
movq %rcx, 0x58(%rsp)
movslq 0x9c(%rsp), %rcx
movq 0x80(%rsp), %r11
imulq %r11, %rcx
movq %rcx, 0x50(%rsp)
imulq 0xb0(%rsp), %r11
imulq %rbp, %r11
addq 0x70(%rsp), %r11
xorl %ecx, %ecx
movq %rbp, 0x60(%rsp)
cmpq 0x38(%rsp), %rcx
je 0x10d56c
movq %r11, 0x30(%rsp)
movq %rcx, 0x28(%rsp)
leaq (%rcx,%rcx,8), %rax
movq 0x58(%rsp), %rdx
movsbl (%rdx,%rax), %ecx
movsbl 0x1(%rdx,%rax), %esi
movsbl 0x2(%rdx,%rax), %ebp
movsbl 0x3(%rdx,%rax), %r10d
movsbl 0x4(%rdx,%rax), %r11d
movsbl 0x5(%rdx,%rax), %ebx
movsbl 0x6(%rdx,%rax), %r14d
movsbl 0x7(%rdx,%rax), %r12d
movsbl 0x8(%rdx,%rax), %r9d
movswl %cx, %edx
movswl %si, %ecx
movswl %bp, %r15d
movswl %r10w, %esi
movswl %r11w, %eax
movswl %bx, %ebp
movswl %r14w, %r10d
movswl %r12w, %r13d
movswl %r9w, %r11d
pushq $0x4
popq %r12
cmpq $0x28, %r12
je 0x10d4cc
leaq 0x2e5640(%rip), %r8 # 0x3f2a90
movzwl -0x4(%r12,%r8), %ebx
movl %ebx, %r14d
imull %edx, %r14d
movzwl -0x2(%r12,%r8), %edi
movl %edi, %r9d
imull %ecx, %r9d
addl %r14d, %r9d
movzwl (%r12,%r8), %r8d
movl %r8d, %r14d
imull %r15d, %r14d
addl %r9d, %r14d
movw %r14w, 0xcc(%rsp,%r12)
movl %ebx, %r9d
imull %esi, %r9d
movl %edi, %r14d
imull %eax, %r14d
addl %r9d, %r14d
movl %r8d, %r9d
imull %ebp, %r9d
addl %r14d, %r9d
movw %r9w, 0xce(%rsp,%r12)
imull %r10d, %ebx
imull %r13d, %edi
addl %ebx, %edi
imull %r11d, %r8d
addl %edi, %r8d
movw %r8w, 0xd0(%rsp,%r12)
addq $0x6, %r12
jmp 0x10d43f
movq 0x30(%rsp), %r11
movq %r11, %rax
xorl %ecx, %ecx
movq 0x40(%rsp), %r14
movq 0xc0(%rsp), %r12
movq 0xb8(%rsp), %r13
movq 0x60(%rsp), %rbp
leaq 0x2e559b(%rip), %r10 # 0x3f2a92
cmpq $0x6, %rcx
je 0x10d555
imulq $0x6, %rcx, %rsi
movzwl 0xd0(%rsp,%rsi), %edx
vmovd 0xd2(%rsp,%rsi), %xmm0
movq %r10, %rsi
xorl %r9d, %r9d
cmpq $0x6, %r9
je 0x10d54c
movzwl -0x2(%rsi), %edi
imulw %dx, %di
vmovd (%rsi), %xmm1
vpmullw %xmm0, %xmm1, %xmm1
vmovd %xmm1, %r8d
addl %edi, %r8d
vpextrw $0x1, %xmm1, %edi
addl %r8d, %edi
movw %di, (%rax,%r9,2)
incq %r9
addq $0x6, %rsi
jmp 0x10d518
incq %rcx
addq $0xc, %rax
jmp 0x10d4f7
movq 0x28(%rsp), %rcx
incq %rcx
addq 0x50(%rsp), %r11
movq 0x20(%rsp), %r15
jmp 0x10d3cb
incq %rbp
jmp 0x10d37b
movl %r15d, %eax
cltd
pushq $0x4
popq %r9
idivl %r9d
imull $-0x3, %eax, %ecx
addl %r15d, %ecx
subq $0x8, %rsp
pushq $0x24
popq %rdx
pushq $0x8
popq %r8
movq %r13, %rdi
movq 0x10(%rsp), %rsi
pushq $0x0
callq 0x628f2
popq %rax
popq %rcx
movslq 0x11c(%r14), %rcx
movq 0xf0(%r14), %rax
movq %rax, 0x28(%rsp)
movq 0x100(%r14), %rax
movq 0x130(%r14), %rdx
imulq %rax, %rdx
movq %rdx, 0x38(%rsp)
imulq %rax, %rcx
movq %rcx, 0x30(%rsp)
xorl %ebx, %ebx
movq %rbx, %rdx
orq $0x3, %rdx
cmpq 0x20(%rsp), %rdx
jge 0x10d6b9
movq %rbx, %rdi
shrq $0x2, %rdi
imulq 0x38(%rsp), %rdi
addq 0x28(%rsp), %rdi
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpq $0x24, %r9
je 0x10d6b0
movq 0x30(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
movq 0x80(%rsp), %rdx
movq 0xb0(%rsp), %r11
imulq %rdx, %r11
movq %r11, %r15
movq %rbx, %rcx
imulq %rbx, %r15
addq %r8, %r15
addq 0x70(%rsp), %r15
movslq 0x9c(%rsp), %r12
imulq %rdx, %r12
leaq (,%r12,8), %r13
xorl %edx, %edx
movq %rdx, %rsi
orq $0x7, %rsi
cmpq 0x68(%rsp), %rsi
jge 0x10d69c
movq %r15, %rbp
xorl %esi, %esi
cmpq $0x4, %rsi
je 0x10d693
xorl %ebx, %ebx
movq %rbp, %r14
cmpq $0x10, %rbx
je 0x10d688
movzwl (%r14), %eax
movw %ax, (%r10,%rbx)
addq %r12, %r14
addq $0x2, %rbx
jmp 0x10d670
incq %rsi
addq %r11, %rbp
addq %rbx, %r10
jmp 0x10d665
addq $0x8, %rdx
addq %r13, %r15
jmp 0x10d652
incq %r9
addq $0x2, %r8
movq 0x40(%rsp), %r14
movq %rcx, %rbx
jmp 0x10d600
addq $0x4, %rbx
jmp 0x10d5d6
movslq 0x9c(%rsp), %rcx
movq 0x80(%rsp), %rdx
movslq 0x11c(%r14), %rsi
movq 0xf0(%r14), %rax
movq %rax, 0x30(%rsp)
movq 0x100(%r14), %rdi
movq 0x130(%r14), %rax
imulq %rdi, %rax
movq %rax, 0x28(%rsp)
imulq %rdi, %rsi
imulq %rdx, %rcx
imulq 0xb0(%rsp), %rdx
movq %rdx, %r9
imulq %rbx, %r9
addq 0x70(%rsp), %r9
leaq (,%rcx,8), %r10
cmpq 0x20(%rsp), %rbx
jge 0x10d7af
movl %ebx, %eax
shrl $0x2, %eax
movq %rbx, %r8
movl %ebx, %r11d
andl $0x3, %r11d
addl %eax, %r11d
imulq 0x28(%rsp), %r11
addq 0x30(%rsp), %r11
movq %r9, %r15
xorl %r12d, %r12d
cmpq $0x24, %r12
je 0x10d79c
movq %rsi, %r13
imulq %r12, %r13
addq %r11, %r13
movq %r15, %rbp
xorl %edi, %edi
movq %rdi, %rax
orq $0x7, %rax
cmpq 0x68(%rsp), %rax
jge 0x10d793
xorl %ebx, %ebx
movq %rbp, %r14
cmpq $0x10, %rbx
je 0x10d787
movzwl (%r14), %eax
movw %ax, (%r13,%rbx)
addq %rcx, %r14
addq $0x2, %rbx
jmp 0x10d76e
addq $0x8, %rdi
addq %r10, %rbp
addq %rbx, %r13
jmp 0x10d75b
incq %r12
addq $0x2, %r15
jmp 0x10d746
movq %r8, %rbx
incq %rbx
addq %rdx, %r9
movq 0x40(%rsp), %r14
jmp 0x10d718
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x10d2c8
lock
decl (%rax)
jne 0x10d2c8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x10dbe0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10d2c8
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %ebp, %ebp
pushq $0x24
popq %rsi
pushq $0x2
popq %r8
movq %rbx, %r15
movq 0x8(%rsp), %rbx
movl %ebx, %edx
movl %r15d, %ecx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
movl $0x0, %eax
cmovgl %ebx, %eax
movq %rax, 0x38(%rsp)
testl %r15d, %r15d
movl $0x0, %eax
cmovgl %r15d, %eax
movq %rax, 0x100(%rsp)
leal (%rbx,%rbx,8), %eax
movl %eax, 0xcc(%rsp)
movq %r12, 0xc0(%rsp)
movq %r13, 0xb8(%rsp)
cmpq 0x100(%rsp), %rbp
je 0x10da5e
movl 0xcc(%rsp), %eax
imull %ebp, %eax
cltq
addq (%r12), %rax
movq %rax, 0x58(%rsp)
movslq 0x9c(%rsp), %rax
movq 0x80(%rsp), %r11
imulq %r11, %rax
movq %rax, 0x50(%rsp)
imulq 0xb0(%rsp), %r11
imulq %rbp, %r11
addq 0x70(%rsp), %r11
xorl %eax, %eax
movq %rbp, 0x60(%rsp)
cmpq 0x38(%rsp), %rax
je 0x10da51
movq %r11, 0x30(%rsp)
movq %rax, 0x28(%rsp)
leaq (%rax,%rax,8), %rax
movq 0x58(%rsp), %r8
movsbl (%r8,%rax), %ecx
movsbl 0x1(%r8,%rax), %edx
movsbl 0x2(%r8,%rax), %esi
movsbl 0x3(%r8,%rax), %edi
movsbl 0x4(%r8,%rax), %r9d
movsbl 0x5(%r8,%rax), %r11d
movsbl 0x6(%r8,%rax), %ebx
movsbl 0x7(%r8,%rax), %r14d
movsbl 0x8(%r8,%rax), %r13d
movswl %cx, %ecx
movswl %dx, %r15d
movswl %si, %esi
movswl %di, %eax
movswl %r9w, %r10d
movswl %r11w, %ebp
movswl %bx, %r12d
movswl %r14w, %r11d
movswl %r13w, %edx
pushq $0x4
popq %r13
cmpq $0x28, %r13
je 0x10d9b5
leaq 0x2e5153(%rip), %r8 # 0x3f2a90
movzwl -0x4(%r13,%r8), %edi
movl %edi, %ebx
imull %ecx, %ebx
movzwl -0x2(%r13,%r8), %r9d
movl %r9d, %r14d
imull %r15d, %r14d
addl %ebx, %r14d
movzwl (%r13,%r8), %r8d
movl %r8d, %ebx
imull %esi, %ebx
addl %r14d, %ebx
movw %bx, 0xcc(%rsp,%r13)
movl %edi, %ebx
imull %eax, %ebx
movl %r9d, %r14d
imull %r10d, %r14d
addl %ebx, %r14d
movl %r8d, %ebx
imull %ebp, %ebx
addl %r14d, %ebx
movw %bx, 0xce(%rsp,%r13)
imull %r12d, %edi
imull %r11d, %r9d
addl %edi, %r9d
imull %edx, %r8d
addl %r9d, %r8d
movw %r8w, 0xd0(%rsp,%r13)
addq $0x6, %r13
jmp 0x10d930
movq 0x30(%rsp), %r11
movq %r11, %rax
xorl %ecx, %ecx
movq 0x40(%rsp), %r14
movq 0xc0(%rsp), %r12
movq 0xb8(%rsp), %r13
movq 0x60(%rsp), %rbp
leaq 0x2e50b2(%rip), %r10 # 0x3f2a92
cmpq $0x6, %rcx
je 0x10da3f
imulq $0x6, %rcx, %rsi
movzwl 0xd0(%rsp,%rsi), %edx
vmovd 0xd2(%rsp,%rsi), %xmm0
movq %r10, %rsi
xorl %edi, %edi
cmpq $0x6, %rdi
je 0x10da36
movzwl -0x2(%rsi), %r8d
imulw %dx, %r8w
vmovd (%rsi), %xmm1
vpmullw %xmm0, %xmm1, %xmm1
vmovd %xmm1, %r9d
addl %r8d, %r9d
vpextrw $0x1, %xmm1, %r8d
addl %r9d, %r8d
movw %r8w, (%rax,%rdi,2)
incq %rdi
addq $0x6, %rsi
jmp 0x10da00
incq %rcx
addq $0xc, %rax
jmp 0x10d9e0
movq 0x28(%rsp), %rax
incq %rax
addq 0x50(%rsp), %r11
jmp 0x10d8b8
incq %rbp
movq 0x8(%rsp), %rbx
jmp 0x10d863
pushq $0x8
popq %rcx
movl %ebx, %eax
cltd
idivl %ecx
movl %eax, %esi
movq 0x20(%rsp), %rbx
movl %ebx, %eax
cltd
pushq $0x4
popq %rcx
idivl %ecx
subq $0x8, %rsp
pushq $0x24
popq %rdx
pushq $0x40
popq %r8
pushq $0x20
popq %r9
movq %r13, %rdi
movl %eax, %ecx
pushq $0x0
callq 0x628f2
popq %rax
popq %rcx
movslq 0x11c(%r14), %rax
movq 0xf0(%r14), %rcx
movq %rcx, 0x28(%rsp)
movq 0x100(%r14), %rcx
movq 0x130(%r14), %rdx
imulq %rcx, %rdx
movq %rdx, 0x38(%rsp)
imulq %rcx, %rax
movq %rax, 0x30(%rsp)
xorl %esi, %esi
movl 0x1c(%rsp), %edi
movq %rsi, %rcx
orq $0x3, %rcx
cmpq %rbx, %rcx
jge 0x10dbab
movq %rsi, %rdi
shrq $0x2, %rdi
imulq 0x38(%rsp), %rdi
addq 0x28(%rsp), %rdi
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpq $0x24, %r9
je 0x10db9d
movq 0x30(%rsp), %r10
imulq %r9, %r10
addq %rdi, %r10
movq 0x80(%rsp), %rcx
movq 0xb0(%rsp), %r11
imulq %rcx, %r11
movq %r11, %r15
imulq %rsi, %r15
addq %r8, %r15
addq 0x70(%rsp), %r15
movslq 0x9c(%rsp), %r13
imulq %rcx, %r13
leaq (,%r13,8), %rcx
xorl %edx, %edx
movq %rdx, %rbx
orq $0x7, %rbx
cmpq 0x68(%rsp), %rbx
jge 0x10db8c
movq %r15, %r12
xorl %ebp, %ebp
cmpq $0x4, %rbp
je 0x10db83
xorl %ebx, %ebx
movq %r12, %r14
cmpq $0x10, %rbx
je 0x10db78
movzwl (%r14), %eax
movw %ax, (%r10,%rbx)
addq %r13, %r14
addq $0x2, %rbx
jmp 0x10db60
incq %rbp
addq %r11, %r12
addq %rbx, %r10
jmp 0x10db55
addq $0x8, %rdx
addq %rcx, %r15
jmp 0x10db42
incq %r9
addq $0x2, %r8
movq 0x20(%rsp), %rbx
jmp 0x10daf3
addq $0x4, %rsi
movq 0x40(%rsp), %r14
jmp 0x10dac7
movq 0x78(%rsp), %rax
testq %rax, %rax
movl 0x48(%rsp), %ecx
je 0x10dbfd
lock
decl (%rax)
jne 0x10dbfd
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x10dbed
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x1c(%rsp), %edi
movl 0x48(%rsp), %ecx
jmp 0x10dbfd
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10d2c8
movq %rsi, %rdi
callq 0x5f3e0
movl 0x1c(%rsp), %edi
movl 0x48(%rsp), %ecx
movl 0x18(%rsp), %edx
movb 0x7(%rsp), %sil
jmp 0x10c864
jmp 0x10dc7b
jmp 0x10dc7b
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x10dc73
lock
decl (%rax)
jne 0x10dc73
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x10dc35
jmp 0x10dc63
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10dc73
jmp 0x10dc7b
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x10dc73
lock
decl (%rax)
jne 0x10dc73
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x10dc6d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10dc73
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_avx.cpp
|
ncnn::conv3x3s1_winograd23_transform_kernel(ncnn::Mat const&, ncnn::Mat&, int, int, ncnn::Option const&)
|
static void conv3x3s1_winograd23_transform_kernel(const Mat& kernel, Mat& AT, int inch, int outch, const Option& opt)
{
const int M = outch;
const int K = inch;
const int B = 16;
int TILE_M, TILE_N, TILE_K;
get_optimal_tile_mnk(M, 0, K, TILE_M, TILE_N, TILE_K, opt.num_threads);
const int nn_M = (M + TILE_M - 1) / TILE_M;
Mat A_tileX(B * TILE_M * TILE_K, 1, opt.num_threads, 4u, (Allocator*)0);
AT.create(TILE_K * TILE_M, B, (K + TILE_K - 1) / TILE_K, (M + TILE_M - 1) / TILE_M, 4u, (Allocator*)0);
#pragma omp parallel for num_threads(opt.num_threads)
for (int ppj = 0; ppj < nn_M; ppj++)
{
const int i = ppj * TILE_M;
Mat A_tile = A_tileX.channel(get_omp_thread_num());
for (int k = 0; k < K; k += TILE_K)
{
const int max_ii = std::min((M - i), TILE_M);
const int max_kk = std::min((K - k), TILE_K);
conv3x3s1_winograd23_transform_kernel_tile(kernel, A_tile, inch, i, max_ii, k, max_kk);
Mat AT_tile = AT.channel(i / TILE_M).depth(k / TILE_K);
pack_A_tile(A_tile, AT_tile, B, max_ii, max_kk);
}
}
}
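
Two details of the function above are worth spelling out. The tile counts use ceiling division, so nn_M = (M + TILE_M - 1) / TILE_M tiles cover all M output channels, with std::min clamping the ragged last tile; and the per-tile kernel transform it calls is the standard F(2x2, 3x3) Winograd transform U = G g G^T with G = [1 0 0; 1/2 1/2 1/2; 1/2 -1/2 1/2; 0 0 1], which is where the 0.5 multiplies in the compiled code below come from. A sketch of the tiling pattern in isolation (generic names, not ncnn code):

#include <algorithm>

// Ceiling-division tiling over [0, M): every tile spans TILE_M rows except
// possibly the last, which std::min clamps to the remainder.
static void for_each_tile(int M, int TILE_M)
{
    const int nn_M = (M + TILE_M - 1) / TILE_M; // number of tiles, rounding up
    for (int ppj = 0; ppj < nn_M; ppj++)
    {
        const int i = ppj * TILE_M;
        const int max_ii = std::min(M - i, TILE_M); // rows in this tile
        (void)max_ii; // process rows [i, i + max_ii)
    }
}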
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %r8, %r13
movl %ecx, %r14d
movl %edx, %r15d
movq %rsi, 0x28(%rsp)
movq %rdi, %r12
movl 0x4(%r8), %eax
movl %eax, (%rsp)
leaq 0xbc(%rsp), %rbx
leaq 0xb8(%rsp), %r8
leaq 0xb4(%rsp), %rbp
movl %ecx, %edi
xorl %esi, %esi
movq %rbx, %rcx
movq %rbp, %r9
callq 0x129a0f
movl (%rbx), %ecx
movq %r14, 0x80(%rsp)
leal (%r14,%rcx), %eax
decl %eax
cltd
idivl %ecx
movl (%rbp), %r14d
movl %eax, %ebp
movl %r14d, %ebx
movq %rcx, 0x78(%rsp)
imull %ecx, %ebx
movl 0x4(%r13), %ecx
leaq 0xc0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %ebx, %esi
shll $0x4, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x1
popq %rdx
pushq $0x4
popq %r13
movq %r13, %r8
xorl %r9d, %r9d
callq 0x63810
leal (%r15,%r14), %eax
decl %eax
cltd
movq %r14, 0x10(%rsp)
idivl %r14d
andq $0x0, (%rsp)
pushq $0x10
popq %rdx
movq 0x28(%rsp), %rdi
movl %ebx, %esi
movl %eax, %ecx
movl %ebp, %r8d
movq %r13, %r9
callq 0x6393c
leal (%r15,%r15,8), %r14d
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
pushq $0x8
popq %r13
movq %r15, %rbx
xorl %r15d, %r15d
movq %rbx, 0x90(%rsp)
movl %ebp, 0x1c(%rsp)
cmpl %ebp, %r15d
je 0x10eb62
callq 0x7357d
movslq 0xec(%rsp), %rdx
movslq 0xf0(%rsp), %rcx
movslq %eax, %r9
imulq 0x100(%rsp), %r9
movq 0xd0(%rsp), %rsi
imulq %rsi, %r9
addq 0xc0(%rsp), %r9
movl 0xf4(%rsp), %eax
movl 0xd8(%rsp), %edi
movq 0xe0(%rsp), %r8
movq %r9, 0xa0(%rsp)
movq %r9, 0x110(%rsp)
andq $0x0, 0x118(%rsp)
movq %rsi, 0x120(%rsp)
movl %edi, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movl %edx, 0x13c(%rsp)
movl %ecx, 0x140(%rsp)
movl $0x1, 0x144(%rsp)
movl %eax, 0x148(%rsp)
imulq %rdx, %rcx
movq %rsi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x150(%rsp)
movl 0xe8(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x138(%rsp)
cmpl $0x4, %eax
jne 0x10e911
movq %rcx, 0x150(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, %ebp
imull %r15d, %ebp
movq 0x80(%rsp), %rax
movl %eax, %edx
subl %ebp, %edx
cmpl %edx, %ecx
cmovll %ecx, %edx
xorl %ecx, %ecx
testl %edx, %edx
movq %r15, %rax
movl $0x0, %r15d
movl %edx, 0x20(%rsp)
cmovgl %edx, %r15d
movq %rax, 0x88(%rsp)
cltq
movq %rax, 0x98(%rsp)
movl %ebx, %r8d
subl %ecx, %r8d
jle 0x10eb4e
movq 0x10(%rsp), %rax
cmpl %r8d, %eax
cmovll %eax, %r8d
movl %ecx, 0x24(%rsp)
movslq %ecx, %rax
testl %r8d, %r8d
movl $0x0, %ecx
cmovgl %r8d, %ecx
xorl %edx, %edx
movq 0xa0(%rsp), %rsi
cmpq %r15, %rdx
je 0x10ea85
leal (%rdx,%rbp), %edi
imull %r14d, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
addq (%r12), %rdi
xorl %r9d, %r9d
cmpq %rcx, %r9
je 0x10ea7d
leaq (%r9,%rax), %r10
imulq $0x24, %r10, %r10
addq %rdi, %r10
xorl %r11d, %r11d
cmpq $0x3, %r11
je 0x10ea1a
vmovss (%r10), %xmm0
vmovss 0x8(%r10), %xmm1
vmovss 0x2df63d(%rip), %xmm3 # 0x3ee014
vmulss 0x4(%r10), %xmm3, %xmm2
vmovss %xmm0, 0x30(%rsp,%r11,4)
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm1, %xmm3
vaddss %xmm3, %xmm0, %xmm4
vaddss %xmm4, %xmm2, %xmm4
vmovss %xmm4, 0x3c(%rsp,%r11,4)
vsubss %xmm2, %xmm0, %xmm0
vaddss %xmm3, %xmm0, %xmm0
vmovss %xmm0, 0x48(%rsp,%r11,4)
vmovss %xmm1, 0x54(%rsp,%r11,4)
addq $0xc, %r10
incq %r11
jmp 0x10e9be
movq %r13, %r10
cmpq $0x38, %r10
je 0x10ea75
vmovss 0x28(%rsp,%r10), %xmm0
vmovss 0x30(%rsp,%r10), %xmm1
vmovss 0x2df5db(%rip), %xmm4 # 0x3ee014
vmulss 0x2c(%rsp,%r10), %xmm4, %xmm2
vmulss %xmm4, %xmm0, %xmm3
vmulss %xmm4, %xmm1, %xmm4
vaddss %xmm4, %xmm3, %xmm5
vaddss %xmm5, %xmm2, %xmm5
vsubss %xmm2, %xmm3, %xmm2
vaddss %xmm4, %xmm2, %xmm2
vmovss %xmm0, (%rsi)
vmovss %xmm5, 0x4(%rsi)
vmovss %xmm2, 0x8(%rsi)
vmovss %xmm1, 0xc(%rsi)
addq $0x10, %rsi
addq $0xc, %r10
jmp 0x10ea1d
incq %r9
jmp 0x10e9a7
incq %rdx
jmp 0x10e989
movq 0x28(%rsp), %rax
movslq 0x2c(%rax), %r11
movslq 0x30(%rax), %r10
movq 0x40(%rax), %rdi
imulq 0x98(%rsp), %rdi
movq 0x10(%rax), %r9
imulq %r9, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %ecx
movq 0x20(%rax), %rax
movq %rax, 0xa8(%rsp)
movq %r10, %rsi
imulq %r11, %rsi
movl 0x24(%rsp), %ebx
movl %ebx, %eax
cltd
idivl 0x10(%rsp)
cltq
movq %r9, %rdx
imulq %rsi, %rdx
imulq %rax, %rdx
addq %rdi, %rdx
movq %rdx, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r9, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movq 0xa8(%rsp), %rax
movq %rax, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %r11d, 0x5c(%rsp)
movl %r10d, 0x60(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0x64(%rsp)
movq %rsi, 0x70(%rsp)
leaq 0x110(%rsp), %rdi
leaq 0x30(%rsp), %rsi
pushq $0x10
popq %rdx
movl 0x20(%rsp), %ecx
callq 0x129bb4
movl %ebx, %ecx
addl 0x10(%rsp), %ecx
movq 0x90(%rsp), %rbx
jmp 0x10e954
movq 0x88(%rsp), %r15
incl %r15d
movl 0x1c(%rsp), %ebp
jmp 0x10e82f
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0x10eb99
lock
decl (%rax)
jne 0x10eb99
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
je 0x10eb91
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x10eb99
movq %rsi, %rdi
callq 0x5f3e0
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x10ebf1
jmp 0x10ebaf
movq %rax, %rbx
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0x10ebe9
lock
decl (%rax)
jne 0x10ebe9
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
jne 0x10ebe3
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x10ebe9
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_3x3_winograd.h
|
ncnn::test_prefer_winograd63(int, int, int, int)
|
static bool test_prefer_winograd63(int num_input, int num_output, int w, int h)
{
// winograd selection strategy (profiled on i7-7700 single thread)
int minwh = std::min(w, h);
if (num_input >= 64)
{
return false;
}
if (num_input >= 32)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 44)
|| (minwh >= 47 && minwh <= 56)
|| (minwh >= 63 && minwh <= 130);
if (num_output >= 16) return (minwh >= 13 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 140);
if (num_output >= 8) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 31 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 55 && minwh <= 162);
return false;
}
if (num_input >= 16)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 19 && minwh <= 20)
|| (minwh >= 23 && minwh <= 44)
|| (minwh >= 47 && minwh <= 92)
|| (minwh >= 95 && minwh <= 188);
if (num_output >= 16) return (minwh >= 11 && minwh <= 14)
|| (minwh >= 27 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 74)
|| (minwh >= 81 && minwh <= 110)
|| (minwh >= 117 && minwh <= 170)
|| (minwh >= 177 && minwh <= 182);
if (num_output >= 8) return (minwh >= 19 && minwh <= 20)
|| (minwh >= 33 && minwh <= 38)
|| (minwh >= 43 && minwh <= 44)
|| (minwh >= 47 && minwh <= 128)
|| (minwh >= 155 && minwh <= 210);
return false;
}
if (num_input >= 8)
{
if (num_output >= 64) return false;
if (num_output >= 32) return (minwh >= 7 && minwh <= 14)
|| (minwh >= 17 && minwh <= 20)
|| (minwh >= 23 && minwh <= 26)
|| (minwh >= 31 && minwh <= 38)
|| (minwh >= 43 && minwh <= 162);
if (num_output >= 16) return minwh == 31 || minwh == 32
|| (minwh >= 39 && minwh <= 44)
|| (minwh >= 47 && minwh <= 212);
if (num_output >= 8) return false;
return false;
}
return false;
}
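
The thresholds above compile into far fewer comparisons than the source suggests: a conjunction like minwh >= a && minwh <= b folds into a single unsigned compare after biasing, several disjoint ranges collapse into one bit-test against an immediate mask (the btl instructions below), and four ranges at a time are checked with vpaddd/vpminud/vpcmpeqd. The scalar identity behind all of these, for reference:

// (minwh >= a && minwh <= b) is one unsigned compare: if minwh < a,
// then minwh - a wraps around to a value far above b - a.
static bool in_range(int minwh, int a, int b)
{
    return (unsigned)(minwh - a) <= (unsigned)(b - a);
}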
|
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $0x3f, %edi
jle 0x10ec06
xorl %eax, %eax
retq
cmpl $0x20, %edi
jl 0x10ec4b
cmpl $0x3f, %esi
jg 0x10ec03
cmpl $0x20, %esi
jl 0x10ecca
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x2e2f66(%rip), %xmm0, %xmm0 # 0x3f1b90
vpminud 0x2e3c7d(%rip), %xmm0, %xmm1 # 0x3f28b0
vpcmpeqd %xmm1, %xmm0, %xmm0
movb $0x1, %al
vtestps %xmm0, %xmm0
jne 0x10ec05
addl $-0x3f, %edx
cmpl $0x44, %edx
jmp 0x10edb5
cmpl $0x10, %edi
jl 0x10ec8a
cmpl $0x3f, %esi
jg 0x10ec03
cmpl $0x20, %esi
jl 0x10ed02
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x2e2f21(%rip), %xmm0, %xmm0 # 0x3f1b90
vpminud 0x2e3c28(%rip), %xmm0, %xmm1 # 0x3f28a0
vpcmpeqd %xmm1, %xmm0, %xmm0
movb $0x1, %al
vtestps %xmm0, %xmm0
jne 0x10ec05
addl $-0x5f, %edx
jmp 0x10ecfa
cmpl $0x8, %edi
setl %al
cmpl $0x40, %esi
setge %cl
orb %al, %cl
jne 0x10ec03
cmpl $0x20, %esi
jl 0x10ed5c
leal -0x7(%rdx), %ecx
cmpl $0x20, %ecx
jae 0x10ecbf
movb $0x1, %al
movl $0xff0f3cff, %esi # imm = 0xFF0F3CFF
btl %ecx, %esi
jb 0x10ec05
addl $-0x2b, %edx
cmpl $0x78, %edx
jmp 0x10edb5
cmpl $0x10, %esi
jl 0x10ed88
leal -0x17(%rdx), %ecx
movb $0x1, %al
cmpl $0x10, %ecx
jb 0x10ec05
leal -0xd(%rdx), %ecx
cmpl $0x20, %ecx
jae 0x10ecf7
movl $0xc00000c3, %esi # imm = 0xC00000C3
btl %ecx, %esi
jb 0x10ec05
addl $-0x2f, %edx
cmpl $0x5e, %edx
jmp 0x10edb5
cmpl $0x10, %esi
jl 0x10edb9
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x2e2e54(%rip), %xmm0, %xmm0 # 0x3f1b70
leal -0x51(%rdx), %eax
cmpl $0x1e, %eax
setb %al
leal -0x75(%rdx), %ecx
cmpl $0x36, %ecx
setb %cl
vpminud 0x2e3b59(%rip), %xmm0, %xmm1 # 0x3f2890
vpcmpeqd %xmm1, %xmm0, %xmm0
vtestps %xmm0, %xmm0
setne %sil
orb %al, %cl
orb %sil, %cl
movb $0x1, %al
jne 0x10ec05
addl $0xffffff4f, %edx # imm = 0xFFFFFF4F
cmpl $0x6, %edx
jmp 0x10edb5
cmpl $0x10, %esi
jl 0x10ec03
leal -0x1f(%rdx), %ecx
cmpl $0xe, %ecx
jae 0x10ed7d
movb $0x1, %al
movl $0x3f03, %esi # imm = 0x3F03
btl %ecx, %esi
jb 0x10ec05
addl $-0x2f, %edx
cmpl $0xa6, %edx
jmp 0x10edb5
cmpl $0x8, %esi
jl 0x10ec03
leal -0xb(%rdx), %ecx
cmpl $0x22, %ecx
jae 0x10edaf
movb $0x1, %al
movabsq $0x30ff0030f, %rsi # imm = 0x30FF0030F
btq %rcx, %rsi
jb 0x10ec05
addl $-0x37, %edx
cmpl $0x6c, %edx
setb %al
retq
cmpl $0x8, %esi
jl 0x10ec03
leal -0x2f(%rdx), %ecx
movb $0x1, %al
cmpl $0x52, %ecx
jb 0x10ec05
leal -0x13(%rdx), %ecx
cmpl $0x1a, %ecx
jae 0x10ede6
movl $0x30fc003, %esi # imm = 0x30FC003
btl %ecx, %esi
jb 0x10ec05
addl $0xffffff65, %edx # imm = 0xFFFFFF65
cmpl $0x38, %edx
jmp 0x10edb5
|
/csukuangfj[P]ncnn/build_O2/src/layer/x86/convolution_x86_avx.cpp
|
ncnn::convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
|
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (outch >= 4)
kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);
int q = 0;
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// TODO unroll 2
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
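
For the main (outch >= 4) path above, the write pointer g00 only ever increments, so the destination layout can equally be expressed as a flat byte index: within channel q / 4, the weight for (q + i, p + j, k) lands at ((p / 8) * maxk + k) * 32 + i * 8 + j. A hypothetical helper making that explicit (illustrative only, covering just the outch >= 4 branch):

// Byte offset of source weight (q + i, p + j, k) inside channel (q / 4)
// of the 8a-4b-maxk interleaved layout written above.
static int pack8to1_offset(int p, int k, int i, int j, int maxk)
{
    return ((p / 8) * maxk + k) * 32 + i * 8 + j;
}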
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl %r8d, %ebx
movl %ecx, %ebp
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %rsi
imull %r9d, %ebx
movq %rsp, %rdi
movl %ebx, %edx
movl %r15d, %ecx
movl %ebp, %r8d
xorl %r9d, %r9d
callq 0x63020
pushq $0x8
popq %rcx
movl %r15d, %eax
cltd
idivl %ecx
cmpl $0x4, %ebp
jl 0x1291b7
movl %ebx, %esi
shll $0x5, %esi
movl %ebp, %ecx
shrl $0x2, %ecx
imull $-0x3, %ecx, %ecx
addl %ebp, %ecx
jmp 0x1291c0
leal (,%rbx,8), %esi
movl %ebp, %ecx
xorl %r12d, %r12d
pushq $0x1
popq %r8
movq %r14, %rdi
movl %eax, %edx
xorl %r9d, %r9d
callq 0x63810
testl %ebx, %ebx
cmovlel %r12d, %ebx
movslq %r15d, %rax
movslq %ebp, %rcx
movq %r12, %rdx
orq $0x3, %rdx
cmpq %rcx, %rdx
jge 0x129305
movq %r12, %rdx
shrq $0x2, %rdx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0x12927b
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x129275
xorl %r8d, %r8d
cmpq $0x4, %r8
je 0x129270
movq %r8, %r10
orq %r12, %r10
xorl %r9d, %r9d
cmpq $0x8, %r9
je 0x129268
movq 0x40(%rsp), %r11
imulq %r10, %r11
movq 0x10(%rsp), %r15
imulq %r15, %r11
addq (%rsp), %r11
movslq 0x2c(%rsp), %r13
leaq (%rsi,%r9), %rbp
imulq %r13, %rbp
imulq %r15, %rbp
addq %r11, %rbp
movb (%rdi,%rbp), %r11b
movb %r11b, (%rdx,%r9)
incq %r9
jmp 0x12922b
incq %r8
addq %r9, %rdx
jmp 0x12921c
incq %rdi
jmp 0x129214
addq $0x8, %rsi
jmp 0x129206
addq $0x4, %r12
jmp 0x1291e0
movl %r12d, %esi
shrl $0x2, %esi
movl %r12d, %edx
andl $0x3, %edx
addl %esi, %edx
imulq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
addq (%r14), %rdx
xorl %esi, %esi
movq %rsi, %rdi
orq $0x7, %rdi
cmpq %rax, %rdi
jge 0x129302
xorl %edi, %edi
cmpq %rbx, %rdi
je 0x1292fc
xorl %r8d, %r8d
cmpq $0x8, %r8
je 0x1292f4
movq 0x40(%rsp), %r9
imulq %r12, %r9
movq 0x10(%rsp), %r10
imulq %r10, %r9
addq (%rsp), %r9
movslq 0x2c(%rsp), %r11
leaq (%rsi,%r8), %r15
imulq %r11, %r15
imulq %r10, %r15
addq %r9, %r15
movb (%rdi,%r15), %r9b
movb %r9b, (%rdx,%r8)
incq %r8
jmp 0x1292b7
incq %rdi
addq %r8, %rdx
jmp 0x1292af
addq $0x8, %rsi
jmp 0x1292a1
incq %r12
cmpq %rcx, %r12
jl 0x129284
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x12933b
lock
decl (%rax)
jne 0x12933b
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x129333
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x12933b
movq %rsi, %rdi
callq 0x5f3e0
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x129384
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x12937c
lock
decl (%rax)
jne 0x12937c
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x129376
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x12937c
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1_int8.h
|
ncnn::conv3x3s1_winograd43_transform_kernel_pack8to4_int8_sse_avx2(ncnn::Mat const&, ncnn::Mat&, int, int, ncnn::Option const&)
|
void conv3x3s1_winograd43_transform_kernel_pack8to4_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
conv3x3s1_winograd43_transform_kernel_pack8to4_int8_sse(kernel, kernel_tm, inch, outch, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movl %ecx, %ebx
movl %edx, %r14d
movq %rsi, %r15
movq %rdi, 0x38(%rsp)
leaq 0x60(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %r12d, %r12d
pushq $0x24
popq %rsi
pushq $0x2
popq %r8
xorl %r9d, %r9d
callq 0x63810
testl %r14d, %r14d
movl $0x0, %eax
cmovgl %r14d, %eax
movq %rax, 0x18(%rsp)
testl %ebx, %ebx
movl $0x0, %eax
cmovgl %ebx, %eax
movq %rax, 0x48(%rsp)
leal (%r14,%r14,8), %eax
movl %eax, 0x30(%rsp)
movl %ebx, 0x34(%rsp)
movq %r14, 0x40(%rsp)
movq %r15, 0x50(%rsp)
cmpq 0x48(%rsp), %r12
je 0x136676
movl 0x30(%rsp), %eax
imull %r12d, %eax
movslq %eax, %rcx
movq 0x38(%rsp), %rax
addq (%rax), %rcx
movq %rcx, 0x8(%rsp)
movslq 0x8c(%rsp), %rax
movq 0x70(%rsp), %r11
imulq %r11, %rax
movq %rax, 0x58(%rsp)
imulq 0xa0(%rsp), %r11
imulq %r12, %r11
addq 0x60(%rsp), %r11
xorl %eax, %eax
movq %r12, 0x10(%rsp)
cmpq 0x18(%rsp), %rax
je 0x136669
movq %r11, 0x28(%rsp)
movq %rax, 0x20(%rsp)
leaq (%rax,%rax,8), %rax
movq 0x8(%rsp), %rdi
movsbl (%rdi,%rax), %ecx
movsbl 0x1(%rdi,%rax), %edx
movsbl 0x2(%rdi,%rax), %esi
movsbl 0x3(%rdi,%rax), %r10d
movsbl 0x4(%rdi,%rax), %r11d
movsbl 0x5(%rdi,%rax), %ebp
movsbl 0x6(%rdi,%rax), %r13d
movsbl 0x7(%rdi,%rax), %r9d
movsbl 0x8(%rdi,%rax), %eax
movswl %cx, %ebx
movswl %dx, %r15d
movswl %si, %r14d
movswl %r10w, %r12d
movswl %r11w, %edx
movswl %bp, %ecx
movswl %r13w, %r13d
movswl %r9w, %esi
cwtl
pushq $0x4
popq %r10
cmpq $0x28, %r10
je 0x1365da
leaq 0x2bc591(%rip), %r8 # 0x3f2af0
movzwl -0x4(%r10,%r8), %ebp
movl %ebp, %r9d
imull %ebx, %r9d
movzwl -0x2(%r10,%r8), %edi
movl %edi, %r11d
imull %r15d, %r11d
addl %r9d, %r11d
movzwl (%r10,%r8), %r8d
movl %r8d, %r9d
imull %r14d, %r9d
addl %r11d, %r9d
movw %r9w, 0xac(%rsp,%r10)
movl %ebp, %r9d
imull %r12d, %r9d
movl %edi, %r11d
imull %edx, %r11d
addl %r9d, %r11d
movl %r8d, %r9d
imull %ecx, %r9d
addl %r11d, %r9d
movw %r9w, 0xae(%rsp,%r10)
imull %r13d, %ebp
imull %esi, %edi
addl %ebp, %edi
imull %eax, %r8d
addl %edi, %r8d
movw %r8w, 0xb0(%rsp,%r10)
addq $0x6, %r10
jmp 0x13654e
movq 0x28(%rsp), %r11
movq %r11, %rcx
xorl %edx, %edx
movl 0x34(%rsp), %ebx
movq 0x50(%rsp), %r15
movq 0x10(%rsp), %r12
leaq 0x2bc4f9(%rip), %r9 # 0x3f2af2
cmpq $0x6, %rdx
je 0x136657
imulq $0x6, %rdx, %rsi
movzwl 0xb0(%rsp,%rsi), %eax
vmovd 0xb2(%rsp,%rsi), %xmm0
movq %r9, %rsi
xorl %r10d, %r10d
cmpq $0x6, %r10
je 0x13664e
movzwl -0x2(%rsi), %edi
imulw %ax, %di
vmovd (%rsi), %xmm1
vpmullw %xmm0, %xmm1, %xmm1
vmovd %xmm1, %r8d
addl %edi, %r8d
vpextrw $0x1, %xmm1, %edi
addl %r8d, %edi
movw %di, (%rcx,%r10,2)
incq %r10
addq $0x6, %rsi
jmp 0x13661a
incq %rdx
addq $0xc, %rcx
jmp 0x1365f9
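# ^ the two loop nests above build one 6x6 int16 tile: first the 3x3 int8
#   kernel is multiplied against the 6x3 short table (three dot products per
#   row into a stack buffer), then that 6x3 intermediate is multiplied
#   against the same table again, two lanes at a time via vpmullw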
movq 0x20(%rsp), %rax
incq %rax
addq 0x58(%rsp), %r11
jmp 0x1364dd
incq %r12
movq 0x40(%rsp), %r14
jmp 0x13648b
pushq $0x8
popq %rcx
movl %r14d, %eax
cltd
idivl %ecx
movl %eax, %esi
pushq $0x4
popq %rcx
movl %ebx, %eax
cltd
idivl %ecx
andq $0x0, (%rsp)
pushq $0x24
popq %rdx
pushq $0x40
popq %r8
pushq $0x20
popq %r9
movq %r15, %rdi
movl %eax, %ecx
callq 0x628f2
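# ^ likely ncnn::Mat::create(w=inch/8, h=36, c=outch/4, elemsize=64,
#   elempack=32, allocator=0): the pack8to4 destination, 32 int16 lanes
#   (8 input x 4 output channels) per packed element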
movslq 0x2c(%r15), %rcx
movq (%r15), %rax
movq %rax, 0x10(%rsp)
movq 0x10(%r15), %rax
movq 0x40(%r15), %rdx
imulq %rax, %rdx
movq %rdx, 0x8(%rsp)
imulq %rax, %rcx
movq %rcx, 0x20(%rsp)
movslq %r14d, %rsi
movslq %ebx, %rax
movq %rax, 0x18(%rsp)
xorl %edx, %edx
movq %rdx, %rax
orq $0x3, %rax
cmpq 0x18(%rsp), %rax
jge 0x1367b6
movq %rdx, %rax
shrq $0x2, %rax
imulq 0x8(%rsp), %rax
addq 0x10(%rsp), %rax
movq %rax, 0x28(%rsp)
xorl %r10d, %r10d
xorl %r11d, %r11d
cmpq $0x24, %r11
je 0x1367ad
movq 0x20(%rsp), %rbx
imulq %r11, %rbx
addq 0x28(%rsp), %rbx
movq 0x70(%rsp), %rcx
movq 0xa0(%rsp), %r14
imulq %rcx, %r14
movq %r14, %rax
movq %rdx, %r9
imulq %rdx, %rax
addq %r10, %rax
addq 0x60(%rsp), %rax
movslq 0x8c(%rsp), %r12
imulq %rcx, %r12
leaq (,%r12,8), %r13
xorl %ebp, %ebp
movq %rbp, %rcx
orq $0x7, %rcx
cmpq %rsi, %rcx
jge 0x13679e
movq %rax, %rcx
xorl %edx, %edx
cmpq $0x4, %rdx
je 0x136795
xorl %edi, %edi
movq %rcx, %r15
cmpq $0x10, %rdi
je 0x13678a
movzwl (%r15), %r8d
movw %r8w, (%rbx,%rdi)
addq %r12, %r15
addq $0x2, %rdi
jmp 0x136772
incq %rdx
addq %r14, %rcx
addq %rdi, %rbx
jmp 0x136767
addq $0x8, %rbp
addq %r13, %rax
jmp 0x136756
incq %r11
addq $0x2, %r10
movq %r9, %rdx
jmp 0x136705
addq $0x4, %rdx
jmp 0x1366d6
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x1367e7
lock
decl (%rax)
jne 0x1367e7
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
je 0x1367df
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1367e7
movq %rsi, %rdi
callq 0x5f3e0
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x136837
movq %rax, %rbx
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x13682f
lock
decl (%rax)
jne 0x13682f
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
jne 0x136829
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x13682f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
|
/csukuangfj[P]ncnn/src/layer/x86/convolution_x86_avx2.cpp
|