Commit 6f1fd2b8 authored by David Benjamin, committed by Chromium LUCI CQ

Roll src/third_party/boringssl/src 5656fec51..3094902fc

https://boringssl.googlesource.com/boringssl/+log/5656fec512a8dfd2833fbf47b6aaa76364c26fad..3094902fcdc2db2cc832fa854b9a6a8be383926c

The following commits have update notes:
  c3f4612d8 Only accept little-endian ARM and MIPS variants in base.h.
  0a6bfa36c Always check the TLS 1.3 downgrade signal.
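
For context on the second note: RFC 8446, section 4.1.3 has a TLS 1.3-capable server embed a sentinel in the last eight bytes of ServerHello.random whenever it negotiates TLS 1.2 or below, and a client that offered TLS 1.3 must abort on seeing it. A minimal sketch of that check (illustrative only, not BoringSSL's actual code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* RFC 8446 downgrade sentinels: "DOWNGRD" plus 0x01 (server capped at
       TLS 1.2) or 0x00 (TLS 1.1 or below). Illustrative constants only. */
    static const uint8_t kDowngradeTLS12[8] = {0x44, 0x4f, 0x57, 0x4e,
                                               0x47, 0x52, 0x44, 0x01};
    static const uint8_t kDowngradeTLS11[8] = {0x44, 0x4f, 0x57, 0x4e,
                                               0x47, 0x52, 0x44, 0x00};

    /* Returns true if a client that offered TLS 1.3 should abort: an older
       version was negotiated and the server random carries a sentinel. */
    static bool tls13_downgrade_detected(const uint8_t server_random[32],
                                         uint16_t negotiated_version) {
      const uint8_t *tail = server_random + 32 - 8;
      if (negotiated_version >= 0x0304 /* TLS 1.3 */) {
        return false;
      }
      return memcmp(tail, kDowngradeTLS12, 8) == 0 ||
             memcmp(tail, kDowngradeTLS11, 8) == 0;
    }

The update note's title indicates BoringSSL now performs this check unconditionally.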

Bug: none
Change-Id: I2beb676d8ccdbc04628f2668dd88ae35558b4442
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2569382
Commit-Queue: David Benjamin <davidben@chromium.org>
Commit-Queue: Steven Valdez <svaldez@chromium.org>
Auto-Submit: David Benjamin <davidben@chromium.org>
Reviewed-by: Steven Valdez <svaldez@chromium.org>
Cr-Commit-Position: refs/heads/master@{#833386}
parent 637f4404
@@ -230,7 +230,7 @@ vars = {
   #
   # Note this revision should be updated with
   # third_party/boringssl/roll_boringssl.py, not roll-dep.
-  'boringssl_revision': '5656fec512a8dfd2833fbf47b6aaa76364c26fad',
+  'boringssl_revision': '3094902fcdc2db2cc832fa854b9a6a8be383926c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling google-toolbox-for-mac
   # and whatever else without interference from each other.
...
@@ -620,20 +620,34 @@ _aes_hw_ctr32_encrypt_blocks:
  add x7,x3,#32
  mov w6,w5
  csel x12,xzr,x12,lo
+ // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
+ // affected by silicon errata #1742098 [0] and #1655431 [1],
+ // respectively, where the second instruction of an aese/aesmc
+ // instruction pair may execute twice if an interrupt is taken right
+ // after the first instruction consumes an input register of which a
+ // single 32-bit lane has been updated the last time it was modified.
+ //
+ // This function uses a counter in one 32-bit lane. The vmov lines
+ // could write to v1.16b and v18.16b directly, but that trips this bug.
+ // We write to v6.16b and copy to the final register as a workaround.
+ //
+ // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
+ // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
 #ifndef __ARMEB__
  rev w8, w8
 #endif
- orr v1.16b,v0.16b,v0.16b
  add w10, w8, #1
- orr v18.16b,v0.16b,v0.16b
- add w8, w8, #2
  orr v6.16b,v0.16b,v0.16b
  rev w10, w10
- mov v1.s[3],w10
+ mov v6.s[3],w10
+ add w8, w8, #2
+ orr v1.16b,v6.16b,v6.16b
  b.ls Lctr32_tail
  rev w12, w8
+ mov v6.s[3],w12
  sub x2,x2,#3 // bias
- mov v18.s[3],w12
+ orr v18.16b,v6.16b,v6.16b
  b Loop3x_ctr32
 .align 4
@@ -660,11 +674,11 @@ Loop3x_ctr32:
  aese v1.16b,v16.16b
  aesmc v5.16b,v1.16b
  ld1 {v2.16b},[x0],#16
- orr v0.16b,v6.16b,v6.16b
+ add w9,w8,#1
  aese v18.16b,v16.16b
  aesmc v18.16b,v18.16b
  ld1 {v3.16b},[x0],#16
- orr v1.16b,v6.16b,v6.16b
+ rev w9,w9
  aese v4.16b,v17.16b
  aesmc v4.16b,v4.16b
  aese v5.16b,v17.16b
@@ -673,8 +687,6 @@ Loop3x_ctr32:
  mov x7,x3
  aese v18.16b,v17.16b
  aesmc v17.16b,v18.16b
- orr v18.16b,v6.16b,v6.16b
- add w9,w8,#1
  aese v4.16b,v20.16b
  aesmc v4.16b,v4.16b
  aese v5.16b,v20.16b
@@ -689,21 +701,26 @@ Loop3x_ctr32:
  aesmc v4.16b,v4.16b
  aese v5.16b,v21.16b
  aesmc v5.16b,v5.16b
+ // Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
+ // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
+ // 32-bit mode. See the comment above.
  eor v19.16b,v19.16b,v7.16b
- rev w9,w9
+ mov v6.s[3], w9
  aese v17.16b,v21.16b
  aesmc v17.16b,v17.16b
- mov v0.s[3], w9
+ orr v0.16b,v6.16b,v6.16b
  rev w10,w10
  aese v4.16b,v22.16b
  aesmc v4.16b,v4.16b
+ mov v6.s[3], w10
+ rev w12,w8
  aese v5.16b,v22.16b
  aesmc v5.16b,v5.16b
- mov v1.s[3], w10
- rev w12,w8
+ orr v1.16b,v6.16b,v6.16b
+ mov v6.s[3], w12
  aese v17.16b,v22.16b
  aesmc v17.16b,v17.16b
- mov v18.s[3], w12
+ orr v18.16b,v6.16b,v6.16b
  subs x2,x2,#3
  aese v4.16b,v23.16b
  aese v5.16b,v23.16b
...
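
The remaining hunks touch checked-in assembly that roll_boringssl.py regenerates as part of the roll. Condensed, the counter-update pattern the 64-bit code switches to looks like this (a sketch distilled from the hunks above, not additional generated code):

    // Before: the aese input's most recent write is a single-lane insert,
    // which is exactly the sequence the A57/A72 errata describe.
    mov v1.s[3], w10            // lane 3 of v1 <- next counter word
    aese v1.16b, v16.16b        // consumes the lane-updated register
    aesmc v5.16b, v1.16b        // may execute twice on affected cores

    // After: lane inserts are confined to the scratch register v6; every
    // register the aese/aesmc pairs consume was last written by a full
    // 128-bit orr copy.
    mov v6.s[3], w10            // lane update lands on v6 only
    orr v1.16b, v6.16b, v6.16b  // full-register copy into the AES input
    aese v1.16b, v16.16b
    aesmc v5.16b, v1.16b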
@@ -630,20 +630,34 @@ _aes_hw_ctr32_encrypt_blocks:
  add r7,r3,#32
  mov r6,r5
  movlo r12,#0
+ @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
+ @ affected by silicon errata #1742098 [0] and #1655431 [1],
+ @ respectively, where the second instruction of an aese/aesmc
+ @ instruction pair may execute twice if an interrupt is taken right
+ @ after the first instruction consumes an input register of which a
+ @ single 32-bit lane has been updated the last time it was modified.
+ @
+ @ This function uses a counter in one 32-bit lane. The vmov lines
+ @ could write to q1 and q10 directly, but that trips this bug.
+ @ We write to q6 and copy to the final register as a workaround.
+ @
+ @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
+ @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
 #ifndef __ARMEB__
  rev r8, r8
 #endif
- vorr q1,q0,q0
  add r10, r8, #1
- vorr q10,q0,q0
- add r8, r8, #2
  vorr q6,q0,q0
  rev r10, r10
- vmov.32 d3[1],r10
+ vmov.32 d13[1],r10
+ add r8, r8, #2
+ vorr q1,q6,q6
  bls Lctr32_tail
  rev r12, r8
+ vmov.32 d13[1],r12
  sub r2,r2,#3 @ bias
- vmov.32 d21[1],r12
+ vorr q10,q6,q6
  b Loop3x_ctr32
 .align 4
@@ -670,11 +684,11 @@ Loop3x_ctr32:
 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
  vld1.8 {q2},[r0]!
- vorr q0,q6,q6
+ add r9,r8,#1
 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
  vld1.8 {q3},[r0]!
- vorr q1,q6,q6
+ rev r9,r9
 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
@@ -683,8 +697,6 @@ Loop3x_ctr32:
  mov r7,r3
 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
- vorr q10,q6,q6
- add r9,r8,#1
 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
@@ -699,21 +711,26 @@ Loop3x_ctr32:
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
+ @ Note the logic to update q0, q1, and q10 is written to work
+ @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
+ @ 32-bit mode. See the comment above.
  veor q11,q11,q7
- rev r9,r9
+ vmov.32 d13[1], r9
 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d1[1], r9
+ vorr q0,q6,q6
  rev r10,r10
 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
+ vmov.32 d13[1], r10
+ rev r12,r8
 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- vmov.32 d3[1], r10
- rev r12,r8
+ vorr q1,q6,q6
+ vmov.32 d13[1], r12
 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d21[1], r12
+ vorr q10,q6,q6
  subs r2,r2,#3
 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
...
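
The 32-bit hunks apply the same workaround; the one subtlety is AArch32 NEON register aliasing, shown in this short sketch distilled from the hunks above:

    @ q6 aliases d12/d13, so d13[1] is the top 32-bit lane of q6 --
    @ the same lane the AArch64 code writes as v6.s[3].
    vmov.32 d13[1], r10   @ q6 lane 3 <- next counter word
    vorr q1, q6, q6       @ full 128-bit copy into the aese input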
@@ -621,20 +621,34 @@ aes_hw_ctr32_encrypt_blocks:
  add x7,x3,#32
  mov w6,w5
  csel x12,xzr,x12,lo
+ // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
+ // affected by silicon errata #1742098 [0] and #1655431 [1],
+ // respectively, where the second instruction of an aese/aesmc
+ // instruction pair may execute twice if an interrupt is taken right
+ // after the first instruction consumes an input register of which a
+ // single 32-bit lane has been updated the last time it was modified.
+ //
+ // This function uses a counter in one 32-bit lane. The vmov lines
+ // could write to v1.16b and v18.16b directly, but that trips this bug.
+ // We write to v6.16b and copy to the final register as a workaround.
+ //
+ // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
+ // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
 #ifndef __ARMEB__
  rev w8, w8
 #endif
- orr v1.16b,v0.16b,v0.16b
  add w10, w8, #1
- orr v18.16b,v0.16b,v0.16b
- add w8, w8, #2
  orr v6.16b,v0.16b,v0.16b
  rev w10, w10
- mov v1.s[3],w10
+ mov v6.s[3],w10
+ add w8, w8, #2
+ orr v1.16b,v6.16b,v6.16b
  b.ls .Lctr32_tail
  rev w12, w8
+ mov v6.s[3],w12
  sub x2,x2,#3 // bias
- mov v18.s[3],w12
+ orr v18.16b,v6.16b,v6.16b
  b .Loop3x_ctr32
 .align 4
@@ -661,11 +675,11 @@ aes_hw_ctr32_encrypt_blocks:
  aese v1.16b,v16.16b
  aesmc v5.16b,v1.16b
  ld1 {v2.16b},[x0],#16
- orr v0.16b,v6.16b,v6.16b
+ add w9,w8,#1
  aese v18.16b,v16.16b
  aesmc v18.16b,v18.16b
  ld1 {v3.16b},[x0],#16
- orr v1.16b,v6.16b,v6.16b
+ rev w9,w9
  aese v4.16b,v17.16b
  aesmc v4.16b,v4.16b
  aese v5.16b,v17.16b
@@ -674,8 +688,6 @@ aes_hw_ctr32_encrypt_blocks:
  mov x7,x3
  aese v18.16b,v17.16b
  aesmc v17.16b,v18.16b
- orr v18.16b,v6.16b,v6.16b
- add w9,w8,#1
  aese v4.16b,v20.16b
  aesmc v4.16b,v4.16b
  aese v5.16b,v20.16b
@@ -690,21 +702,26 @@ aes_hw_ctr32_encrypt_blocks:
  aesmc v4.16b,v4.16b
  aese v5.16b,v21.16b
  aesmc v5.16b,v5.16b
+ // Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
+ // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
+ // 32-bit mode. See the comment above.
  eor v19.16b,v19.16b,v7.16b
- rev w9,w9
+ mov v6.s[3], w9
  aese v17.16b,v21.16b
  aesmc v17.16b,v17.16b
- mov v0.s[3], w9
+ orr v0.16b,v6.16b,v6.16b
  rev w10,w10
  aese v4.16b,v22.16b
  aesmc v4.16b,v4.16b
+ mov v6.s[3], w10
+ rev w12,w8
  aese v5.16b,v22.16b
  aesmc v5.16b,v5.16b
- mov v1.s[3], w10
- rev w12,w8
+ orr v1.16b,v6.16b,v6.16b
+ mov v6.s[3], w12
  aese v17.16b,v22.16b
  aesmc v17.16b,v17.16b
- mov v18.s[3], w12
+ orr v18.16b,v6.16b,v6.16b
  subs x2,x2,#3
  aese v4.16b,v23.16b
  aese v5.16b,v23.16b
...
@@ -619,20 +619,34 @@ aes_hw_ctr32_encrypt_blocks:
  add r7,r3,#32
  mov r6,r5
  movlo r12,#0
+ @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
+ @ affected by silicon errata #1742098 [0] and #1655431 [1],
+ @ respectively, where the second instruction of an aese/aesmc
+ @ instruction pair may execute twice if an interrupt is taken right
+ @ after the first instruction consumes an input register of which a
+ @ single 32-bit lane has been updated the last time it was modified.
+ @
+ @ This function uses a counter in one 32-bit lane. The vmov lines
+ @ could write to q1 and q10 directly, but that trips this bug.
+ @ We write to q6 and copy to the final register as a workaround.
+ @
+ @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
+ @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
 #ifndef __ARMEB__
  rev r8, r8
 #endif
- vorr q1,q0,q0
  add r10, r8, #1
- vorr q10,q0,q0
- add r8, r8, #2
  vorr q6,q0,q0
  rev r10, r10
- vmov.32 d3[1],r10
+ vmov.32 d13[1],r10
+ add r8, r8, #2
+ vorr q1,q6,q6
  bls .Lctr32_tail
  rev r12, r8
+ vmov.32 d13[1],r12
  sub r2,r2,#3 @ bias
- vmov.32 d21[1],r12
+ vorr q10,q6,q6
  b .Loop3x_ctr32
 .align 4
@@ -659,11 +673,11 @@ aes_hw_ctr32_encrypt_blocks:
 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
  vld1.8 {q2},[r0]!
- vorr q0,q6,q6
+ add r9,r8,#1
 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
  vld1.8 {q3},[r0]!
- vorr q1,q6,q6
+ rev r9,r9
 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
@@ -672,8 +686,6 @@ aes_hw_ctr32_encrypt_blocks:
  mov r7,r3
 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
- vorr q10,q6,q6
- add r9,r8,#1
 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
@@ -688,21 +700,26 @@ aes_hw_ctr32_encrypt_blocks:
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
+ @ Note the logic to update q0, q1, and q10 is written to work
+ @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
+ @ 32-bit mode. See the comment above.
  veor q11,q11,q7
- rev r9,r9
+ vmov.32 d13[1], r9
 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d1[1], r9
+ vorr q0,q6,q6
  rev r10,r10
 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
+ vmov.32 d13[1], r10
+ rev r12,r8
 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- vmov.32 d3[1], r10
- rev r12,r8
+ vorr q1,q6,q6
+ vmov.32 d13[1], r12
 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d21[1], r12
+ vorr q10,q6,q6
  subs r2,r2,#3
 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
...