0003-arm-CVE-2020-6096-Fix-multiarch-memcpy-for-negative-.patch
From beea361050728138b82c57dda0c4810402d342b9 Mon Sep 17 00:00:00 2001
From: Alexander Anisimov <[email protected]>
Date: Wed, 8 Jul 2020 14:18:31 +0200
Subject: [PATCH] arm: CVE-2020-6096: Fix multiarch memcpy for negative
 length [BZ #25620]

Unsigned branch instructions could be used for r2 to fix the wrong
behavior when a negative length is passed to memcpy.
This commit fixes the armv7 version.
---
sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
index bf4ac7077f..379bb56fc9 100644
--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
@@ -268,7 +268,7 @@ ENTRY(memcpy)
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
- bge .Lcpy_not_short
+ bhs .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
@@ -351,10 +351,10 @@ ENTRY(memcpy)
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
- blt .Ltail63aligned
+ blo .Ltail63aligned
cmp tmp2, #512
- bge .Lcpy_body_long
+ bhs .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
#ifdef USE_VFP
@@ -378,7 +378,7 @@ ENTRY(memcpy)
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
- bge 1b
+ bhs 1b
tst tmp2, #0x3f
beq .Ldone
@@ -412,7 +412,7 @@ ENTRY(memcpy)
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
- bge 1b
+ bhs 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE
@@ -482,7 +482,7 @@ ENTRY(memcpy)
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
- blt 2f
+ blo 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
@@ -494,7 +494,7 @@ ENTRY(memcpy)
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
- bge 1b
+ bhs 1b
2:
cpy_tail_vfp d3, 0
@@ -615,8 +615,8 @@ ENTRY(memcpy)
1:
pld [src, #(3 * 64)]
subs count, count, #64
- ldrmi tmp2, [sp], #FRAME_SIZE
- bmi .Ltail63unaligned
+ ldrlo tmp2, [sp], #FRAME_SIZE
+ blo .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
@@ -633,7 +633,7 @@ ENTRY(memcpy)
neon_load_multi d0-d3, src
neon_load_multi d4-d7, src
subs count, count, #64
- bmi 2f
+ blo 2f
1:
pld [src, #(4 * 64)]
neon_store_multi d0-d3, dst
@@ -641,7 +641,7 @@ ENTRY(memcpy)
neon_store_multi d4-d7, dst
neon_load_multi d4-d7, src
subs count, count, #64
- bpl 1b
+ bhs 1b
2:
neon_store_multi d0-d3, dst
neon_store_multi d4-d7, dst
--
2.17.1
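
The change above is mechanical: every branch that classifies the copy length
with a signed condition (bge, blt, bmi, bpl) now uses the corresponding
unsigned condition (bhs, blo). On ARM, bge/blt test the signed result of the
preceding cmp/subs, bmi/bpl test only the sign bit, while bhs/blo perform an
unsigned comparison via the carry flag. Because memcpy's count is a size_t, a
caller that passes a negative int produces a huge unsigned value, and the old
signed branches misclassify it as a short copy. A minimal C sketch of that
misclassification follows; the caller with len = -1 is hypothetical and not
part of the patch, and the signed-conversion result assumes a typical
two's-complement target.

    #include <stdio.h>
    #include <stddef.h>

    int main (void)
    {
      int len = -1;                  /* negative length from a buggy caller */
      size_t count = (size_t) len;   /* what memcpy receives: SIZE_MAX */

      /* Old code: "cmp count, #64; bge .Lcpy_not_short" is a signed test.
         (long) count is -1 here, so -1 >= 64 is false and the short-copy
         exit block is taken with a huge count.  */
      printf ("signed   >= 64: %d\n", (long) count >= 64);   /* prints 0 */

      /* New code: "bhs .Lcpy_not_short" is an unsigned test.
         count is SIZE_MAX, so count >= 64 is true and the length is
         routed to the large-copy path instead.  */
      printf ("unsigned >= 64: %d\n", count >= 64);          /* prints 1 */
      return 0;
    }

The same reasoning applies to the loop-termination branches in the diff:
after "subs tmp2, tmp2, #64" the remaining count is unsigned, so bge/bpl
become bhs and blt/bmi become blo.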