Skip to content

Commit 2692a71

Browse files
author
Al Viro
committed
Merge branch 'work.uaccess' into for-linus
2 parents: 7041c57 + b065444 · commit 2692a71

9 files changed

Lines changed: 47 additions & 78 deletions

File tree

arch/alpha/include/asm/uaccess.h

Lines changed: 5 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
396396
extern inline long
397397
copy_from_user(void *to, const void __user *from, long n)
398398
{
399+
long res = n;
399400
if (likely(__access_ok((unsigned long)from, n, get_fs())))
400-
n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
401-
else
402-
memset(to, 0, n);
403-
return n;
401+
res = __copy_from_user_inatomic(to, from, n);
402+
if (unlikely(res))
403+
memset(to + (n - res), 0, res);
404+
return res;
404405
}
405406

406407
extern void __do_clear_user(void);

arch/alpha/lib/copy_user.S

Lines changed: 1 addition & 15 deletions
Original file line number · Diff line number · Diff line change
@@ -124,22 +124,8 @@ $65:
124124
bis $31,$31,$0
125125
$41:
126126
$35:
127-
$exitout:
128-
ret $31,($28),1
129-
130127
$exitin:
131-
/* A stupid byte-by-byte zeroing of the rest of the output
132-
buffer. This cures security holes by never leaving
133-
random kernel data around to be copied elsewhere. */
134-
135-
mov $0,$1
136-
$101:
137-
EXO ( ldq_u $2,0($6) )
138-
subq $1,1,$1
139-
mskbl $2,$6,$2
140-
EXO ( stq_u $2,0($6) )
141-
addq $6,1,$6
142-
bgt $1,$101
128+
$exitout:
143129
ret $31,($28),1
144130

145131
.end __copy_user

arch/alpha/lib/ev6-copy_user.S

Lines changed: 1 addition & 22 deletions
Original file line number · Diff line number · Diff line change
@@ -227,33 +227,12 @@ $dirtyentry:
227227
bgt $0,$onebyteloop # U .. .. .. : U L U L
228228

229229
$zerolength:
230+
$exitin:
230231
$exitout: # Destination for exception recovery(?)
231232
nop # .. .. .. E
232233
nop # .. .. E ..
233234
nop # .. E .. ..
234235
ret $31,($28),1 # L0 .. .. .. : L U L U
235236

236-
$exitin:
237-
238-
/* A stupid byte-by-byte zeroing of the rest of the output
239-
buffer. This cures security holes by never leaving
240-
random kernel data around to be copied elsewhere. */
241-
242-
nop
243-
nop
244-
nop
245-
mov $0,$1
246-
247-
$101:
248-
EXO ( stb $31,0($6) ) # L
249-
subq $1,1,$1 # E
250-
addq $6,1,$6 # E
251-
bgt $1,$101 # U
252-
253-
nop
254-
nop
255-
nop
256-
ret $31,($28),1 # L0
257-
258237
.end __copy_user
259238

arch/arc/kernel/signal.c

Lines changed: 4 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
107107
struct user_regs_struct uregs;
108108

109109
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
110-
if (!err)
111-
set_current_blocked(&set);
112-
113110
err |= __copy_from_user(&uregs.scratch,
114111
&(sf->uc.uc_mcontext.regs.scratch),
115112
sizeof(sf->uc.uc_mcontext.regs.scratch));
113+
if (err)
114+
return err;
116115

116+
set_current_blocked(&set);
117117
regs->bta = uregs.scratch.bta;
118118
regs->lp_start = uregs.scratch.lp_start;
119119
regs->lp_end = uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
138138
regs->r0 = uregs.scratch.r0;
139139
regs->sp = uregs.scratch.sp;
140140

141-
return err;
141+
return 0;
142142
}
143143

144144
static inline int is_do_ss_needed(unsigned int magic)

arch/arm/include/asm/uaccess.h

Lines changed: 6 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
533533

534534
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
535535
{
536-
if (access_ok(VERIFY_READ, from, n))
537-
n = __copy_from_user(to, from, n);
538-
else /* security hole - plug it */
539-
memset(to, 0, n);
540-
return n;
536+
unsigned long res = n;
537+
if (likely(access_ok(VERIFY_READ, from, n)))
538+
res = __copy_from_user(to, from, n);
539+
if (unlikely(res))
540+
memset(to + (n - res), 0, res);
541+
return res;
541542
}
542543

543544
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)

arch/arm/lib/copy_from_user.S

Lines changed: 3 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -98,12 +98,9 @@ ENDPROC(arm_copy_from_user)
9898
.pushsection .fixup,"ax"
9999
.align 0
100100
copy_abort_preamble
101-
ldmfd sp!, {r1, r2}
102-
sub r3, r0, r1
103-
rsb r1, r3, r2
104-
str r1, [sp]
105-
bl __memzero
106-
ldr r0, [sp], #4
101+
ldmfd sp!, {r1, r2, r3}
102+
sub r0, r0, r1
103+
rsb r0, r0, r2
107104
copy_abort_end
108105
.popsection
109106

arch/arm64/include/asm/uaccess.h

Lines changed: 6 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -278,14 +278,16 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
278278

279279
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
280280
{
281+
unsigned long res = n;
281282
kasan_check_write(to, n);
282283

283284
if (access_ok(VERIFY_READ, from, n)) {
284285
check_object_size(to, n, false);
285-
n = __arch_copy_from_user(to, from, n);
286-
} else /* security hole - plug it */
287-
memset(to, 0, n);
288-
return n;
286+
res = __arch_copy_from_user(to, from, n);
287+
}
288+
if (unlikely(res))
289+
memset(to + (n - res), 0, res);
290+
return res;
289291
}
290292

291293
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)

arch/arm64/lib/copy_from_user.S

Lines changed: 1 addition & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -79,11 +79,6 @@ ENDPROC(__arch_copy_from_user)
7979

8080
.section .fixup,"ax"
8181
.align 2
82-
9998:
83-
sub x0, end, dst
84-
9999:
85-
strb wzr, [dst], #1 // zero remaining buffer space
86-
cmp dst, end
87-
b.lo 9999b
82+
9998: sub x0, end, dst // bytes not copied
8883
ret
8984
.previous

arch/blackfin/include/asm/uaccess.h

Lines changed: 20 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -163,31 +163,39 @@ static inline int bad_user_access_length(void)
163163
: "a" (__ptr(ptr))); \
164164
})
165165

166-
#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
167-
#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
168166
#define __copy_to_user_inatomic __copy_to_user
169167
#define __copy_from_user_inatomic __copy_from_user
170168

169+
static inline unsigned long __must_check
170+
__copy_from_user(void *to, const void __user *from, unsigned long n)
171+
{
172+
memcpy(to, (const void __force *)from, n);
173+
return 0;
174+
}
175+
176+
static inline unsigned long __must_check
177+
__copy_to_user(void __user *to, const void *from, unsigned long n)
178+
{
179+
memcpy((void __force *)to, from, n);
180+
SSYNC();
181+
return 0;
182+
}
183+
171184
static inline unsigned long __must_check
172185
copy_from_user(void *to, const void __user *from, unsigned long n)
173186
{
174-
if (likely(access_ok(VERIFY_READ, from, n))) {
175-
memcpy(to, (const void __force *)from, n);
176-
return 0;
177-
}
187+
if (likely(access_ok(VERIFY_READ, from, n)))
188+
return __copy_from_user(to, from, n);
178189
memset(to, 0, n);
179190
return n;
180191
}
181192

182193
static inline unsigned long __must_check
183194
copy_to_user(void __user *to, const void *from, unsigned long n)
184195
{
185-
if (access_ok(VERIFY_WRITE, to, n))
186-
memcpy((void __force *)to, from, n);
187-
else
188-
return n;
189-
SSYNC();
190-
return 0;
196+
if (likely(access_ok(VERIFY_WRITE, to, n)))
197+
return __copy_to_user(to, from, n);
198+
return n;
191199
}
192200

193201
/*

0 commit comments

Comments (0)