@@ -310,6 +310,55 @@ do { \
 	} \
 } while (0)
 
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
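+/*
+ * On 32-bit, a 64-bit user load is done as two 32-bit loads; a fault
+ * in either load branches straight to @label via its extable entry.
+ */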
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, label) do {				\
+	unsigned int __gu_low, __gu_high;				\
+	const unsigned int __user *__gu_ptr;				\
+	__gu_ptr = (const void __user *)(ptr);				\
+	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
+	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
+	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
+} while (0)
+#else
+#define __get_user_asm_u64(x, ptr, label)				\
+	__get_user_asm(x, ptr, "q", "=r", label)
+#endif
+
+#define __get_user_size(x, ptr, size, label)				\
+do {									\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	unsigned char x_u8__;						\
+	case 1:								\
+		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
+		(x) = x_u8__;						\
+		break;							\
+	case 2:								\
+		__get_user_asm(x, ptr, "w", "=r", label);		\
+		break;							\
+	case 4:								\
+		__get_user_asm(x, ptr, "l", "=r", label);		\
+		break;							\
+	case 8:								\
+		__get_user_asm_u64(x, ptr, label);			\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
+	}								\
+} while (0)
+
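+/*
+ * asm goto with an output operand: the extable fixup for the access
+ * branches directly to @label, so no error code is threaded back.
+ */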
+#define __get_user_asm(x, addr, itype, ltype, label)			\
+	asm_volatile_goto("\n"						\
+		     "1:	mov"itype" %[umem],%[output]\n"		\
+		     _ASM_EXTABLE_UA(1b, %l2)				\
+		     : [output] ltype(x)				\
+		     : [umem] "m" (__m(addr))				\
+		     : : label)
+
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval)				\
 ({									\
@@ -378,6 +427,8 @@ do { \
 	: [umem] "m" (__m(addr)),					\
 	  [efault] "i" (-EFAULT), "0" (err))
 
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
 /* FIXME: this hack is definitely wrong -AK */
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct __user *)(x))
@@ -452,6 +503,14 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 #define unsafe_put_user(x, ptr, label)					\
 	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
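+/*
+ * With asm goto outputs, a faulting access jumps straight to err_label
+ * instead of setting an error code that has to be tested afterwards.
+ */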
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define unsafe_get_user(x, ptr, err_label)					\
+do {										\
+	__inttype(*(ptr)) __gu_val;						\
+	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
+} while (0)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 #define unsafe_get_user(x, ptr, err_label)					\
 do {										\
 	int __gu_err;								\
@@ -460,6 +519,7 @@ do { \
 	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
 	if (unlikely(__gu_err)) goto err_label;					\
 } while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
/*
  * We want the unsafe accessors to always be inlined and use
@@ -486,6 +546,11 @@ do { \
 
 #define HAVE_GET_KERNEL_NOFAULT
 
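+/* Likewise, with asm goto outputs no local error variable is needed. */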
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_kernel_nofault(dst, src, type, err_label)			\
+	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
+			sizeof(type), err_label)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 #define __get_kernel_nofault(dst, src, type, err_label)			\
 do {										\
 	int __kr_err;								\
@@ -495,6 +560,7 @@ do { \
 	if (unlikely(__kr_err))							\
 		goto err_label;							\
 } while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
 #define __put_kernel_nofault(dst, src, type, err_label)			\
 	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
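For context, callers use unsafe_get_user() inside a user_access_begin()/user_access_end() section, with a shared label for the fault path. A minimal sketch of that pattern under this patch (the function name and field layout are hypothetical, not part of the patch):

	/* Hedged example: copy two 32-bit words from userspace. */
	static int read_pair(const u32 __user *uptr, u32 *lo, u32 *hi)
	{
		if (!user_access_begin(uptr, 2 * sizeof(u32)))
			return -EFAULT;
		unsafe_get_user(*lo, uptr, Efault);	/* fault jumps to Efault */
		unsafe_get_user(*hi, uptr + 1, Efault);
		user_access_end();
		return 0;
	Efault:
		user_access_end();
		return -EFAULT;
	}

With CONFIG_CC_HAS_ASM_GOTO_OUTPUT, the two accesses compile to plain loads whose extable fixups branch to Efault, so no error flag is tested on the hot path.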