path: root/arch/x86/include/asm
diff options
author: Robert Richter <robert.richter@amd.com> 2011-06-07 11:49:55 +0200
committer: Ingo Molnar <mingo@elte.hu> 2011-07-21 20:41:57 +0200
commit1ac2e6ca44e13a087eb7438d284f887097ee7e84 (patch)
tree7ee4f00a67850e1e247610cefeae0ee7aa7b239a /arch/x86/include/asm
parent9985c20f9e4aee6857c08246b273a3695a52b929 (diff)
x86, perf: Make copy_from_user_nmi() a library function
copy_from_user_nmi() is used in oprofile and perf. Moving it to other library functions like copy_from_user(). As this is x86 code for 32 and 64 bits, create a new file usercopy.c for unified code. Signed-off-by: Robert Richter <robert.richter@amd.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/20110607172413.GJ20052@erda.amd.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm')
1 file changed, 3 insertions, 0 deletions
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 99ddd148a760..36361bf6fdd1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; };
#endif /* CONFIG_X86_WP_WORKS_OK */
+extern unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
* movsl can be slow when source and dest are not both 8-byte aligned

Privacy Policy