uprobes/x86: Add uprobe_init_insn(), kill validate_insn_{32,64}bits()
validate_insn_32bits() and validate_insn_64bits() are very similar; turn
them into a single uprobe_init_insn() which takes an additional "bool
x86_64" argument that is passed to insn_init() and used to choose
between good_insns_64/good_insns_32.
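For reference, a condensed sketch of how the unified helper reads after
this change (pieced together from the diff below; the patch itself uses
an explicit if/else rather than the ternary):

	static int uprobe_init_insn(struct arch_uprobe *auprobe,
				    struct insn *insn, bool x86_64)
	{
		u32 volatile *good_insns;

		insn_init(insn, auprobe->insn, x86_64);
		insn_get_opcode(insn);
		if (is_prefix_bad(insn))
			return -ENOTSUPP;

		/* pick the per-mode table of good 1-byte opcodes */
		good_insns = x86_64 ? good_insns_64 : good_insns_32;

		if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
			return 0;

		if (insn->opcode.nbytes == 2) {
			if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
				return 0;
		}
		return -ENOTSUPP;
	}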
Also kill UPROBE_FIX_NONE; it has no users.
Note: the current code doesn't use ifdefs consistently: good_insns_64
depends on CONFIG_X86_64 but good_insns_32 is unconditional. This patch
removes the ifdef around good_insns_64; we will add it back later along
with a similar one for good_insns_32.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 3cf24a2..b4aff6a 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -32,9 +32,6 @@
/* Post-execution fixups. */
-/* No fixup needed */
-#define UPROBE_FIX_NONE 0x0
-
/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP 0x1
@@ -114,7 +111,6 @@
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
-#ifdef CONFIG_X86_64
/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
@@ -138,7 +134,6 @@
/* ---------------------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
-#endif
#undef W
/*
@@ -209,16 +204,22 @@
return false;
}
-static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
+static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
- insn_init(insn, auprobe->insn, false);
+ u32 volatile *good_insns;
- /* Skip good instruction prefixes; reject "bad" ones. */
+ insn_init(insn, auprobe->insn, x86_64);
+
insn_get_opcode(insn);
if (is_prefix_bad(insn))
return -ENOTSUPP;
- if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
+ if (x86_64)
+ good_insns = good_insns_64;
+ else
+ good_insns = good_insns_32;
+
+ if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
return 0;
if (insn->opcode.nbytes == 2) {
@@ -355,30 +356,10 @@
}
}
-static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
-{
- insn_init(insn, auprobe->insn, true);
-
- /* Skip good instruction prefixes; reject "bad" ones. */
- insn_get_opcode(insn);
- if (is_prefix_bad(insn))
- return -ENOTSUPP;
-
- if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
- return 0;
-
- if (insn->opcode.nbytes == 2) {
- if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
- return 0;
- }
- return -ENOTSUPP;
-}
-
static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
- if (mm->context.ia32_compat)
- return validate_insn_32bits(auprobe, insn);
- return validate_insn_64bits(auprobe, insn);
+ bool x86_64 = !mm->context.ia32_compat;
+ return uprobe_init_insn(auprobe, insn, x86_64);
}
#else /* 32-bit: */
/*
@@ -398,7 +379,7 @@
static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
- return validate_insn_32bits(auprobe, insn);
+ return uprobe_init_insn(auprobe, insn, false);
}
#endif /* CONFIG_X86_64 */