Diffstat (limited to 'arch/x86/kernel/step.c')
-rw-r--r--  arch/x86/kernel/step.c  | 140
1 files changed, 140 insertions, 0 deletions
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
new file mode 100644
index 000000000000..cb3c8bc2939a
--- /dev/null
+++ b/arch/x86/kernel/step.c
@@ -0,0 +1,140 @@
+/*
+ * x86 single-step support code, common to 32-bit and 64-bit.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+
+#define LDT_SEGMENT 4
+
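+/*
+ * Translate the tracee's CS:RIP into a linear address.  Code segments
+ * in the GDT are assumed to be zero-based (see below); for segments in
+ * the LDT, the descriptor's base address is looked up and added.
+ */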
+unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
+{
+	unsigned long addr, seg;
+
+	addr = regs->rip;
+	seg = regs->cs & 0xffff;
+
+	/*
+	 * We'll assume that the code segments in the GDT
+	 * are all zero-based. That is largely true: the
+	 * TLS segments are used for data, and the PNPBIOS
+	 * and APM bios ones we just ignore here.
+	 */
+	if (seg & LDT_SEGMENT) {
+		u32 *desc;
+		unsigned long base;
+
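+		/*
+		 * Bits 0-1 of the selector are the RPL and bit 2 is the
+		 * LDT/GDT table bit; clearing all three leaves the byte
+		 * offset of the 8-byte descriptor, and seg >> 3 its index.
+		 */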
+		seg &= ~7UL;
+
+		mutex_lock(&child->mm->context.lock);
+		if (unlikely((seg >> 3) >= child->mm->context.size))
+			addr = -1L; /* bogus selector, access would fault */
+		else {
+			desc = child->mm->context.ldt + seg;
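+			/*
+			 * Reassemble the segment base, which the descriptor
+			 * format splits across bits 16-31 of the low word
+			 * and bits 0-7 and 24-31 of the high word.
+			 */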
+			base = ((desc[0] >> 16) |
+				((desc[1] & 0xff) << 16) |
+				(desc[1] & 0xff000000));
+
+			/* 16-bit code segment? */
+			if (!((desc[1] >> 22) & 1))
+				addr &= 0xffff;
+			addr += base;
+		}
+		mutex_unlock(&child->mm->context.lock);
+	}
+
+	return addr;
+}
+
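+/*
+ * Decide whether the instruction the tracee is about to execute can set
+ * TF by itself (popf or iret), skipping over any prefix bytes first.
+ * An x86 instruction is at most 15 bytes, so that is all we fetch.
+ */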
+static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+{
+	int i, copied;
+	unsigned char opcode[15];
+	unsigned long addr = convert_rip_to_linear(child, regs);
+
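+	/*
+	 * Read up to one maximum-length instruction from the tracee;
+	 * access_process_vm() may return fewer bytes, so only the
+	 * 'copied' prefix of the buffer is examined below.
+	 */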
+	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+	for (i = 0; i < copied; i++) {
+		switch (opcode[i]) {
+		/* popf and iret */
+		case 0x9d: case 0xcf:
+			return 1;
+
+			/* CHECKME: 64 65 */
+
+		/* opcode and address size prefixes */
+		case 0x66: case 0x67:
+			continue;
+		/* irrelevant prefixes (segment overrides and repeats) */
+		case 0x26: case 0x2e:
+		case 0x36: case 0x3e:
+		case 0x64: case 0x65:
+		case 0xf2: case 0xf3:
+			continue;
+
+		case 0x40 ... 0x4f:
+			if (regs->cs != __USER_CS)
+				/* 32-bit mode: register increment */
+				return 0;
+			/* 64-bit mode: REX prefix */
+			continue;
+
+			/* CHECKME: f2, f3 */
+
+		/*
+		 * pushf: NOTE! We should probably not let
+		 * the user see the TF bit being set. But
+		 * it's more pain than it's worth to avoid
+		 * it, and a debugger could emulate this
+		 * all in user space if it _really_ cares.
+		 */
+		case 0x9c:
+		default:
+			return 0;
+		}
+	}
+	return 0;
+}
+
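+/*
+ * Enable single-stepping of the tracee: set TIF_SINGLESTEP, set TF in
+ * the saved user EFLAGS, and use PT_DTRACE to remember when it was us
+ * (rather than the tracee) that set TF, so it can be cleared later.
+ */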
+void user_enable_single_step(struct task_struct *child)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+
+	/*
+	 * Always set TIF_SINGLESTEP - this guarantees that
+	 * we single-step system calls etc.. This will also
+	 * cause us to set TF when returning to user mode.
+	 */
+	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	/*
+	 * If TF was already set, don't do anything else
+	 */
+	if (regs->eflags & X86_EFLAGS_TF)
+		return;
+
+	/* Set TF on the kernel stack.. */
+	regs->eflags |= X86_EFLAGS_TF;
+
+	/*
+	 * ..but if TF is changed by the instruction we will trace,
+	 * don't mark it as being "us" that set it, so that we
+	 * won't clear it by hand later.
+	 */
+	if (is_setting_trap_flag(child, regs))
+		return;
+
+	child->ptrace |= PT_DTRACE;
+}
+
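+/*
+ * Disable single-stepping: clear TIF_SINGLESTEP, and clear TF only if
+ * it was set by us (PT_DTRACE) rather than by the tracee itself.
+ */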
+void user_disable_single_step(struct task_struct *child)
+{
+	/* Always clear TIF_SINGLESTEP... */
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	/* But touch TF only if it was set by us.. */
+	if (child->ptrace & PT_DTRACE) {
+		struct pt_regs *regs = task_pt_regs(child);
+		regs->eflags &= ~X86_EFLAGS_TF;
+		child->ptrace &= ~PT_DTRACE;
+	}
+}