LKML Archive mirror
* [PATCH v2 1/7] uprobes/core: Make macro names consistent.
@ 2012-03-12  9:25 Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions Srikar Dronamraju
                   ` (6 more replies)
  0 siblings, 7 replies; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:25 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

Rename macros that refer to an individual uprobe so that they start with
UPROBE_ instead of UPROBES_.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/include/asm/uprobes.h |    6 +++---
 arch/x86/kernel/uprobes.c      |   18 +++++++++---------
 include/linux/uprobes.h        |    4 ++--
 kernel/events/uprobes.c        |   18 +++++++++---------
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index f7ce310..5c399e4 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -26,10 +26,10 @@
 typedef u8 uprobe_opcode_t;
 
 #define MAX_UINSN_BYTES			  16
-#define UPROBES_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
+#define UPROBE_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
 
-#define UPROBES_BKPT_INSN		0xcc
-#define UPROBES_BKPT_INSN_SIZE		   1
+#define UPROBE_BKPT_INSN		0xcc
+#define UPROBE_BKPT_INSN_SIZE		   1
 
 struct arch_uprobe {
 	u16				fixups;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 04dfcef..6dfa89e 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -31,14 +31,14 @@
 /* Post-execution fixups. */
 
 /* No fixup needed */
-#define UPROBES_FIX_NONE	0x0
+#define UPROBE_FIX_NONE	0x0
 /* Adjust IP back to vicinity of actual insn */
-#define UPROBES_FIX_IP		0x1
+#define UPROBE_FIX_IP		0x1
 /* Adjust the return address of a call insn */
-#define UPROBES_FIX_CALL	0x2
+#define UPROBE_FIX_CALL	0x2
 
-#define UPROBES_FIX_RIP_AX	0x8000
-#define UPROBES_FIX_RIP_CX	0x4000
+#define UPROBE_FIX_RIP_AX	0x8000
+#define UPROBE_FIX_RIP_CX	0x4000
 
 /* Adaptations for mhiramat x86 decoder v14. */
 #define OPCODE1(insn)		((insn)->opcode.bytes[0])
@@ -269,9 +269,9 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
 		break;
 	}
 	if (fix_ip)
-		auprobe->fixups |= UPROBES_FIX_IP;
+		auprobe->fixups |= UPROBE_FIX_IP;
 	if (fix_call)
-		auprobe->fixups |= UPROBES_FIX_CALL;
+		auprobe->fixups |= UPROBE_FIX_CALL;
 }
 
 #ifdef CONFIG_X86_64
@@ -341,12 +341,12 @@ static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe
 		 * is NOT the register operand, so we use %rcx (register
 		 * #1) for the scratch register.
 		 */
-		auprobe->fixups = UPROBES_FIX_RIP_CX;
+		auprobe->fixups = UPROBE_FIX_RIP_CX;
 		/* Change modrm from 00 000 101 to 00 000 001. */
 		*cursor = 0x1;
 	} else {
 		/* Use %rax (register #0) for the scratch register. */
-		auprobe->fixups = UPROBES_FIX_RIP_AX;
+		auprobe->fixups = UPROBE_FIX_RIP_AX;
 		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
 		*cursor = (reg << 3);
 	}
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index f85797e..838fb31 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -35,10 +35,10 @@ struct vm_area_struct;
 /* flags that denote/change uprobes behaviour */
 
 /* Have a copy of original instruction */
-#define UPROBES_COPY_INSN	0x1
+#define UPROBE_COPY_INSN	0x1
 
 /* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBES_RUN_HANDLER	0x2
+#define UPROBE_RUN_HANDLER	0x2
 
 struct uprobe_consumer {
 	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5ce32e3..0d36bf3 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -177,7 +177,7 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct 
  */
 bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 {
-	return *insn == UPROBES_BKPT_INSN;
+	return *insn == UPROBE_BKPT_INSN;
 }
 
 /*
@@ -259,8 +259,8 @@ static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
 
 	/* poke the new insn in, ASSUMES we don't cross page boundary */
 	vaddr &= ~PAGE_MASK;
-	BUG_ON(vaddr + UPROBES_BKPT_INSN_SIZE > PAGE_SIZE);
-	memcpy(vaddr_new + vaddr, &opcode, UPROBES_BKPT_INSN_SIZE);
+	BUG_ON(vaddr + UPROBE_BKPT_INSN_SIZE > PAGE_SIZE);
+	memcpy(vaddr_new + vaddr, &opcode, UPROBE_BKPT_INSN_SIZE);
 
 	kunmap_atomic(vaddr_new);
 	kunmap_atomic(vaddr_old);
@@ -308,7 +308,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	lock_page(page);
 	vaddr_new = kmap_atomic(page);
 	vaddr &= ~PAGE_MASK;
-	memcpy(opcode, vaddr_new + vaddr, UPROBES_BKPT_INSN_SIZE);
+	memcpy(opcode, vaddr_new + vaddr, UPROBE_BKPT_INSN_SIZE);
 	kunmap_atomic(vaddr_new);
 	unlock_page(page);
 
@@ -352,7 +352,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned 
 	if (result)
 		return result;
 
-	return write_opcode(mm, auprobe, vaddr, UPROBES_BKPT_INSN);
+	return write_opcode(mm, auprobe, vaddr, UPROBE_BKPT_INSN);
 }
 
 /**
@@ -635,7 +635,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 
 	addr = (unsigned long)vaddr;
 
-	if (!(uprobe->flags & UPROBES_COPY_INSN)) {
+	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
 		ret = copy_insn(uprobe, vma, addr);
 		if (ret)
 			return ret;
@@ -647,7 +647,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (ret)
 			return ret;
 
-		uprobe->flags |= UPROBES_COPY_INSN;
+		uprobe->flags |= UPROBE_COPY_INSN;
 	}
 	ret = set_bkpt(mm, &uprobe->arch, addr);
 
@@ -857,7 +857,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 			uprobe->consumers = NULL;
 			__uprobe_unregister(uprobe);
 		} else {
-			uprobe->flags |= UPROBES_RUN_HANDLER;
+			uprobe->flags |= UPROBE_RUN_HANDLER;
 		}
 	}
 
@@ -889,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 	if (consumer_del(uprobe, consumer)) {
 		if (!uprobe->consumers) {
 			__uprobe_unregister(uprobe);
-			uprobe->flags &= ~UPROBES_RUN_HANDLER;
+			uprobe->flags &= ~UPROBE_RUN_HANDLER;
 		}
 	}
 




* [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions.
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
@ 2012-03-12  9:25 ` Srikar Dronamraju
  2012-03-13  9:42   ` [tip:perf/uprobes] " tip-bot for Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 3/7] uprobes/core: Rename bkpt to swbp Srikar Dronamraju
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:25 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

If a function takes a struct uprobe or a struct arch_uprobe, it is now
passed as the first parameter.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/include/asm/uprobes.h |    2 -
 arch/x86/kernel/uprobes.c      |   15 +++---
 include/linux/uprobes.h        |   12 +++--
 kernel/events/uprobes.c        |   93 +++++++++++++++++++++-------------------
 4 files changed, 63 insertions(+), 59 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 5c399e4..384f1be 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -39,5 +39,5 @@ struct arch_uprobe {
 #endif
 };
 
-extern int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *arch_uprobe);
+extern int arch_uprobes_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
 #endif	/* _ASM_UPROBES_H */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6dfa89e..851a11b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -297,7 +297,8 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
  *  - There's never a SIB byte.
  *  - The displacement is always 4 bytes.
  */
-static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static void
+handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	u8 *cursor;
 	u8 reg;
@@ -381,19 +382,19 @@ static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
 	return -ENOTSUPP;
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	if (mm->context.ia32_compat)
 		return validate_insn_32bits(auprobe, insn);
 	return validate_insn_64bits(auprobe, insn);
 }
 #else /* 32-bit: */
-static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static void handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	/* No RIP-relative addressing on 32-bit */
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,  struct insn *insn)
 {
 	return validate_insn_32bits(auprobe, insn);
 }
@@ -405,17 +406,17 @@ static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe,
  * @arch_uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *auprobe)
+int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 {
 	int ret;
 	struct insn insn;
 
 	auprobe->fixups = 0;
-	ret = validate_insn_bits(mm, auprobe, &insn);
+	ret = validate_insn_bits(auprobe, mm, &insn);
 	if (ret != 0)
 		return ret;
 
-	handle_riprel_insn(mm, auprobe, &insn);
+	handle_riprel_insn(auprobe, mm, &insn);
 	prepare_fixups(auprobe, &insn);
 
 	return 0;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 838fb31..5869918 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -52,20 +52,20 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr);
-extern int __weak set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify);
+extern int __weak set_bkpt(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm,  unsigned long vaddr, bool verify);
 extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
-extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
-extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
+extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
 #else /* CONFIG_UPROBES is not defined */
 static inline int
-uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	return -ENOSYS;
 }
 static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 }
 static inline int uprobe_mmap(struct vm_area_struct *vma)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 0d36bf3..9c5ddff 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -192,8 +192,8 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 
 /*
  * write_opcode - write the opcode at a given virtual address.
+ * @auprobe: arch breakpointing information.
  * @mm: the probed process address space.
- * @arch_uprobe: the breakpointing information.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
  *
@@ -203,7 +203,7 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
  * For mm @mm, write the opcode at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
+static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 			unsigned long vaddr, uprobe_opcode_t opcode)
 {
 	struct page *old_page, *new_page;
@@ -334,14 +334,14 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 
 /**
  * set_bkpt - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
  * @mm: the probed process address space.
- * @uprobe: the probepoint information.
  * @vaddr: the virtual address to insert the opcode.
  *
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr)
+int __weak set_bkpt(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
 	int result;
 
@@ -352,13 +352,13 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned 
 	if (result)
 		return result;
 
-	return write_opcode(mm, auprobe, vaddr, UPROBE_BKPT_INSN);
+	return write_opcode(auprobe, mm, vaddr, UPROBE_BKPT_INSN);
 }
 
 /**
  * set_orig_insn - Restore the original instruction.
  * @mm: the probed process address space.
- * @uprobe: the probepoint information.
+ * @auprobe: arch specific probepoint information.
  * @vaddr: the virtual address to insert the opcode.
  * @verify: if true, verify existance of breakpoint instruction.
  *
@@ -366,7 +366,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned 
  * Return 0 (success) or a negative errno.
  */
 int __weak
-set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify)
+set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
 {
 	if (verify) {
 		int result;
@@ -378,7 +378,7 @@ set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long v
 		if (result != 1)
 			return result;
 	}
-	return write_opcode(mm, auprobe, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -525,30 +525,30 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 
 /* Returns the previous consumer */
 static struct uprobe_consumer *
-consumer_add(struct uprobe *uprobe, struct uprobe_consumer *consumer)
+consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
 	down_write(&uprobe->consumer_rwsem);
-	consumer->next = uprobe->consumers;
-	uprobe->consumers = consumer;
+	uc->next = uprobe->consumers;
+	uprobe->consumers = uc;
 	up_write(&uprobe->consumer_rwsem);
 
-	return consumer->next;
+	return uc->next;
 }
 
 /*
- * For uprobe @uprobe, delete the consumer @consumer.
- * Return true if the @consumer is deleted successfully
+ * For uprobe @uprobe, delete the consumer @uc.
+ * Return true if the @uc is deleted successfully
  * or return false.
  */
-static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer)
+static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
 	struct uprobe_consumer **con;
 	bool ret = false;
 
 	down_write(&uprobe->consumer_rwsem);
 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
-		if (*con == consumer) {
-			*con = consumer->next;
+		if (*con == uc) {
+			*con = uc->next;
 			ret = true;
 			break;
 		}
@@ -558,8 +558,8 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer
 	return ret;
 }
 
-static int __copy_insn(struct address_space *mapping,
-			struct vm_area_struct *vma, char *insn,
+static int
+__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
 			unsigned long nbytes, unsigned long offset)
 {
 	struct file *filp = vma->vm_file;
@@ -590,7 +590,8 @@ static int __copy_insn(struct address_space *mapping,
 	return 0;
 }
 
-static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int
+copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct address_space *mapping;
 	unsigned long nbytes;
@@ -617,8 +618,9 @@ static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned
 	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
-static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
-				struct vm_area_struct *vma, loff_t vaddr)
+static int
+install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
+			struct vm_area_struct *vma, loff_t vaddr)
 {
 	unsigned long addr;
 	int ret;
@@ -643,20 +645,21 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
-		ret = arch_uprobes_analyze_insn(mm, &uprobe->arch);
+		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
 		if (ret)
 			return ret;
 
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
-	ret = set_bkpt(mm, &uprobe->arch, addr);
+	ret = set_bkpt(&uprobe->arch, mm, addr);
 
 	return ret;
 }
 
-static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
+static void
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 {
-	set_orig_insn(mm, &uprobe->arch, (unsigned long)vaddr, true);
+	set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true);
 }
 
 static void delete_uprobe(struct uprobe *uprobe)
@@ -671,9 +674,9 @@ static void delete_uprobe(struct uprobe *uprobe)
 	atomic_dec(&uprobe_events);
 }
 
-static struct vma_info *__find_next_vma_info(struct list_head *head,
-			loff_t offset, struct address_space *mapping,
-			struct vma_info *vi, bool is_register)
+static struct vma_info *
+__find_next_vma_info(struct address_space *mapping, struct list_head *head,
+			struct vma_info *vi, loff_t offset, bool is_register)
 {
 	struct prio_tree_iter iter;
 	struct vm_area_struct *vma;
@@ -719,8 +722,8 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
  * yet been inserted.
  */
 static struct vma_info *
-find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *mapping,
-		   bool is_register)
+find_next_vma_info(struct address_space *mapping, struct list_head *head,
+		loff_t offset, bool is_register)
 {
 	struct vma_info *vi, *retvi;
 
@@ -729,7 +732,7 @@ find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *
 		return ERR_PTR(-ENOMEM);
 
 	mutex_lock(&mapping->i_mmap_mutex);
-	retvi = __find_next_vma_info(head, offset, mapping, vi, is_register);
+	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
 	mutex_unlock(&mapping->i_mmap_mutex);
 
 	if (!retvi)
@@ -754,7 +757,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 	ret = 0;
 
 	for (;;) {
-		vi = find_next_vma_info(&try_list, uprobe->offset, mapping, is_register);
+		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
 		if (!vi)
 			break;
 
@@ -784,9 +787,9 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 		}
 
 		if (is_register)
-			ret = install_breakpoint(mm, uprobe, vma, vi->vaddr);
+			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
 		else
-			remove_breakpoint(mm, uprobe, vi->vaddr);
+			remove_breakpoint(uprobe, mm, vi->vaddr);
 
 		up_read(&mm->mmap_sem);
 		mmput(mm);
@@ -823,25 +826,25 @@ static void __uprobe_unregister(struct uprobe *uprobe)
  * uprobe_register - register a probe
  * @inode: the file in which the probe has to be placed.
  * @offset: offset from the start of the file.
- * @consumer: information on howto handle the probe..
+ * @uc: information on howto handle the probe..
  *
  * Apart from the access refcount, uprobe_register() takes a creation
  * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
  * inserted into the rbtree (i.e first consumer for a @inode:@offset
  * tuple).  Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
- * refcount is released when the last @consumer for the @uprobe
+ * refcount is released when the last @uc for the @uprobe
  * unregisters.
  *
  * Return errno if it cannot successully install probes
  * else return 0 (success)
  */
-int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	struct uprobe *uprobe;
 	int ret;
 
-	if (!inode || !consumer || consumer->next)
+	if (!inode || !uc || uc->next)
 		return -EINVAL;
 
 	if (offset > i_size_read(inode))
@@ -851,7 +854,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 	mutex_lock(uprobes_hash(inode));
 	uprobe = alloc_uprobe(inode, offset);
 
-	if (uprobe && !consumer_add(uprobe, consumer)) {
+	if (uprobe && !consumer_add(uprobe, uc)) {
 		ret = __uprobe_register(uprobe);
 		if (ret) {
 			uprobe->consumers = NULL;
@@ -871,13 +874,13 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
  * uprobe_unregister - unregister a already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
- * @consumer: identify which probe if multiple probes are colocated.
+ * @uc: identify which probe if multiple probes are colocated.
  */
-void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	struct uprobe *uprobe;
 
-	if (!inode || !consumer)
+	if (!inode || !uc)
 		return;
 
 	uprobe = find_uprobe(inode, offset);
@@ -886,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 
 	mutex_lock(uprobes_hash(inode));
 
-	if (consumer_del(uprobe, consumer)) {
+	if (consumer_del(uprobe, uc)) {
 		if (!uprobe->consumers) {
 			__uprobe_unregister(uprobe);
 			uprobe->flags &= ~UPROBE_RUN_HANDLER;
@@ -993,7 +996,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 		if (!ret) {
 			vaddr = vma_address(vma, uprobe->offset);
 			if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
-				ret = install_breakpoint(vma->vm_mm, uprobe, vma, vaddr);
+				ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
 				/* Ignore double add: */
 				if (ret == -EEXIST)
 					ret = 0;




* [PATCH v2 3/7] uprobes/core: Rename bkpt to swbp
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions Srikar Dronamraju
@ 2012-03-12  9:25 ` Srikar Dronamraju
  2012-03-13  9:43   ` [tip:perf/uprobes] " tip-bot for Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 4/7] x86/trivial: Rename trap_no to trap_nr in thread struct Srikar Dronamraju
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:25 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

bkpt doesn't seem to be a correct abbreviation for breakpoint. The choice
was between bp and breakpoint; since bp can refer to things other than a
breakpoint, use swbp to refer to software breakpoints.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/include/asm/uprobes.h |    4 ++--
 include/linux/uprobes.h        |    4 ++--
 kernel/events/uprobes.c        |   34 +++++++++++++++++-----------------
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 384f1be..0500391 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -28,8 +28,8 @@ typedef u8 uprobe_opcode_t;
 #define MAX_UINSN_BYTES			  16
 #define UPROBE_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
 
-#define UPROBE_BKPT_INSN		0xcc
-#define UPROBE_BKPT_INSN_SIZE		   1
+#define UPROBE_SWBP_INSN		0xcc
+#define UPROBE_SWBP_INSN_SIZE		   1
 
 struct arch_uprobe {
 	u16				fixups;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 5869918..eac525f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -52,9 +52,9 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm,  unsigned long vaddr, bool verify);
-extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
+extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 9c5ddff..e56e56a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -170,14 +170,14 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct 
 }
 
 /**
- * is_bkpt_insn - check if instruction is breakpoint instruction.
+ * is_swbp_insn - check if instruction is breakpoint instruction.
  * @insn: instruction to be checked.
- * Default implementation of is_bkpt_insn
+ * Default implementation of is_swbp_insn
  * Returns true if @insn is a breakpoint instruction.
  */
-bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
+bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 {
-	return *insn == UPROBE_BKPT_INSN;
+	return *insn == UPROBE_SWBP_INSN;
 }
 
 /*
@@ -227,7 +227,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	 * adding probes in write mapped pages since the breakpoints
 	 * might end up in the file copy.
 	 */
-	if (!valid_vma(vma, is_bkpt_insn(&opcode)))
+	if (!valid_vma(vma, is_swbp_insn(&opcode)))
 		goto put_out;
 
 	uprobe = container_of(auprobe, struct uprobe, arch);
@@ -259,8 +259,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 	/* poke the new insn in, ASSUMES we don't cross page boundary */
 	vaddr &= ~PAGE_MASK;
-	BUG_ON(vaddr + UPROBE_BKPT_INSN_SIZE > PAGE_SIZE);
-	memcpy(vaddr_new + vaddr, &opcode, UPROBE_BKPT_INSN_SIZE);
+	BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+	memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	kunmap_atomic(vaddr_new);
 	kunmap_atomic(vaddr_old);
@@ -308,7 +308,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	lock_page(page);
 	vaddr_new = kmap_atomic(page);
 	vaddr &= ~PAGE_MASK;
-	memcpy(opcode, vaddr_new + vaddr, UPROBE_BKPT_INSN_SIZE);
+	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
 	kunmap_atomic(vaddr_new);
 	unlock_page(page);
 
@@ -317,7 +317,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	return 0;
 }
 
-static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
 {
 	uprobe_opcode_t opcode;
 	int result;
@@ -326,14 +326,14 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	if (result)
 		return result;
 
-	if (is_bkpt_insn(&opcode))
+	if (is_swbp_insn(&opcode))
 		return 1;
 
 	return 0;
 }
 
 /**
- * set_bkpt - store breakpoint at a given address.
+ * set_swbp - store breakpoint at a given address.
  * @auprobe: arch specific probepoint information.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to insert the opcode.
@@ -341,18 +341,18 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
 	int result;
 
-	result = is_bkpt_at_addr(mm, vaddr);
+	result = is_swbp_at_addr(mm, vaddr);
 	if (result == 1)
 		return -EEXIST;
 
 	if (result)
 		return result;
 
-	return write_opcode(auprobe, mm, vaddr, UPROBE_BKPT_INSN);
+	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 }
 
 /**
@@ -371,7 +371,7 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
 	if (verify) {
 		int result;
 
-		result = is_bkpt_at_addr(mm, vaddr);
+		result = is_swbp_at_addr(mm, vaddr);
 		if (!result)
 			return -EINVAL;
 
@@ -642,7 +642,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
+		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
 		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
@@ -651,7 +651,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
-	ret = set_bkpt(&uprobe->arch, mm, addr);
+	ret = set_swbp(&uprobe->arch, mm, addr);
 
 	return ret;
 }




* [PATCH v2 4/7] x86/trivial: Rename trap_no to trap_nr in thread struct.
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions Srikar Dronamraju
  2012-03-12  9:25 ` [PATCH v2 3/7] uprobes/core: Rename bkpt to swbp Srikar Dronamraju
@ 2012-03-12  9:25 ` Srikar Dronamraju
  2012-03-13  9:44   ` [tip:x86/cleanups] x86: Rename trap_no to trap_nr in thread_struct tip-bot for Srikar Dronamraju
  2012-03-12  9:26 ` [PATCH v2 5/7] x86/trivial: Fix 'old_rsp' undefined build failure when including asm/compat.h Srikar Dronamraju
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:25 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

There are precedents for referring to the trap number as trap_nr, but the
thread struct refers to it as trap_no. Change it to trap_nr.

Also use enum instead of left-over literals for trap values.

This is pure cleanup, no functional change intended.

Suggested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/ia32/ia32_signal.c      |    2 +-
 arch/x86/include/asm/processor.h |    2 +-
 arch/x86/kernel/dumpstack.c      |    2 +-
 arch/x86/kernel/ptrace.c         |    3 ++-
 arch/x86/kernel/signal.c         |    2 +-
 arch/x86/kernel/traps.c          |   16 ++++++++--------
 arch/x86/kernel/vm86_32.c        |    2 +-
 arch/x86/kernel/vsyscall_64.c    |    2 +-
 arch/x86/math-emu/fpu_entry.c    |    4 ++--
 arch/x86/mm/fault.c              |   10 +++++-----
 10 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 7d4dba9..135cd12 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -346,7 +346,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 		put_user_ex(regs->dx, &sc->dx);
 		put_user_ex(regs->cx, &sc->cx);
 		put_user_ex(regs->ax, &sc->ax);
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c59ff02..09ffc86 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,7 +454,7 @@ struct thread_struct {
 	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4..28f9870 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #endif
 	printk("\n");
 	if (notify_die(DIE_OOPS, str, regs, err,
-			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return 1;
 
 	show_registers(regs);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d14b3cd..52c06b1 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -34,6 +34,7 @@
 #include <asm/prctl.h>
 #include <asm/proto.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
 
 #include "tls.h"
 
@@ -1426,7 +1427,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
 				int error_code, int si_code,
 				struct siginfo *info)
 {
-	tsk->thread.trap_no = 1;
+	tsk->thread.trap_nr = X86_TRAP_DB;
 	tsk->thread.error_code = error_code;
 
 	memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 5134e17..115eac4 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -151,7 +151,7 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 		put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4310189..19a2b83 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -133,7 +133,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 trap_signal:
 #endif
 	/*
-	 * We want error_code and trap_no set for userspace faults and
+	 * We want error_code and trap_nr set for userspace faults and
 	 * kernelspace faults which result in die(), but not
 	 * kernelspace faults which are fixed up.  die() gives the
 	 * process no chance to handle the signal and notice the
@@ -142,7 +142,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	 * delivered, faults.  See also do_general_protection below.
 	 */
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = trapnr;
+	tsk->thread.trap_nr = trapnr;
 
 #ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -165,7 +165,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 kernel_trap:
 	if (!fixup_exception(regs)) {
 		tsk->thread.error_code = error_code;
-		tsk->thread.trap_no = trapnr;
+		tsk->thread.trap_nr = trapnr;
 		die(str, regs, error_code);
 	}
 	return;
@@ -241,7 +241,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_DF;
+	tsk->thread.trap_nr = X86_TRAP_DF;
 
 	/*
 	 * This is always a kernel trap and never fixable (and thus must
@@ -269,7 +269,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		goto gp_in_kernel;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_GP;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 			printk_ratelimit()) {
@@ -296,7 +296,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		return;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_GP;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
 		return;
@@ -476,7 +476,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	{
 		if (!fixup_exception(regs)) {
 			task->thread.error_code = error_code;
-			task->thread.trap_no = trapnr;
+			task->thread.trap_nr = trapnr;
 			die(str, regs, error_code);
 		}
 		return;
@@ -486,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	 * Save the info for the exception handler and clear the error.
 	 */
 	save_init_fpu(task);
-	task->thread.trap_no = trapnr;
+	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index b466cab..a1315ab 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -567,7 +567,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 	}
 	if (trapno != 1)
 		return 1; /* we let this handle by the calling routine */
-	current->thread.trap_no = trapno;
+	current->thread.trap_nr = trapno;
 	current->thread.error_code = error_code;
 	force_sig(SIGTRAP, current);
 	return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba93..327509b 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -153,7 +153,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 
 		thread->error_code	= 6;  /* user fault, no page, write */
 		thread->cr2		= ptr;
-		thread->trap_no		= 14;
+		thread->trap_nr		= X86_TRAP_PF;
 
 		memset(&info, 0, sizeof(info));
 		info.si_signo		= SIGSEGV;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541..0feeca4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -269,7 +269,7 @@ void math_emulate(struct math_emu_info *info)
 			FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
 
 			RE_ENTRANT_CHECK_OFF;
-			current->thread.trap_no = 16;
+			current->thread.trap_nr = X86_TRAP_MF;
 			current->thread.error_code = 0;
 			send_sig(SIGFPE, current, 1);
 			return;
@@ -662,7 +662,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
 void math_abort(struct math_emu_info *info, unsigned int signal)
 {
 	FPU_EIP = FPU_ORIG_EIP;
-	current->thread.trap_no = 16;
+	current->thread.trap_nr = X86_TRAP_MF;
 	current->thread.error_code = 0;
 	send_sig(signal, current, 1);
 	RE_ENTRANT_CHECK_OFF;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f0b4caf..3ecfd1a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 	dump_pagetable(address);
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	if (__die("Bad pagetable", regs, error_code))
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs)) {
 		if (current_thread_info()->sig_on_uaccess_error && signal) {
-			tsk->thread.trap_no = 14;
+			tsk->thread.trap_nr = X86_TRAP_PF;
 			tsk->thread.error_code = error_code | PF_USER;
 			tsk->thread.cr2 = address;
 
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	sig = SIGKILL;
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		/* Kernel addresses are always protection faults: */
 		tsk->thread.cr2		= address;
 		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
-		tsk->thread.trap_no	= 14;
+		tsk->thread.trap_nr	= X86_TRAP_PF;
 
 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 
 	tsk->thread.cr2		= address;
 	tsk->thread.error_code	= error_code;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 
 #ifdef CONFIG_MEMORY_FAILURE
 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {




* [PATCH v2 5/7] x86/trivial: Fix 'old_rsp' undefined build failure when including asm/compat.h
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
                   ` (2 preceding siblings ...)
  2012-03-12  9:25 ` [PATCH v2 4/7] x86/trivial: Rename trap_no to trap_nr in thread struct Srikar Dronamraju
@ 2012-03-12  9:26 ` Srikar Dronamraju
  2012-03-12  9:26 ` [PATCH v2 6/7] x86/trivial: Use is_ia32_compat_task Srikar Dronamraju
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:26 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

Including asm/compat.h in arch/x86/kernel/signal.c and compiling on an i386
machine results in 'old_rsp' undefined build errors.

old_rsp is defined only under CONFIG_X86_64. Hence add an i386-specific
arch_compat_alloc_user_space() that doesn't depend on old_rsp. This will be
further cleaned up when is_ia32_compat_task is introduced.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/include/asm/compat.h |   10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 355edc0..ba9e9dc 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -221,6 +221,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
+#ifdef CONFIG_x86_64
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	compat_uptr_t sp;
@@ -234,6 +235,15 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 
 	return (void __user *)round_down(sp - len, 16);
 }
+#else
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+	struct pt_regs *regs = task_pt_regs(current);
+		return (void __user *)regs->sp - len;
+}
+
+#endif
 
 static inline bool is_ia32_task(void)
 {




* [PATCH v2 6/7] x86/trivial: Use is_ia32_compat_task
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
                   ` (3 preceding siblings ...)
  2012-03-12  9:26 ` [PATCH v2 5/7] x86/trivial: Fix 'old_rsp' undefined build failure when including asm/compat.h Srikar Dronamraju
@ 2012-03-12  9:26 ` Srikar Dronamraju
  2012-03-12  9:26 ` [PATCH v2 7/7] uprobes/core: Handle breakpoint and singlestep exception Srikar Dronamraju
  2012-03-13  9:41 ` [tip:perf/uprobes] uprobes/core: Make macro names consistent tip-bot for Srikar Dronamraju
  6 siblings, 0 replies; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:26 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

There are several places in the arch/x86 code where we check whether a task
is 32-bit. However, in most cases this check has to live inside #ifdef code.

This change provides an is_ia32_compat_task() macro that can be called
without the #ifdef.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
 arch/x86/include/asm/compat.h             |   31 +++++++++++++----------------
 arch/x86/include/asm/elf.h                |    9 +++-----
 arch/x86/include/asm/syscall.h            |    9 +++-----
 arch/x86/kernel/cpu/perf_event_intel_ds.c |    5 ++---
 arch/x86/kernel/process_64.c              |   13 +++++-------
 arch/x86/kernel/ptrace.c                  |   27 ++++++-------------------
 arch/x86/kernel/signal.c                  |   10 ++-------
 include/linux/compat.h                    |    4 +++-
 8 files changed, 38 insertions(+), 70 deletions(-)

diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index ba9e9dc..2a73336 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -13,6 +13,12 @@
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"i686\0\0"
 
+#ifdef CONFIG_X86_32
+# define is_ia32_compat_task(t) 1
+#else
+# define is_ia32_compat_task(t) (test_tsk_thread_flag(t, TIF_IA32))
+#endif
+
 typedef u32		compat_size_t;
 typedef s32		compat_ssize_t;
 typedef s32		compat_time_t;
@@ -221,36 +227,25 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-#ifdef CONFIG_x86_64
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	compat_uptr_t sp;
 
-	if (test_thread_flag(TIF_IA32)) {
-		sp = task_pt_regs(current)->sp;
-	} else {
+	sp = task_pt_regs(current)->sp;
+#ifdef CONFIG_x86_64
+	if (!is_ia32_compat_task(current))
 		/* -128 for the x32 ABI redzone */
 		sp = percpu_read(old_rsp) - 128;
-	}
+#endif
 
 	return (void __user *)round_down(sp - len, 16);
 }
-#else
-
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
-	struct pt_regs *regs = task_pt_regs(current);
-		return (void __user *)regs->sp - len;
-}
-
-#endif
 
 static inline bool is_ia32_task(void)
 {
-#ifdef CONFIG_IA32_EMULATION
-	if (current_thread_info()->status & TS_COMPAT)
+	if (is_ia32_compat_task(current))
 		return true;
-#endif
+
 	return false;
 }
 
@@ -263,6 +258,8 @@ static inline bool is_x32_task(void)
 	return false;
 }
 
+#undef is_compat_task
+
 static inline bool is_compat_task(void)
 {
 	return is_ia32_task() || is_x32_task();
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1e40634..d9805c6 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -9,6 +9,7 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 #include <asm/auxvec.h>
+#include <asm/compat.h>
 
 typedef unsigned long elf_greg_t;
 
@@ -345,13 +346,9 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
  */
 static inline int mmap_is_ia32(void)
 {
-#ifdef CONFIG_X86_32
-	return 1;
-#endif
-#ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_ADDR32))
+	if (is_ia32_compat_task(current))
 		return 1;
-#endif
+
 	return 0;
 }
 
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 386b786..fabcd0e 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <asm/asm-offsets.h>	/* For NR_syscalls */
 #include <asm/unistd.h>
+#include <asm/compat.h>
 
 extern const unsigned long sys_call_table[];
 
@@ -95,8 +96,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
-# ifdef CONFIG_IA32_EMULATION
-	if (task_thread_info(task)->status & TS_COMPAT)
+	if (is_ia32_compat_task(task))
 		switch (i) {
 		case 0:
 			if (!n--) break;
@@ -123,7 +123,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
 			break;
 		}
 	else
-# endif
 		switch (i) {
 		case 0:
 			if (!n--) break;
@@ -156,8 +155,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
 					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
-# ifdef CONFIG_IA32_EMULATION
-	if (task_thread_info(task)->status & TS_COMPAT)
+	if (is_ia32_compat_task(task))
 		switch (i) {
 		case 0:
 			if (!n--) break;
@@ -184,7 +182,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
 			break;
 		}
 	else
-# endif
 		switch (i) {
 		case 0:
 			if (!n--) break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 7f64df1..14ab22e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -4,6 +4,7 @@
 
 #include <asm/perf_event.h>
 #include <asm/insn.h>
+#include <asm/compat.h>
 
 #include "perf_event.h"
 
@@ -528,9 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 		} else
 			kaddr = (void *)to;
 
-#ifdef CONFIG_X86_64
-		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
-#endif
+		is_64bit = kernel_ip(to) || !is_ia32_compat_task(current);
 		insn_init(&insn, kaddr, is_64bit);
 		insn_get_length(&insn);
 		to += insn.length;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1deb467..37a7715 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -53,6 +53,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -295,7 +296,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 
-	err = -ENOMEM;
+	err = 0;
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
@@ -312,18 +313,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	 * Set a new TLS for the child thread?
 	 */
 	if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_IA32_EMULATION
-		if (test_thread_flag(TIF_IA32))
+		if (is_ia32_compat_task(current))
 			err = do_set_thread_area(p, -1,
 				(struct user_desc __user *)childregs->si, 0);
 		else
-#endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
-		if (err)
-			goto out;
+
 	}
-	err = 0;
-out:
+
 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 52c06b1..76b5194 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -35,6 +35,7 @@
 #include <asm/proto.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/traps.h>
+#include <asm/compat.h>
 
 #include "tls.h"
 
@@ -333,18 +334,14 @@ static int set_segment_reg(struct task_struct *task,
 	case offsetof(struct user_regs_struct,cs):
 		if (unlikely(value == 0))
 			return -EIO;
-#ifdef CONFIG_IA32_EMULATION
-		if (test_tsk_thread_flag(task, TIF_IA32))
+		if (is_ia32_compat_task(task))
 			task_pt_regs(task)->cs = value;
-#endif
 		break;
 	case offsetof(struct user_regs_struct,ss):
 		if (unlikely(value == 0))
 			return -EIO;
-#ifdef CONFIG_IA32_EMULATION
-		if (test_tsk_thread_flag(task, TIF_IA32))
+		if (is_ia32_compat_task(task))
 			task_pt_regs(task)->ss = value;
-#endif
 		break;
 	}
 
@@ -1411,13 +1408,10 @@ void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
 
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 {
-#ifdef CONFIG_IA32_EMULATION
-	if (test_tsk_thread_flag(task, TIF_IA32))
-#endif
+	if (is_ia32_compat_task(task))
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 		return &user_x86_32_view;
-#endif
-#ifdef CONFIG_X86_64
+#else
 	return &user_x86_64_view;
 #endif
 }
@@ -1453,15 +1447,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 	force_sig_info(SIGTRAP, &info, tsk);
 }
 
-
-#ifdef CONFIG_X86_32
-# define IS_IA32	1
-#elif defined CONFIG_IA32_EMULATION
-# define IS_IA32	is_compat_task()
-#else
-# define IS_IA32	0
-#endif
-
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1493,7 +1478,7 @@ long syscall_trace_enter(struct pt_regs *regs)
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->orig_ax);
 
-	if (IS_IA32)
+	if (is_ia32_compat_task(current))
 		audit_syscall_entry(AUDIT_ARCH_I386,
 				    regs->orig_ax,
 				    regs->bx, regs->cx,
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 115eac4..30108c1 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -37,6 +37,7 @@
 #include <asm/syscalls.h>
 
 #include <asm/sigframe.h>
+#include <asm/compat.h>
 
 #ifdef CONFIG_X86_32
 # define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
@@ -622,18 +623,11 @@ static int signr_convert(int sig)
 
 #ifdef CONFIG_X86_32
 
-#define is_ia32	1
 #define ia32_setup_frame	__setup_frame
 #define ia32_setup_rt_frame	__setup_rt_frame
 
 #else /* !CONFIG_X86_32 */
 
-#ifdef CONFIG_IA32_EMULATION
-#define is_ia32	test_thread_flag(TIF_IA32)
-#else /* !CONFIG_IA32_EMULATION */
-#define is_ia32	0
-#endif /* CONFIG_IA32_EMULATION */
-
 #ifdef CONFIG_X86_X32_ABI
 #define is_x32	test_thread_flag(TIF_X32)
 
@@ -663,7 +657,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		set = &current->saved_sigmask;
 
 	/* Set up the stack frame */
-	if (is_ia32) {
+	if (is_ia32_compat_task(current)) {
 		if (ka->sa.sa_flags & SA_SIGINFO)
 			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
 		else
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 710446f..bc298f0 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -583,7 +583,9 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
 
 #else
 
-#define is_compat_task() (0)
+#ifndef is_compat_task
+# define is_compat_task() (0)
+#endif
 
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */




* [PATCH v2 7/7] uprobes/core: Handle breakpoint and singlestep exception.
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
                   ` (4 preceding siblings ...)
  2012-03-12  9:26 ` [PATCH v2 6/7] x86/trivial: Use is_ia32_compat_task Srikar Dronamraju
@ 2012-03-12  9:26 ` Srikar Dronamraju
  2012-03-13  9:41 ` [tip:perf/uprobes] uprobes/core: Make macro names consistent tip-bot for Srikar Dronamraju
  6 siblings, 0 replies; 11+ messages in thread
From: Srikar Dronamraju @ 2012-03-12  9:26 UTC
  To: Peter Zijlstra, Ingo Molnar
  Cc: Andrew Morton, Linus Torvalds, Ananth N Mavinakayanahalli,
	Jim Keniston, LKML, Linux-mm, Oleg Nesterov, Andi Kleen,
	Christoph Hellwig, Steven Rostedt, Arnaldo Carvalho de Melo,
	Masami Hiramatsu, Thomas Gleixner

From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>

Uprobes uses exception notifiers to find out whether a thread hit a
breakpoint or a singlestep exception.

When a thread hits a uprobe or is singlestepping after a uprobe hit,
the uprobe exception notifier sets its TIF_UPROBE bit, which is then
checked on the return-to-userspace path (do_notify_resume()
->uprobe_notify_resume()), where the consumers' handlers are run
(in task context) based on the defined filters.
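
As a minimal sketch of the return path described above (illustrative only,
not the exact hunk from this patch; the flag and function names are the
ones mentioned in the description):

	/* On the return-to-userspace path, e.g. in do_notify_resume(): */
	if (thread_info_flags & _TIF_UPROBE) {
		clear_thread_flag(TIF_UPROBE);
		/* run consumer handlers / xol post-processing in task context */
		uprobe_notify_resume(regs);
	}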

Uprobe hits are thread specific, hence we need to maintain per-task
information about whether a task hit a uprobe, which uprobe was hit, and
the slot where the original instruction was copied for xol, so that it can
be singlestepped with the appropriate fixups.

In some cases, special care is needed for instructions that are
executed out of line (xol). These are architecture specific artefacts,
such as handling RIP relative instructions on x86_64.

Since the instruction at which the uprobe was inserted is executed out
of line, architecture specific fixups are added so that the thread
continues normal execution in the presence of a uprobe.
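
To make the x86_64 RIP-relative case concrete, here is an illustrative
sketch (the instruction below is an assumed example; the behaviour follows
the pre_xol_rip_insn()/handle_riprel_post_xol() hunks in this patch):

	original at vaddr:   movl %edx, 0x1000(%rip)
	copy in xol slot:    movl %edx, (%rax)        (displacement squeezed out)

	arch_uprobe_pre_xol():  save the scratch register (%rax here) and load
	                        it with the address the original %rip-relative
	                        operand referenced
	arch_uprobe_post_xol(): restore the scratch register; since the copy is
	                        4 bytes shorter than the original, add 4 to the
	                        ip correction (UPROBE_FIX_RIP_AX/_CX cases)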

Postpone signal delivery until the probed insn has been executed. The
post_xol() path does a recalc_sigpending() before returning to
user mode, which ensures the signal can't be lost.
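
A condensed sketch of that flow, using the helpers added later in this
patch:

	get_signal_to_deliver()
	  -> uprobe_deny_signal()           (task is in UTASK_SSTEP)
	       clears TIF_SIGPENDING        -> delivery is postponed
	       fatal signal or trapped xol  -> UTASK_SSTEP_TRAPPED,
	                                       TIF_UPROBE + TIF_NOTIFY_RESUME set

	uprobe_notify_resume()
	  -> handle_singlestep()
	       arch_uprobe_post_xol() or arch_uprobe_abort_xol()
	       recalc_sigpending()          -> the pending signal is seen again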

Uprobes relies on the DIE_DEBUG notification to learn when a singlestep
is complete.

Add x86-specific uprobe exception notifiers and the hooks needed to
detect a uprobe hit and do the subsequent post-processing.

Add requisite x86 fixups for xol for uprobes. Specific cases needing
fixups include relative jumps (x86_64), calls, etc.
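
As a worked example of the fixup arithmetic (the addresses are made up
purely for illustration; see arch_uprobe_post_xol() below):

	vaddr      = 0x400500           (probed call instruction)
	xol_vaddr  = 0x7f0000001000     (slot where the copy was executed)
	correction = vaddr - xol_vaddr

	UPROBE_FIX_IP:   regs->ip += correction, so the ip ends up just past
	                 the original instruction rather than past the copy
	UPROBE_FIX_CALL: the return address pushed by the call is relative to
	                 the copy; adjust_ret_addr() adds the same correction
	                 so it becomes the address following the original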

Where possible, we check for and skip singlestepping the breakpointed
instructions. For now we skip single-byte as well as a few multi-byte
nop instructions; however, this can be extended to other instructions
later.
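
For reference, the encodings currently recognized by
arch_uprobe_skip_sstep() in this patch (each optionally preceded by 0x66
operand-size prefixes):

	0x90            single-byte nop
	0x0f 0x1f       multi-byte nop
	0x0f 0x19       reserved (hint) nop space
	0x87 0xc0       xchg %eax,%eax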

Credits to Oleg Nesterov for suggestions/patches related to signal,
breakpoint, singlestep handling code.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
---
Changelog

(v10):
 - Address review comments from Ingo Molnar.
 - arch specific functions start with arch_uprobe_
 - factor out a few cleanups into separate patches

(v9):
 - Use instruction_pointer_set instead of set_instruction_pointer.
 - Changed can_skip_xol to uprobe_skip_sstep as suggested by Ananth

(v7):
 - Resolve comments from Dan Carpenter.
 - add_utask returns NULL on error.
 - Handles architecture agnostic parts of a uprobe breakpoint hit and
   subsequent xol singlestepping.

(v6)
 - added x86 specific hook for aborting xol.

(v5)
 - Use srcu_raw instead of synchronize_sched
 - Introduce a per-task srcu_id field to store the SRCU read-lock id
 - Modified comments.
 - No longer do an i386-specific enable-interrupts. (It's now part of
   another patch posted separately.)

 arch/x86/include/asm/thread_info.h |    2 
 arch/x86/include/asm/uprobes.h     |   16 ++
 arch/x86/kernel/signal.c           |    6 +
 arch/x86/kernel/uprobes.c          |  261 ++++++++++++++++++++++++++++++
 include/linux/sched.h              |    4 
 include/linux/uprobes.h            |   44 +++++
 kernel/events/uprobes.c            |  310 ++++++++++++++++++++++++++++++++++++
 kernel/fork.c                      |    7 +
 kernel/signal.c                    |    4 
 9 files changed, 645 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index af1db7e..2f06cdf 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -85,6 +85,7 @@ struct thread_info {
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
+#define TIF_UPROBE		12	/* breakpointed or singlestepping */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_IA32		17	/* IA32 compatibility process */
 #define TIF_FORK		18	/* ret_from_fork */
@@ -109,6 +110,7 @@ struct thread_info {
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
+#define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_IA32		(1 << TIF_IA32)
 #define _TIF_FORK		(1 << TIF_FORK)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 0500391..930620a 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -23,6 +23,8 @@
  *	Jim Keniston
  */
 
+#include <linux/notifier.h>
+
 typedef u8 uprobe_opcode_t;
 
 #define MAX_UINSN_BYTES			  16
@@ -39,5 +41,17 @@ struct arch_uprobe {
 #endif
 };
 
-extern int arch_uprobes_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+struct arch_uprobe_task {
+	unsigned long saved_trap_nr;
+#ifdef CONFIG_X86_64
+	unsigned long saved_scratch_register;
+#endif
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 #endif	/* _ASM_UPROBES_H */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 30108c1..c266253 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <linux/personality.h>
 #include <linux/uaccess.h>
 #include <linux/user-return-notifier.h>
+#include <linux/uprobes.h>
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
@@ -818,6 +819,11 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 		mce_notify_process();
 #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
 
+	if (thread_info_flags & _TIF_UPROBE) {
+		clear_thread_flag(TIF_UPROBE);
+		uprobe_notify_resume(regs);
+	}
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 851a11b..1156fe6 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -24,8 +24,10 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
+#include <linux/uaccess.h>
 
 #include <linux/kdebug.h>
+#include <asm/compat.h>
 #include <asm/insn.h>
 
 /* Post-execution fixups. */
@@ -40,6 +42,8 @@
 #define UPROBE_FIX_RIP_AX	0x8000
 #define UPROBE_FIX_RIP_CX	0x4000
 
+#define	UPROBE_TRAP_NR		UINT_MAX
+
 /* Adaptations for mhiramat x86 decoder v14. */
 #define OPCODE1(insn)		((insn)->opcode.bytes[0])
 #define OPCODE2(insn)		((insn)->opcode.bytes[1])
@@ -221,10 +225,9 @@ static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
 }
 
 /*
- * Figure out which fixups post_xol() will need to perform, and annotate
- * arch_uprobe->fixups accordingly.  To start with,
- * arch_uprobe->fixups is either zero or it reflects rip-related
- * fixups.
+ * Figure out which fixups arch_uprobe_post_xol() will need to perform, and
+ * annotate arch_uprobe->fixups accordingly.  To start with,
+ * arch_uprobe->fixups is either zero or it reflects rip-related fixups.
  */
 static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
 {
@@ -401,12 +404,12 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
 #endif /* CONFIG_X86_64 */
 
 /**
- * arch_uprobes_analyze_insn - instruction analysis including validity and fixups.
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
  * @arch_uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 {
 	int ret;
 	struct insn insn;
@@ -421,3 +424,249 @@ int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 
 	return 0;
 }
+
+#ifdef CONFIG_X86_64
+/*
+ * If we're emulating a rip-relative instruction, save the contents
+ * of the scratch register and store the target address in that register.
+ */
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+				struct arch_uprobe_task *autask)
+{
+	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
+		autask->saved_scratch_register = regs->ax;
+		regs->ax = current->utask->vaddr;
+		regs->ax += auprobe->rip_rela_target_address;
+	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
+		autask->saved_scratch_register = regs->cx;
+		regs->cx = current->utask->vaddr;
+		regs->cx += auprobe->rip_rela_target_address;
+	}
+}
+#else
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+				struct arch_uprobe_task *autask)
+{
+	/* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct arch_uprobe_task *autask;
+
+	autask = &current->utask->autask;
+	autask->saved_trap_nr = current->thread.trap_nr;
+	current->thread.trap_nr = UPROBE_TRAP_NR;
+	regs->ip = current->utask->xol_vaddr;
+	pre_xol_rip_insn(auprobe, regs, autask);
+
+	return 0;
+}
+
+/*
+ * This function is called by arch_uprobe_post_xol() to adjust the return
+ * address pushed by a call instruction executed out of line.
+ */
+static int adjust_ret_addr(unsigned long sp, long correction)
+{
+	int rasize, ncopied;
+	long ra = 0;
+
+	if (is_ia32_compat_task(current))
+		rasize = 4;
+	else
+		rasize = 8;
+
+	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
+	if (unlikely(ncopied))
+		return -EFAULT;
+
+	ra += correction;
+	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
+	if (unlikely(ncopied))
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_X86_64
+static bool is_riprel_insn(struct arch_uprobe *auprobe)
+{
+	return ((auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) != 0);
+}
+
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+	if (is_riprel_insn(auprobe)) {
+		struct arch_uprobe_task *autask;
+
+		autask = &current->utask->autask;
+		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
+			regs->ax = autask->saved_scratch_register;
+		else
+			regs->cx = autask->saved_scratch_register;
+
+		/*
+		 * The original instruction includes a displacement, and so
+		 * is 4 bytes longer than what we've just single-stepped.
+		 * Fall through to handle stuff like "jmpq *...(%rip)" and
+		 * "callq *...(%rip)".
+		 */
+		if (correction)
+			*correction += 4;
+	}
+}
+#else
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+	/* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * If xol insn itself traps and generates a signal(Say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+	if (t->thread.trap_nr != UPROBE_TRAP_NR)
+		return true;
+
+	return false;
+}
+
+/*
+ * Called after single-stepping. To avoid the SMP problems that can
+ * occur when we temporarily put back the original opcode to
+ * single-step, we single-stepped a copy of the instruction.
+ *
+ * This function prepares to resume execution after the single-step.
+ * We have to fix things up as follows:
+ *
+ * Typically, the new ip is relative to the copied instruction.  We need
+ * to make it relative to the original instruction (FIX_IP).  Exceptions
+ * are return instructions and absolute or indirect jump or call instructions.
+ *
+ * If the single-stepped instruction was a call, the return address that
+ * is atop the stack is the address following the copied instruction.  We
+ * need to make it the address following the original instruction (FIX_CALL).
+ *
+ * If the original instruction was a rip-relative instruction such as
+ * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
+ * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
+ * We need to restore the contents of the scratch register and adjust
+ * the ip, keeping in mind that the instruction we executed is 4 bytes
+ * shorter than the original instruction (since we squeezed out the offset
+ * field).  (FIX_RIP_AX or FIX_RIP_CX)
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask;
+	long correction;
+	int result = 0;
+
+	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+
+	utask = current->utask;
+	current->thread.trap_nr = utask->autask.saved_trap_nr;
+	correction = (long)(utask->vaddr - utask->xol_vaddr);
+	handle_riprel_post_xol(auprobe, regs, &correction);
+	if (auprobe->fixups & UPROBE_FIX_IP)
+		regs->ip += correction;
+
+	if (auprobe->fixups & UPROBE_FIX_CALL)
+		result = adjust_ret_addr(regs->sp, correction);
+
+	return result;
+}
+
+/* callback routine for handling exceptions. */
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+	struct die_args *args = data;
+	struct pt_regs *regs = args->regs;
+	int ret = NOTIFY_DONE;
+
+	/* We are only interested in userspace traps */
+	if (regs && !user_mode_vm(regs))
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_INT3:
+		if (uprobe_pre_sstep_notifier(regs))
+			ret = NOTIFY_STOP;
+
+		break;
+
+	case DIE_DEBUG:
+		if (uprobe_post_sstep_notifier(regs))
+			ret = NOTIFY_STOP;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal, so reset the instruction pointer to its
+ * probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	current->thread.trap_nr = utask->autask.saved_trap_nr;
+	handle_riprel_post_xol(auprobe, regs, NULL);
+	instruction_pointer_set(regs, utask->vaddr);
+}
+
+/*
+ * Skip these instructions as per the currently known x86 ISA.
+ * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < MAX_UINSN_BYTES; i++) {
+		if ((auprobe->insn[i] == 0x66))
+			continue;
+
+		if (auprobe->insn[i] == 0x90)
+			return true;
+
+		if (i == (MAX_UINSN_BYTES - 1))
+			break;
+
+		if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
+			return true;
+
+		if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
+			return true;
+
+		if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
+			return true;
+
+		break;
+	}
+	return false;
+}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e074e1e..5160fd1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1606,6 +1606,10 @@ struct task_struct {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_t ptrace_bp_refcnt;
 #endif
+#ifdef CONFIG_UPROBES
+	struct uprobe_task *utask;
+	int uprobe_srcu_id;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index eac525f..b020e23 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -39,6 +39,8 @@ struct vm_area_struct;
 
 /* Dont run handlers when first register/ last unregister in progress*/
 #define UPROBE_RUN_HANDLER	0x2
+/* Can skip singlestep */
+#define UPROBE_SKIP_SSTEP	0x4
 
 struct uprobe_consumer {
 	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
@@ -52,12 +54,40 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
+enum uprobe_task_state {
+	UTASK_RUNNING,
+	UTASK_BP_HIT,
+	UTASK_SSTEP,
+	UTASK_SSTEP_ACK,
+	UTASK_SSTEP_TRAPPED,
+};
+
+/*
+ * uprobe_task: Metadata of a task while it singlesteps.
+ */
+struct uprobe_task {
+	enum uprobe_task_state		state;
+	struct arch_uprobe_task		autask;
+
+	struct uprobe 			*active_uprobe;
+
+	unsigned long			xol_vaddr;
+	unsigned long			vaddr;
+};
+
 extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm,  unsigned long vaddr, bool verify);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
+extern void uprobe_free_utask(struct task_struct *t);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
+extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
+extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
+extern void uprobe_notify_resume(struct pt_regs *regs);
+extern bool uprobe_deny_signal(void);
+extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
 #else /* CONFIG_UPROBES is not defined */
 static inline int
 uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
@@ -72,5 +102,19 @@ static inline int uprobe_mmap(struct vm_area_struct *vma)
 {
 	return 0;
 }
+static inline void uprobe_notify_resume(struct pt_regs *regs)
+{
+}
+static inline bool uprobe_deny_signal(void)
+{
+	return false;
+}
+static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void uprobe_free_utask(struct task_struct *t)
+{
+}
 #endif /* CONFIG_UPROBES */
 #endif	/* _LINUX_UPROBES_H */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index e56e56a..a146c35 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -30,9 +30,12 @@
 #include <linux/rmap.h>		/* anon_vma_prepare */
 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
 #include <linux/swap.h>		/* try_to_free_swap */
+#include <linux/ptrace.h>	/* user_enable_single_step */
+#include <linux/kdebug.h>	/* notifier mechanism */
 
 #include <linux/uprobes.h>
 
+static struct srcu_struct uprobes_srcu;
 static struct rb_root uprobes_tree = RB_ROOT;
 
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
@@ -486,6 +489,9 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 	u = __insert_uprobe(uprobe);
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
 
+	/* For now assume that the instruction need not be single-stepped */
+	uprobe->flags |= UPROBE_SKIP_SSTEP;
+
 	return u;
 }
 
@@ -523,6 +529,21 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 	return uprobe;
 }
 
+static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+{
+	struct uprobe_consumer *uc;
+
+	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
+		return;
+
+	down_read(&uprobe->consumer_rwsem);
+	for (uc = uprobe->consumers; uc; uc = uc->next) {
+		if (!uc->filter || uc->filter(uc, current))
+			uc->handler(uc, regs);
+	}
+	up_read(&uprobe->consumer_rwsem);
+}
+
 /* Returns the previous consumer */
 static struct uprobe_consumer *
 consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
@@ -645,7 +666,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
-		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
+		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
 		if (ret)
 			return ret;
 
@@ -662,10 +683,21 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 	set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true);
 }
 
+/*
+ * There could be threads that have hit the breakpoint and are entering the
+ * notifier code and trying to acquire the uprobes_treelock. The thread
+ * calling delete_uprobe() that is removing the uprobe from the rb_tree can
+ * race with these threads and might acquire the uprobes_treelock compared
+ * to some of the breakpoint hit threads. In such a case, the breakpoint
+ * hit threads will not find the uprobe. The current unregistering thread
+ * waits till all other threads have hit a breakpoint, to acquire the
+ * uprobes_treelock before the uprobe is removed from the rbtree.
+ */
 static void delete_uprobe(struct uprobe *uprobe)
 {
 	unsigned long flags;
 
+	synchronize_srcu(&uprobes_srcu);
 	spin_lock_irqsave(&uprobes_treelock, flags);
 	rb_erase(&uprobe->rb_node, &uprobes_tree);
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -1010,6 +1042,279 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	return ret;
 }
 
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ */
+unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
+}
+
+/*
+ * Called with no locks held.
+ * Called in context of a exiting or a exec-ing thread.
+ */
+void uprobe_free_utask(struct task_struct *t)
+{
+	struct uprobe_task *utask = t->utask;
+
+	if (t->uprobe_srcu_id != -1)
+		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
+
+	if (!utask)
+		return;
+
+	if (utask->active_uprobe)
+		put_uprobe(utask->active_uprobe);
+
+	kfree(utask);
+	t->utask = NULL;
+}
+
+/*
+ * Allocate a uprobe_task object for the task.
+ * Called when the thread hits a breakpoint for the first time.
+ *
+ * Returns:
+ * - pointer to new uprobe_task on success
+ * - NULL otherwise
+ */
+static struct uprobe_task *add_utask(void)
+{
+	struct uprobe_task *utask;
+
+	utask = kzalloc(sizeof *utask, GFP_KERNEL);
+	if (unlikely(!utask))
+		return NULL;
+
+	utask->active_uprobe = NULL;
+	current->utask = utask;
+	return utask;
+}
+
+/* Prepare to single-step probed instruction out of line. */
+static int
+pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
+{
+	return -EFAULT;
+}
+
+/*
+ * If we are singlestepping, then ensure this thread is not connected to
+ * non-fatal signals until completion of singlestep.  When xol insn itself
+ * triggers the signal,  restart the original insn even if the task is
+ * already SIGKILL'ed (since coredump should report the correct ip).  This
+ * is even more important if the task has a handler for SIGSEGV/etc, The
+ * _same_ instruction should be repeated again after return from the signal
+ * handler, and SSTEP can never finish in this case.
+ */
+bool uprobe_deny_signal(void)
+{
+	struct task_struct *t = current;
+	struct uprobe_task *utask = t->utask;
+
+	if (likely(!utask || !utask->active_uprobe))
+		return false;
+
+	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
+
+	if (signal_pending(t)) {
+		spin_lock_irq(&t->sighand->siglock);
+		clear_tsk_thread_flag(t, TIF_SIGPENDING);
+		spin_unlock_irq(&t->sighand->siglock);
+
+		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
+			utask->state = UTASK_SSTEP_TRAPPED;
+			set_tsk_thread_flag(t, TIF_UPROBE);
+			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Avoid singlestepping the original instruction if the original instruction
+ * is a NOP or can be emulated.
+ */
+static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
+{
+	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
+		return true;
+
+	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
+	return false;
+}
+
+/*
+ * Run handler and ask thread to singlestep.
+ * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
+ */
+static void handle_swbp(struct pt_regs *regs)
+{
+	struct vm_area_struct *vma;
+	struct uprobe_task *utask;
+	struct uprobe *uprobe;
+	struct mm_struct *mm;
+	unsigned long bp_vaddr;
+
+	uprobe = NULL;
+	bp_vaddr = uprobe_get_swbp_addr(regs);
+	mm = current->mm;
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, bp_vaddr);
+
+	if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
+		struct inode *inode;
+		loff_t offset;
+
+		inode = vma->vm_file->f_mapping->host;
+		offset = bp_vaddr - vma->vm_start;
+		offset += (vma->vm_pgoff << PAGE_SHIFT);
+		uprobe = find_uprobe(inode, offset);
+	}
+
+	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
+	current->uprobe_srcu_id = -1;
+	up_read(&mm->mmap_sem);
+
+	if (!uprobe) {
+		/* No matching uprobe; signal SIGTRAP. */
+		send_sig(SIGTRAP, current, 0);
+		return;
+	}
+
+	utask = current->utask;
+	if (!utask) {
+		utask = add_utask();
+		/* Cannot allocate; re-execute the instruction. */
+		if (!utask)
+			goto cleanup_ret;
+	}
+	utask->active_uprobe = uprobe;
+	handler_chain(uprobe, regs);
+	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
+		goto cleanup_ret;
+
+	utask->state = UTASK_SSTEP;
+	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
+		user_enable_single_step(current);
+		return;
+	}
+
+cleanup_ret:
+	if (utask) {
+		utask->active_uprobe = NULL;
+		utask->state = UTASK_RUNNING;
+	}
+	if (uprobe) {
+		if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
+
+			/*
+			 * cannot singlestep; cannot skip instruction;
+			 * re-execute the instruction.
+			 */
+			instruction_pointer_set(regs, bp_vaddr);
+
+		put_uprobe(uprobe);
+	}
+}
+
+/*
+ * Perform required fix-ups and disable singlestep.
+ * Allow pending signals to take effect.
+ */
+static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
+{
+	struct uprobe *uprobe;
+
+	uprobe = utask->active_uprobe;
+	if (utask->state == UTASK_SSTEP_ACK)
+		arch_uprobe_post_xol(&uprobe->arch, regs);
+	else if (utask->state == UTASK_SSTEP_TRAPPED)
+		arch_uprobe_abort_xol(&uprobe->arch, regs);
+	else
+		WARN_ON_ONCE(1);
+
+	put_uprobe(uprobe);
+	utask->active_uprobe = NULL;
+	utask->state = UTASK_RUNNING;
+	user_disable_single_step(current);
+
+	spin_lock_irq(&current->sighand->siglock);
+	recalc_sigpending(); /* see uprobe_deny_signal() */
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag.  (and on
+ * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and
+ * allows the thread to return from interrupt.
+ *
+ * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
+ * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
+ * interrupt.
+ *
+ * While returning to userspace, thread notices the TIF_UPROBE flag and calls
+ * uprobe_notify_resume().
+ */
+void uprobe_notify_resume(struct pt_regs *regs)
+{
+	struct uprobe_task *utask;
+
+	utask = current->utask;
+	if (!utask || utask->state == UTASK_BP_HIT)
+		handle_swbp(regs);
+	else
+		handle_singlestep(utask, regs);
+}
+
+/*
+ * uprobe_pre_sstep_notifier gets called from interrupt context as part of
+ * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
+ */
+int uprobe_pre_sstep_notifier(struct pt_regs *regs)
+{
+	struct uprobe_task *utask;
+
+	if (!current->mm)
+		return 0;
+
+	utask = current->utask;
+	if (utask)
+		utask->state = UTASK_BP_HIT;
+
+	set_thread_flag(TIF_UPROBE);
+	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
+
+	return 1;
+}
+
+/*
+ * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
+ * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
+ */
+int uprobe_post_sstep_notifier(struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	if (!current->mm || !utask || !utask->active_uprobe)
+		/* task is currently not uprobed */
+		return 0;
+
+	utask->state = UTASK_SSTEP_ACK;
+	set_thread_flag(TIF_UPROBE);
+	return 1;
+}
+
+static struct notifier_block uprobe_exception_nb = {
+	.notifier_call = arch_uprobe_exception_notify,
+	.priority = INT_MAX - 1,	/* notified after kprobes, kgdb */
+};
+
 static int __init init_uprobes(void)
 {
 	int i;
@@ -1018,7 +1323,8 @@ static int __init init_uprobes(void)
 		mutex_init(&uprobes_mutex[i]);
 		mutex_init(&uprobes_mmap_mutex[i]);
 	}
-	return 0;
+	init_srcu_struct(&uprobes_srcu);
+	return register_die_notifier(&uprobe_exception_nb);
 }
 
 static void __exit exit_uprobes(void)
diff --git a/kernel/fork.c b/kernel/fork.c
index 26a7a67..36508b9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -67,6 +67,7 @@
 #include <linux/oom.h>
 #include <linux/khugepaged.h>
 #include <linux/signalfd.h>
+#include <linux/uprobes.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -731,6 +732,8 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 		exit_pi_state_list(tsk);
 #endif
 
+	uprobe_free_utask(tsk);
+
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
@@ -1322,6 +1325,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	INIT_LIST_HEAD(&p->pi_state_list);
 	p->pi_state_cache = NULL;
 #endif
+#ifdef CONFIG_UPROBES
+	p->utask = NULL;
+	p->uprobe_srcu_id = -1;
+#endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
 	 */
diff --git a/kernel/signal.c b/kernel/signal.c
index 8511e39..e93ff0a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -29,6 +29,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
 #include <linux/user_namespace.h>
+#include <linux/uprobes.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
 
@@ -2192,6 +2193,9 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 	struct signal_struct *signal = current->signal;
 	int signr;
 
+	if (unlikely(uprobe_deny_signal()))
+		return 0;
+
 relock:
 	/*
 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.




* [tip:perf/uprobes] uprobes/core: Make macro names consistent
  2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
                   ` (5 preceding siblings ...)
  2012-03-12  9:26 ` [PATCH v2 7/7] uprobes/core: Handle breakpoint and singlestep exception Srikar Dronamraju
@ 2012-03-13  9:41 ` tip-bot for Srikar Dronamraju
  6 siblings, 0 replies; 11+ messages in thread
From: tip-bot for Srikar Dronamraju @ 2012-03-13  9:41 UTC (permalink / raw
  To: linux-tip-commits
  Cc: mingo, torvalds, peterz, rostedt, jkenisto, tglx, oleg, linux-mm,
	hpa, linux-kernel, andi, hch, ananth, masami.hiramatsu.pt, acme,
	srikar, mingo

Commit-ID:  900771a483ef28915a48066d7895d8252315607a
Gitweb:     http://git.kernel.org/tip/900771a483ef28915a48066d7895d8252315607a
Author:     Srikar Dronamraju <srikar@linux.vnet.ibm.com>
AuthorDate: Mon, 12 Mar 2012 14:55:14 +0530
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 13 Mar 2012 06:22:20 +0100

uprobes/core: Make macro names consistent

Rename macros that refer to individual uprobe to start with
UPROBE_ instead of UPROBES_.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120312092514.5379.36595.sendpatchset@srdronam.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/include/asm/uprobes.h |    6 +++---
 arch/x86/kernel/uprobes.c      |   18 +++++++++---------
 include/linux/uprobes.h        |    4 ++--
 kernel/events/uprobes.c        |   18 +++++++++---------
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index f7ce310..5c399e4 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -26,10 +26,10 @@
 typedef u8 uprobe_opcode_t;
 
 #define MAX_UINSN_BYTES			  16
-#define UPROBES_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
+#define UPROBE_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
 
-#define UPROBES_BKPT_INSN		0xcc
-#define UPROBES_BKPT_INSN_SIZE		   1
+#define UPROBE_BKPT_INSN		0xcc
+#define UPROBE_BKPT_INSN_SIZE		   1
 
 struct arch_uprobe {
 	u16				fixups;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 04dfcef..6dfa89e 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -31,14 +31,14 @@
 /* Post-execution fixups. */
 
 /* No fixup needed */
-#define UPROBES_FIX_NONE	0x0
+#define UPROBE_FIX_NONE	0x0
 /* Adjust IP back to vicinity of actual insn */
-#define UPROBES_FIX_IP		0x1
+#define UPROBE_FIX_IP		0x1
 /* Adjust the return address of a call insn */
-#define UPROBES_FIX_CALL	0x2
+#define UPROBE_FIX_CALL	0x2
 
-#define UPROBES_FIX_RIP_AX	0x8000
-#define UPROBES_FIX_RIP_CX	0x4000
+#define UPROBE_FIX_RIP_AX	0x8000
+#define UPROBE_FIX_RIP_CX	0x4000
 
 /* Adaptations for mhiramat x86 decoder v14. */
 #define OPCODE1(insn)		((insn)->opcode.bytes[0])
@@ -269,9 +269,9 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
 		break;
 	}
 	if (fix_ip)
-		auprobe->fixups |= UPROBES_FIX_IP;
+		auprobe->fixups |= UPROBE_FIX_IP;
 	if (fix_call)
-		auprobe->fixups |= UPROBES_FIX_CALL;
+		auprobe->fixups |= UPROBE_FIX_CALL;
 }
 
 #ifdef CONFIG_X86_64
@@ -341,12 +341,12 @@ static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe
 		 * is NOT the register operand, so we use %rcx (register
 		 * #1) for the scratch register.
 		 */
-		auprobe->fixups = UPROBES_FIX_RIP_CX;
+		auprobe->fixups = UPROBE_FIX_RIP_CX;
 		/* Change modrm from 00 000 101 to 00 000 001. */
 		*cursor = 0x1;
 	} else {
 		/* Use %rax (register #0) for the scratch register. */
-		auprobe->fixups = UPROBES_FIX_RIP_AX;
+		auprobe->fixups = UPROBE_FIX_RIP_AX;
 		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
 		*cursor = (reg << 3);
 	}
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index f85797e..838fb31 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -35,10 +35,10 @@ struct vm_area_struct;
 /* flags that denote/change uprobes behaviour */
 
 /* Have a copy of original instruction */
-#define UPROBES_COPY_INSN	0x1
+#define UPROBE_COPY_INSN	0x1
 
 /* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBES_RUN_HANDLER	0x2
+#define UPROBE_RUN_HANDLER	0x2
 
 struct uprobe_consumer {
 	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5ce32e3..0d36bf3 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -177,7 +177,7 @@ out:
  */
 bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 {
-	return *insn == UPROBES_BKPT_INSN;
+	return *insn == UPROBE_BKPT_INSN;
 }
 
 /*
@@ -259,8 +259,8 @@ static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
 
 	/* poke the new insn in, ASSUMES we don't cross page boundary */
 	vaddr &= ~PAGE_MASK;
-	BUG_ON(vaddr + UPROBES_BKPT_INSN_SIZE > PAGE_SIZE);
-	memcpy(vaddr_new + vaddr, &opcode, UPROBES_BKPT_INSN_SIZE);
+	BUG_ON(vaddr + UPROBE_BKPT_INSN_SIZE > PAGE_SIZE);
+	memcpy(vaddr_new + vaddr, &opcode, UPROBE_BKPT_INSN_SIZE);
 
 	kunmap_atomic(vaddr_new);
 	kunmap_atomic(vaddr_old);
@@ -308,7 +308,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	lock_page(page);
 	vaddr_new = kmap_atomic(page);
 	vaddr &= ~PAGE_MASK;
-	memcpy(opcode, vaddr_new + vaddr, UPROBES_BKPT_INSN_SIZE);
+	memcpy(opcode, vaddr_new + vaddr, UPROBE_BKPT_INSN_SIZE);
 	kunmap_atomic(vaddr_new);
 	unlock_page(page);
 
@@ -352,7 +352,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned
 	if (result)
 		return result;
 
-	return write_opcode(mm, auprobe, vaddr, UPROBES_BKPT_INSN);
+	return write_opcode(mm, auprobe, vaddr, UPROBE_BKPT_INSN);
 }
 
 /**
@@ -635,7 +635,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 
 	addr = (unsigned long)vaddr;
 
-	if (!(uprobe->flags & UPROBES_COPY_INSN)) {
+	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
 		ret = copy_insn(uprobe, vma, addr);
 		if (ret)
 			return ret;
@@ -647,7 +647,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (ret)
 			return ret;
 
-		uprobe->flags |= UPROBES_COPY_INSN;
+		uprobe->flags |= UPROBE_COPY_INSN;
 	}
 	ret = set_bkpt(mm, &uprobe->arch, addr);
 
@@ -857,7 +857,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 			uprobe->consumers = NULL;
 			__uprobe_unregister(uprobe);
 		} else {
-			uprobe->flags |= UPROBES_RUN_HANDLER;
+			uprobe->flags |= UPROBE_RUN_HANDLER;
 		}
 	}
 
@@ -889,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 	if (consumer_del(uprobe, consumer)) {
 		if (!uprobe->consumers) {
 			__uprobe_unregister(uprobe);
-			uprobe->flags &= ~UPROBES_RUN_HANDLER;
+			uprobe->flags &= ~UPROBE_RUN_HANDLER;
 		}
 	}
 


* [tip:perf/uprobes] uprobes/core: Make order of function parameters consistent across functions
  2012-03-12  9:25 ` [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions Srikar Dronamraju
@ 2012-03-13  9:42   ` tip-bot for Srikar Dronamraju
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot for Srikar Dronamraju @ 2012-03-13  9:42 UTC (permalink / raw
  To: linux-tip-commits
  Cc: mingo, torvalds, peterz, rostedt, jkenisto, tglx, oleg, linux-mm,
	hpa, linux-kernel, andi, hch, ananth, masami.hiramatsu.pt, acme,
	srikar, mingo

Commit-ID:  e3343e6a2819ff5d0dfc4bb5c9fb7f9a4d04da73
Gitweb:     http://git.kernel.org/tip/e3343e6a2819ff5d0dfc4bb5c9fb7f9a4d04da73
Author:     Srikar Dronamraju <srikar@linux.vnet.ibm.com>
AuthorDate: Mon, 12 Mar 2012 14:55:30 +0530
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 13 Mar 2012 06:22:20 +0100

uprobes/core: Make order of function parameters consistent across functions

If a function takes struct uprobe or struct arch_uprobe, then it
is passed as the first parameter.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120312092530.5379.18394.sendpatchset@srdronam.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/include/asm/uprobes.h |    2 +-
 arch/x86/kernel/uprobes.c      |   15 +++---
 include/linux/uprobes.h        |   12 +++---
 kernel/events/uprobes.c        |   93 ++++++++++++++++++++-------------------
 4 files changed, 63 insertions(+), 59 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 5c399e4..384f1be 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -39,5 +39,5 @@ struct arch_uprobe {
 #endif
 };
 
-extern int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *arch_uprobe);
+extern int arch_uprobes_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
 #endif	/* _ASM_UPROBES_H */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6dfa89e..851a11b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -297,7 +297,8 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
  *  - There's never a SIB byte.
  *  - The displacement is always 4 bytes.
  */
-static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static void
+handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	u8 *cursor;
 	u8 reg;
@@ -381,19 +382,19 @@ static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
 	return -ENOTSUPP;
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	if (mm->context.ia32_compat)
 		return validate_insn_32bits(auprobe, insn);
 	return validate_insn_64bits(auprobe, insn);
 }
 #else /* 32-bit: */
-static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static void handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
 {
 	/* No RIP-relative addressing on 32-bit */
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,  struct insn *insn)
 {
 	return validate_insn_32bits(auprobe, insn);
 }
@@ -405,17 +406,17 @@ static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe,
  * @arch_uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *auprobe)
+int arch_uprobes_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
 {
 	int ret;
 	struct insn insn;
 
 	auprobe->fixups = 0;
-	ret = validate_insn_bits(mm, auprobe, &insn);
+	ret = validate_insn_bits(auprobe, mm, &insn);
 	if (ret != 0)
 		return ret;
 
-	handle_riprel_insn(mm, auprobe, &insn);
+	handle_riprel_insn(auprobe, mm, &insn);
 	prepare_fixups(auprobe, &insn);
 
 	return 0;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 838fb31..5869918 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -52,20 +52,20 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr);
-extern int __weak set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify);
+extern int __weak set_bkpt(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm,  unsigned long vaddr, bool verify);
 extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
-extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
-extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
+extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
 #else /* CONFIG_UPROBES is not defined */
 static inline int
-uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	return -ENOSYS;
 }
 static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 }
 static inline int uprobe_mmap(struct vm_area_struct *vma)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 0d36bf3..9c5ddff 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -192,8 +192,8 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 
 /*
  * write_opcode - write the opcode at a given virtual address.
+ * @auprobe: arch breakpointing information.
  * @mm: the probed process address space.
- * @arch_uprobe: the breakpointing information.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
  *
@@ -203,7 +203,7 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
  * For mm @mm, write the opcode at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
+static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 			unsigned long vaddr, uprobe_opcode_t opcode)
 {
 	struct page *old_page, *new_page;
@@ -334,14 +334,14 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 
 /**
  * set_bkpt - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
  * @mm: the probed process address space.
- * @uprobe: the probepoint information.
  * @vaddr: the virtual address to insert the opcode.
  *
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr)
+int __weak set_bkpt(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
 	int result;
 
@@ -352,13 +352,13 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned
 	if (result)
 		return result;
 
-	return write_opcode(mm, auprobe, vaddr, UPROBE_BKPT_INSN);
+	return write_opcode(auprobe, mm, vaddr, UPROBE_BKPT_INSN);
 }
 
 /**
  * set_orig_insn - Restore the original instruction.
  * @mm: the probed process address space.
- * @uprobe: the probepoint information.
+ * @auprobe: arch specific probepoint information.
  * @vaddr: the virtual address to insert the opcode.
  * @verify: if true, verify existance of breakpoint instruction.
  *
@@ -366,7 +366,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned
  * Return 0 (success) or a negative errno.
  */
 int __weak
-set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify)
+set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
 {
 	if (verify) {
 		int result;
@@ -378,7 +378,7 @@ set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long v
 		if (result != 1)
 			return result;
 	}
-	return write_opcode(mm, auprobe, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -525,30 +525,30 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 
 /* Returns the previous consumer */
 static struct uprobe_consumer *
-consumer_add(struct uprobe *uprobe, struct uprobe_consumer *consumer)
+consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
 	down_write(&uprobe->consumer_rwsem);
-	consumer->next = uprobe->consumers;
-	uprobe->consumers = consumer;
+	uc->next = uprobe->consumers;
+	uprobe->consumers = uc;
 	up_write(&uprobe->consumer_rwsem);
 
-	return consumer->next;
+	return uc->next;
 }
 
 /*
- * For uprobe @uprobe, delete the consumer @consumer.
- * Return true if the @consumer is deleted successfully
+ * For uprobe @uprobe, delete the consumer @uc.
+ * Return true if the @uc is deleted successfully
  * or return false.
  */
-static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer)
+static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
 	struct uprobe_consumer **con;
 	bool ret = false;
 
 	down_write(&uprobe->consumer_rwsem);
 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
-		if (*con == consumer) {
-			*con = consumer->next;
+		if (*con == uc) {
+			*con = uc->next;
 			ret = true;
 			break;
 		}
@@ -558,8 +558,8 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer
 	return ret;
 }
 
-static int __copy_insn(struct address_space *mapping,
-			struct vm_area_struct *vma, char *insn,
+static int
+__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
 			unsigned long nbytes, unsigned long offset)
 {
 	struct file *filp = vma->vm_file;
@@ -590,7 +590,8 @@ static int __copy_insn(struct address_space *mapping,
 	return 0;
 }
 
-static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int
+copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct address_space *mapping;
 	unsigned long nbytes;
@@ -617,8 +618,9 @@ static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned
 	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
-static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
-				struct vm_area_struct *vma, loff_t vaddr)
+static int
+install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
+			struct vm_area_struct *vma, loff_t vaddr)
 {
 	unsigned long addr;
 	int ret;
@@ -643,20 +645,21 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
-		ret = arch_uprobes_analyze_insn(mm, &uprobe->arch);
+		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
 		if (ret)
 			return ret;
 
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
-	ret = set_bkpt(mm, &uprobe->arch, addr);
+	ret = set_bkpt(&uprobe->arch, mm, addr);
 
 	return ret;
 }
 
-static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
+static void
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 {
-	set_orig_insn(mm, &uprobe->arch, (unsigned long)vaddr, true);
+	set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true);
 }
 
 static void delete_uprobe(struct uprobe *uprobe)
@@ -671,9 +674,9 @@ static void delete_uprobe(struct uprobe *uprobe)
 	atomic_dec(&uprobe_events);
 }
 
-static struct vma_info *__find_next_vma_info(struct list_head *head,
-			loff_t offset, struct address_space *mapping,
-			struct vma_info *vi, bool is_register)
+static struct vma_info *
+__find_next_vma_info(struct address_space *mapping, struct list_head *head,
+			struct vma_info *vi, loff_t offset, bool is_register)
 {
 	struct prio_tree_iter iter;
 	struct vm_area_struct *vma;
@@ -719,8 +722,8 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
  * yet been inserted.
  */
 static struct vma_info *
-find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *mapping,
-		   bool is_register)
+find_next_vma_info(struct address_space *mapping, struct list_head *head,
+		loff_t offset, bool is_register)
 {
 	struct vma_info *vi, *retvi;
 
@@ -729,7 +732,7 @@ find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *
 		return ERR_PTR(-ENOMEM);
 
 	mutex_lock(&mapping->i_mmap_mutex);
-	retvi = __find_next_vma_info(head, offset, mapping, vi, is_register);
+	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
 	mutex_unlock(&mapping->i_mmap_mutex);
 
 	if (!retvi)
@@ -754,7 +757,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 	ret = 0;
 
 	for (;;) {
-		vi = find_next_vma_info(&try_list, uprobe->offset, mapping, is_register);
+		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
 		if (!vi)
 			break;
 
@@ -784,9 +787,9 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 		}
 
 		if (is_register)
-			ret = install_breakpoint(mm, uprobe, vma, vi->vaddr);
+			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
 		else
-			remove_breakpoint(mm, uprobe, vi->vaddr);
+			remove_breakpoint(uprobe, mm, vi->vaddr);
 
 		up_read(&mm->mmap_sem);
 		mmput(mm);
@@ -823,25 +826,25 @@ static void __uprobe_unregister(struct uprobe *uprobe)
  * uprobe_register - register a probe
  * @inode: the file in which the probe has to be placed.
  * @offset: offset from the start of the file.
- * @consumer: information on howto handle the probe..
+ * @uc: information on howto handle the probe..
  *
  * Apart from the access refcount, uprobe_register() takes a creation
  * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
  * inserted into the rbtree (i.e first consumer for a @inode:@offset
  * tuple).  Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
- * refcount is released when the last @consumer for the @uprobe
+ * refcount is released when the last @uc for the @uprobe
  * unregisters.
  *
  * Return errno if it cannot successully install probes
  * else return 0 (success)
  */
-int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	struct uprobe *uprobe;
 	int ret;
 
-	if (!inode || !consumer || consumer->next)
+	if (!inode || !uc || uc->next)
 		return -EINVAL;
 
 	if (offset > i_size_read(inode))
@@ -851,7 +854,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 	mutex_lock(uprobes_hash(inode));
 	uprobe = alloc_uprobe(inode, offset);
 
-	if (uprobe && !consumer_add(uprobe, consumer)) {
+	if (uprobe && !consumer_add(uprobe, uc)) {
 		ret = __uprobe_register(uprobe);
 		if (ret) {
 			uprobe->consumers = NULL;
@@ -871,13 +874,13 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
  * uprobe_unregister - unregister a already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
- * @consumer: identify which probe if multiple probes are colocated.
+ * @uc: identify which probe if multiple probes are colocated.
  */
-void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
+void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
 	struct uprobe *uprobe;
 
-	if (!inode || !consumer)
+	if (!inode || !uc)
 		return;
 
 	uprobe = find_uprobe(inode, offset);
@@ -886,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 
 	mutex_lock(uprobes_hash(inode));
 
-	if (consumer_del(uprobe, consumer)) {
+	if (consumer_del(uprobe, uc)) {
 		if (!uprobe->consumers) {
 			__uprobe_unregister(uprobe);
 			uprobe->flags &= ~UPROBE_RUN_HANDLER;
@@ -993,7 +996,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 		if (!ret) {
 			vaddr = vma_address(vma, uprobe->offset);
 			if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
-				ret = install_breakpoint(vma->vm_mm, uprobe, vma, vaddr);
+				ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
 				/* Ignore double add: */
 				if (ret == -EEXIST)
 					ret = 0;


* [tip:perf/uprobes] uprobes/core: Rename bkpt to swbp
  2012-03-12  9:25 ` [PATCH v2 3/7] uprobes/core: Rename bkpt to swbp Srikar Dronamraju
@ 2012-03-13  9:43   ` tip-bot for Srikar Dronamraju
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot for Srikar Dronamraju @ 2012-03-13  9:43 UTC (permalink / raw
  To: linux-tip-commits
  Cc: mingo, torvalds, peterz, rostedt, jkenisto, tglx, oleg, linux-mm,
	hpa, linux-kernel, andi, hch, ananth, masami.hiramatsu.pt, acme,
	srikar, mingo

Commit-ID:  5cb4ac3a583d4ee18c8682ab857e093c4a0d0895
Gitweb:     http://git.kernel.org/tip/5cb4ac3a583d4ee18c8682ab857e093c4a0d0895
Author:     Srikar Dronamraju <srikar@linux.vnet.ibm.com>
AuthorDate: Mon, 12 Mar 2012 14:55:45 +0530
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 13 Mar 2012 06:22:21 +0100

uprobes/core: Rename bkpt to swbp

bkpt doesn't seem to be a correct abbreviation for breakpoint.
The choice was between bp and breakpoint. Since bp can refer to
things other than a breakpoint, use swbp to refer to breakpoints.

This is pure cleanup, no functional change intended.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120312092545.5379.91251.sendpatchset@srdronam.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/include/asm/uprobes.h |    4 ++--
 include/linux/uprobes.h        |    4 ++--
 kernel/events/uprobes.c        |   34 +++++++++++++++++-----------------
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 384f1be..0500391 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -28,8 +28,8 @@ typedef u8 uprobe_opcode_t;
 #define MAX_UINSN_BYTES			  16
 #define UPROBE_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
 
-#define UPROBE_BKPT_INSN		0xcc
-#define UPROBE_BKPT_INSN_SIZE		   1
+#define UPROBE_SWBP_INSN		0xcc
+#define UPROBE_SWBP_INSN_SIZE		   1
 
 struct arch_uprobe {
 	u16				fixups;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 5869918..eac525f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -52,9 +52,9 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm,  unsigned long vaddr, bool verify);
-extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
+extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 9c5ddff..e56e56a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -170,14 +170,14 @@ out:
 }
 
 /**
- * is_bkpt_insn - check if instruction is breakpoint instruction.
+ * is_swbp_insn - check if instruction is breakpoint instruction.
  * @insn: instruction to be checked.
- * Default implementation of is_bkpt_insn
+ * Default implementation of is_swbp_insn
  * Returns true if @insn is a breakpoint instruction.
  */
-bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
+bool __weak is_swbp_insn(uprobe_opcode_t *insn)
 {
-	return *insn == UPROBE_BKPT_INSN;
+	return *insn == UPROBE_SWBP_INSN;
 }
 
 /*
@@ -227,7 +227,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	 * adding probes in write mapped pages since the breakpoints
 	 * might end up in the file copy.
 	 */
-	if (!valid_vma(vma, is_bkpt_insn(&opcode)))
+	if (!valid_vma(vma, is_swbp_insn(&opcode)))
 		goto put_out;
 
 	uprobe = container_of(auprobe, struct uprobe, arch);
@@ -259,8 +259,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 	/* poke the new insn in, ASSUMES we don't cross page boundary */
 	vaddr &= ~PAGE_MASK;
-	BUG_ON(vaddr + UPROBE_BKPT_INSN_SIZE > PAGE_SIZE);
-	memcpy(vaddr_new + vaddr, &opcode, UPROBE_BKPT_INSN_SIZE);
+	BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+	memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	kunmap_atomic(vaddr_new);
 	kunmap_atomic(vaddr_old);
@@ -308,7 +308,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	lock_page(page);
 	vaddr_new = kmap_atomic(page);
 	vaddr &= ~PAGE_MASK;
-	memcpy(opcode, vaddr_new + vaddr, UPROBE_BKPT_INSN_SIZE);
+	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
 	kunmap_atomic(vaddr_new);
 	unlock_page(page);
 
@@ -317,7 +317,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
 	return 0;
 }
 
-static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
 {
 	uprobe_opcode_t opcode;
 	int result;
@@ -326,14 +326,14 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	if (result)
 		return result;
 
-	if (is_bkpt_insn(&opcode))
+	if (is_swbp_insn(&opcode))
 		return 1;
 
 	return 0;
 }
 
 /**
- * set_bkpt - store breakpoint at a given address.
+ * set_swbp - store breakpoint at a given address.
  * @auprobe: arch specific probepoint information.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to insert the opcode.
@@ -341,18 +341,18 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
 	int result;
 
-	result = is_bkpt_at_addr(mm, vaddr);
+	result = is_swbp_at_addr(mm, vaddr);
 	if (result == 1)
 		return -EEXIST;
 
 	if (result)
 		return result;
 
-	return write_opcode(auprobe, mm, vaddr, UPROBE_BKPT_INSN);
+	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
 }
 
 /**
@@ -371,7 +371,7 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
 	if (verify) {
 		int result;
 
-		result = is_bkpt_at_addr(mm, vaddr);
+		result = is_swbp_at_addr(mm, vaddr);
 		if (!result)
 			return -EINVAL;
 
@@ -642,7 +642,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
+		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
 		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
@@ -651,7 +651,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
-	ret = set_bkpt(&uprobe->arch, mm, addr);
+	ret = set_swbp(&uprobe->arch, mm, addr);
 
 	return ret;
 }
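
As an aside, a small user-space sketch (not from this series) of the check
that is_swbp_insn() performs: the x86 software breakpoint is the one-byte
INT3 opcode 0xcc, matching UPROBE_SWBP_INSN above. The program and the byte
values below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define UPROBE_SWBP_INSN	0xcc	/* INT3, as in the x86 header above */

static int is_swbp_byte(const uint8_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

int main(void)
{
	/* push %rbp; int3; nop; ret */
	uint8_t text[4] = { 0x55, 0xcc, 0x90, 0xc3 };
	int i;

	for (i = 0; i < 4; i++)
		printf("byte %d (0x%02x): %s\n", i, text[i],
		       is_swbp_byte(&text[i]) ? "swbp" : "other");
	return 0;
}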


* [tip:x86/cleanups] x86: Rename trap_no to trap_nr in thread_struct
  2012-03-12  9:25 ` [PATCH v2 4/7] x86/trivial: Rename trap_no to trap_nr in thread struct Srikar Dronamraju
@ 2012-03-13  9:44   ` tip-bot for Srikar Dronamraju
  0 siblings, 0 replies; 11+ messages in thread
From: tip-bot for Srikar Dronamraju @ 2012-03-13  9:44 UTC (permalink / raw
  To: linux-tip-commits
  Cc: mingo, torvalds, peterz, mingo, rostedt, jkenisto, oleg, tglx,
	linux-mm, hpa, linux-kernel, andi, hch, ananth,
	masami.hiramatsu.pt, acme, srikar, mingo

Commit-ID:  51e7dc7011c99e1e5294658c7b551b92ca069985
Gitweb:     http://git.kernel.org/tip/51e7dc7011c99e1e5294658c7b551b92ca069985
Author:     Srikar Dronamraju <srikar@linux.vnet.ibm.com>
AuthorDate: Mon, 12 Mar 2012 14:55:55 +0530
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 13 Mar 2012 06:24:09 +0100

x86: Rename trap_no to trap_nr in thread_struct

There are precedents for the trap number being referred to as
trap_nr; however, thread_struct refers to the trap number as
trap_no. Change it to trap_nr.

Also use enum instead of left-over literals for trap values.

This is pure cleanup, no functional change intended.

Suggested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120312092555.5379.942.sendpatchset@srdronam.in.ibm.com
[ Fixed the math-emu build ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/ia32/ia32_signal.c      |    2 +-
 arch/x86/include/asm/processor.h |    2 +-
 arch/x86/kernel/dumpstack.c      |    2 +-
 arch/x86/kernel/ptrace.c         |    3 ++-
 arch/x86/kernel/signal.c         |    2 +-
 arch/x86/kernel/traps.c          |   16 ++++++++--------
 arch/x86/kernel/vm86_32.c        |    2 +-
 arch/x86/kernel/vsyscall_64.c    |    2 +-
 arch/x86/math-emu/fpu_entry.c    |    5 +++--
 arch/x86/mm/fault.c              |   10 +++++-----
 10 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index bc09ed2..45b4fdd 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -345,7 +345,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 		put_user_ex(regs->dx, &sc->dx);
 		put_user_ex(regs->cx, &sc->cx);
 		put_user_ex(regs->ax, &sc->ax);
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 02ce0b3..f6d0d2e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -453,7 +453,7 @@ struct thread_struct {
 	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4..28f9870 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #endif
 	printk("\n");
 	if (notify_die(DIE_OOPS, str, regs, err,
-			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return 1;
 
 	show_registers(regs);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 93e7877a..6fb330a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -33,6 +33,7 @@
 #include <asm/prctl.h>
 #include <asm/proto.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
 
 #include "tls.h"
 
@@ -1425,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
 				int error_code, int si_code,
 				struct siginfo *info)
 {
-	tsk->thread.trap_no = 1;
+	tsk->thread.trap_nr = X86_TRAP_DB;
 	tsk->thread.error_code = error_code;
 
 	memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index c3846b6..9c73acc 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -150,7 +150,7 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 		put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.trap_nr, &sc->trapno);
 		put_user_ex(current->thread.error_code, &sc->err);
 		put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 037fc2b..c6d17ad 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 trap_signal:
 #endif
 	/*
-	 * We want error_code and trap_no set for userspace faults and
+	 * We want error_code and trap_nr set for userspace faults and
 	 * kernelspace faults which result in die(), but not
 	 * kernelspace faults which are fixed up.  die() gives the
 	 * process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
 	 * delivered, faults.  See also do_general_protection below.
 	 */
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = trapnr;
+	tsk->thread.trap_nr = trapnr;
 
 #ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
 kernel_trap:
 	if (!fixup_exception(regs)) {
 		tsk->thread.error_code = error_code;
-		tsk->thread.trap_no = trapnr;
+		tsk->thread.trap_nr = trapnr;
 		die(str, regs, error_code);
 	}
 	return;
@@ -240,7 +240,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_DF;
+	tsk->thread.trap_nr = X86_TRAP_DF;
 
 	/*
 	 * This is always a kernel trap and never fixable (and thus must
@@ -268,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		goto gp_in_kernel;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_GP;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 			printk_ratelimit()) {
@@ -295,7 +295,7 @@ gp_in_kernel:
 		return;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = X86_TRAP_GP;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
 		return;
@@ -475,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	{
 		if (!fixup_exception(regs)) {
 			task->thread.error_code = error_code;
-			task->thread.trap_no = trapnr;
+			task->thread.trap_nr = trapnr;
 			die(str, regs, error_code);
 		}
 		return;
@@ -485,7 +485,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	 * Save the info for the exception handler and clear the error.
 	 */
 	save_init_fpu(task);
-	task->thread.trap_no = trapnr;
+	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index b466cab..a1315ab 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -567,7 +567,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 	}
 	if (trapno != 1)
 		return 1; /* we let this handle by the calling routine */
-	current->thread.trap_no = trapno;
+	current->thread.trap_nr = trapno;
 	current->thread.error_code = error_code;
 	force_sig(SIGTRAP, current);
 	return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba93..327509b 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -153,7 +153,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 
 		thread->error_code	= 6;  /* user fault, no page, write */
 		thread->cr2		= ptr;
-		thread->trap_no		= 14;
+		thread->trap_nr		= X86_TRAP_PF;
 
 		memset(&info, 0, sizeof(info));
 		info.si_signo		= SIGSEGV;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541..9b86812 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -28,6 +28,7 @@
 #include <linux/regset.h>
 
 #include <asm/uaccess.h>
+#include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/i387.h>
@@ -269,7 +270,7 @@ void math_emulate(struct math_emu_info *info)
 			FPU_EIP = FPU_ORIG_EIP;	/* Point to current FPU instruction. */
 
 			RE_ENTRANT_CHECK_OFF;
-			current->thread.trap_no = 16;
+			current->thread.trap_nr = X86_TRAP_MF;
 			current->thread.error_code = 0;
 			send_sig(SIGFPE, current, 1);
 			return;
@@ -662,7 +663,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
 void math_abort(struct math_emu_info *info, unsigned int signal)
 {
 	FPU_EIP = FPU_ORIG_EIP;
-	current->thread.trap_no = 16;
+	current->thread.trap_nr = X86_TRAP_MF;
 	current->thread.error_code = 0;
 	send_sig(signal, current, 1);
 	RE_ENTRANT_CHECK_OFF;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f0b4caf..3ecfd1a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 	dump_pagetable(address);
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	if (__die("Bad pagetable", regs, error_code))
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs)) {
 		if (current_thread_info()->sig_on_uaccess_error && signal) {
-			tsk->thread.trap_no = 14;
+			tsk->thread.trap_nr = X86_TRAP_PF;
 			tsk->thread.error_code = error_code | PF_USER;
 			tsk->thread.cr2 = address;
 
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	sig = SIGKILL;
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		/* Kernel addresses are always protection faults: */
 		tsk->thread.cr2		= address;
 		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
-		tsk->thread.trap_no	= 14;
+		tsk->thread.trap_nr	= X86_TRAP_PF;
 
 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 
 	tsk->thread.cr2		= address;
 	tsk->thread.error_code	= error_code;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 
 #ifdef CONFIG_MEMORY_FAILURE
 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
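
For quick reference, a sketch (not the actual <asm/traps.h> definition, and
only a subset of it) of the vector numbers behind the constants substituted
above for the previous hard-coded values 1, 14 and 16:

enum {
	X86_TRAP_DB = 1,	/* debug exception; replaces the literal 1 in fill_sigtrap_info() */
	X86_TRAP_PF = 14,	/* page fault; replaces the literal 14 in mm/fault.c and vsyscall_64.c */
	X86_TRAP_MF = 16,	/* x87 FP exception; replaces the literal 16 in math-emu */
};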


end of thread, other threads: [~2012-03-13  9:44 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-03-12  9:25 [PATCH v2 1/7] uprobes/core: Make macro names consistent Srikar Dronamraju
2012-03-12  9:25 ` [PATCH v2 2/7] uprobes/core: Make order of function parameters consistent across functions Srikar Dronamraju
2012-03-13  9:42   ` [tip:perf/uprobes] " tip-bot for Srikar Dronamraju
2012-03-12  9:25 ` [PATCH v2 3/7] uprobes/core: Rename bkpt to swbp Srikar Dronamraju
2012-03-13  9:43   ` [tip:perf/uprobes] " tip-bot for Srikar Dronamraju
2012-03-12  9:25 ` [PATCH v2 4/7] x86/trivial: Rename trap_no to trap_nr in thread struct Srikar Dronamraju
2012-03-13  9:44   ` [tip:x86/cleanups] x86: Rename trap_no to trap_nr in thread_struct tip-bot for Srikar Dronamraju
2012-03-12  9:26 ` [PATCH v2 5/7] x86/trivial: Fix 'old_rsp' undefined build failure when including asm/compat.h Srikar Dronamraju
2012-03-12  9:26 ` [PATCH v2 6/7] x86/trivial: Use is_ia32_compat_task Srikar Dronamraju
2012-03-12  9:26 ` [PATCH v2 7/7] uprobes/core: Handle breakpoint and singlestep exception Srikar Dronamraju
2012-03-13  9:41 ` [tip:perf/uprobes] uprobes/core: Make macro names consistent tip-bot for Srikar Dronamraju
