From: Ingo Molnar <mingo@kernel.org>
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Arnaldo Carvalho de Melo <acme@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Andrew Morton <akpm@linux-foundation.org>
Subject: [GIT PULL] perf fixes
Date: Thu, 20 Jun 2013 10:58:24 +0200
Message-ID: <20130620085824.GA6346@gmail.com>

Linus,

Please pull the latest perf-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf-urgent-for-linus

   HEAD: f1a527899ef0a8a241eb3bea619eb2e29d797f44 perf/x86: Fix broken PEBS-LL support on SNB-EP/IVB-EP

Four fixes. The mmap ones are unfortunately larger than desired - fuzzing 
uncovered bugs that needed perf context lifetime management changes to fix 
properly. (The new per-buffer accounting scheme is sketched just after the 
diffstat below.)

Thanks,

	Ingo

------------------>
Masami Hiramatsu (1):
      kprobes: Fix to free gone and unused optprobes

Peter Zijlstra (2):
      perf: Fix perf mmap bugs
      perf: Fix mmap() accounting hole

Stephane Eranian (1):
      perf/x86: Fix broken PEBS-LL support on SNB-EP/IVB-EP


 arch/x86/kernel/cpu/perf_event_intel.c |   2 +-
 include/linux/perf_event.h             |   3 +-
 kernel/events/core.c                   | 233 +++++++++++++++++++++++----------
 kernel/events/internal.h               |   4 +
 kernel/kprobes.c                       |  30 +++--
 5 files changed, 187 insertions(+), 85 deletions(-)
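
Before the patch itself, a note on the shape of the mmap fixes: the mmap
bookkeeping (mmap_count, mmap_locked, mmap_user) moves from struct perf_event
into struct ring_buffer, so that a buffer shared between several events via
perf_event_set_output() is accounted once per buffer instead of once per
event. A new mapping may then only take a reference while the buffer is still
alive. Here is a minimal user-space sketch of that take-a-reference-unless-zero
idiom (stand-in types, not the kernel code; the patch uses
atomic_inc_not_zero() for this):

   #include <stdatomic.h>
   #include <stdbool.h>

   struct rb_sketch {
           atomic_int mmap_count;  /* live mmap()s of this buffer */
   };

   /*
    * Take an mmap reference only while at least one mapping is still
    * alive; a count that has reached zero means the buffer is being
    * torn down and must not be revived.
    */
   static bool rb_mmap_get(struct rb_sketch *rb)
   {
           int old = atomic_load(&rb->mmap_count);

           while (old != 0) {
                   /* on failure, 'old' is reloaded with the current count */
                   if (atomic_compare_exchange_weak(&rb->mmap_count,
                                                    &old, old + 1))
                           return true;    /* got a reference */
           }
           return false;                   /* raced with the final unmap */
   }

   int main(void)
   {
           struct rb_sketch rb = { .mmap_count = 1 };      /* first mmap() */

           if (rb_mmap_get(&rb))                           /* second mmap() */
                   atomic_fetch_sub(&rb.mmap_count, 1);    /* ...unmapped */
           return 0;
   }

In the patch, perf_mmap() handles the failing case by dropping mmap_mutex and
retrying from scratch, because a concurrent perf_mmap_close() may be tearing
the buffer down at that moment.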

diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f60d41f..a9e2207 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f463a46..c5b6dbf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -389,8 +389,7 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	int				mmap_locked;
-	struct user_struct		*mmap_user;
+
 	struct ring_buffer		*rb;
 	struct list_head		rb_entry;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9dc297f..b391907 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,9 +196,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-			       struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2918,6 +2915,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2942,15 +2940,30 @@ static void free_event(struct perf_event *event)
 		if (has_branch_stack(event)) {
 			static_key_slow_dec_deferred(&perf_sched_events);
 			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK))
+			if (!(event->attach_state & PERF_ATTACH_TASK)) {
 				atomic_dec(&per_cpu(perf_branch_stack_events,
 						    event->cpu));
+			}
 		}
 	}
 
 	if (event->rb) {
-		ring_buffer_put(event->rb);
-		event->rb = NULL;
+		struct ring_buffer *rb;
+
+		/*
+		 * Can happen when we close an event with re-directed output.
+		 *
+		 * Since we have a 0 refcount, perf_mmap_close() will skip
+		 * over us; possibly making our ring_buffer_put() the last.
+		 */
+		mutex_lock(&event->mmap_mutex);
+		rb = event->rb;
+		if (rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* could be last */
+		}
+		mutex_unlock(&event->mmap_mutex);
 	}
 
 	if (is_cgroup_event(event))
@@ -3188,30 +3201,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	unsigned int events = POLL_HUP;
 
 	/*
-	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
-	 * grabs the rb reference but perf_event_set_output() overrides it.
-	 * Here is the timeline for two threads T1, T2:
-	 * t0: T1, rb = rcu_dereference(event->rb)
-	 * t1: T2, old_rb = event->rb
-	 * t2: T2, event->rb = new rb
-	 * t3: T2, ring_buffer_detach(old_rb)
-	 * t4: T1, ring_buffer_attach(rb1)
-	 * t5: T1, poll_wait(event->waitq)
-	 *
-	 * To avoid this problem, we grab mmap_mutex in perf_poll()
-	 * thereby ensuring that the assignment of the new ring buffer
-	 * and the detachment of the old buffer appear atomic to perf_poll()
+	 * Pin the event->rb by taking event->mmap_mutex; otherwise
+	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
 	 */
 	mutex_lock(&event->mmap_mutex);
-
-	rcu_read_lock();
-	rb = rcu_dereference(event->rb);
-	if (rb) {
-		ring_buffer_attach(event, rb);
+	rb = event->rb;
+	if (rb)
 		events = atomic_xchg(&rb->poll, 0);
-	}
-	rcu_read_unlock();
-
 	mutex_unlock(&event->mmap_mutex);
 
 	poll_wait(file, &event->waitq, wait);
@@ -3521,16 +3517,12 @@ static void ring_buffer_attach(struct perf_event *event,
 		return;
 
 	spin_lock_irqsave(&rb->event_lock, flags);
-	if (!list_empty(&event->rb_entry))
-		goto unlock;
-
-	list_add(&event->rb_entry, &rb->event_list);
-unlock:
+	if (list_empty(&event->rb_entry))
+		list_add(&event->rb_entry, &rb->event_list);
 	spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-			       struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
 	unsigned long flags;
 
@@ -3549,13 +3541,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	if (!rb)
-		goto unlock;
-
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-		wake_up_all(&event->waitq);
-
-unlock:
+	if (rb) {
+		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+			wake_up_all(&event->waitq);
+	}
 	rcu_read_unlock();
 }
 
@@ -3584,18 +3573,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-	struct perf_event *event, *n;
-	unsigned long flags;
-
 	if (!atomic_dec_and_test(&rb->refcount))
 		return;
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-		list_del_init(&event->rb_entry);
-		wake_up_all(&event->waitq);
-	}
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	WARN_ON_ONCE(!list_empty(&rb->event_list));
 
 	call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3605,26 +3586,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 	struct perf_event *event = vma->vm_file->private_data;
 
 	atomic_inc(&event->mmap_count);
+	atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->rb);
-		struct user_struct *user = event->mmap_user;
-		struct ring_buffer *rb = event->rb;
+	struct ring_buffer *rb = event->rb;
+	struct user_struct *mmap_user = rb->mmap_user;
+	int mmap_locked = rb->mmap_locked;
+	unsigned long size = perf_data_size(rb);
+
+	atomic_dec(&rb->mmap_count);
+
+	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+		return;
 
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->pinned_vm -= event->mmap_locked;
-		rcu_assign_pointer(event->rb, NULL);
-		ring_buffer_detach(event, rb);
+	/* Detach current event from the buffer. */
+	rcu_assign_pointer(event->rb, NULL);
+	ring_buffer_detach(event, rb);
+	mutex_unlock(&event->mmap_mutex);
+
+	/* If there's still other mmap()s of this buffer, we're done. */
+	if (atomic_read(&rb->mmap_count)) {
+		ring_buffer_put(rb); /* can't be last */
+		return;
+	}
+
+	/*
+	 * No other mmap()s, detach from all other events that might redirect
+	 * into the now unreachable buffer. Somewhat complicated by the
+	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
+	 */
+again:
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+		if (!atomic_long_inc_not_zero(&event->refcount)) {
+			/*
+			 * This event is en-route to free_event() which will
+			 * detach it and remove it from the list.
+			 */
+			continue;
+		}
+		rcu_read_unlock();
+
+		mutex_lock(&event->mmap_mutex);
+		/*
+		 * Check we didn't race with perf_event_set_output() which can
+		 * swizzle the rb from under us while we were waiting to
+		 * acquire mmap_mutex.
+		 *
+		 * If we find a different rb; ignore this event, a next
+		 * iteration will no longer find it on the list. We have to
+		 * still restart the iteration to make sure we're not now
+		 * iterating the wrong list.
+		 */
+		if (event->rb == rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* can't be last, we still have one */
+		}
 		mutex_unlock(&event->mmap_mutex);
+		put_event(event);
 
-		ring_buffer_put(rb);
-		free_uid(user);
+		/*
+		 * Restart the iteration; either we're on the wrong list or
+		 * destroyed its integrity by doing a deletion.
+		 */
+		goto again;
 	}
+	rcu_read_unlock();
+
+	/*
+	 * It could be there's still a few 0-ref events on the list; they'll
+	 * get cleaned up by free_event() -- they'll also still have their
+	 * ref on the rb and will free it whenever they are done with it.
+	 *
+	 * Aside from that, this buffer is 'fully' detached and unmapped,
+	 * undo the VM accounting.
+	 */
+
+	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+	vma->vm_mm->pinned_vm -= mmap_locked;
+	free_uid(mmap_user);
+
+	ring_buffer_put(rb); /* could be last */
 }
 
 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3674,12 +3729,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
 	mutex_lock(&event->mmap_mutex);
 	if (event->rb) {
-		if (event->rb->nr_pages == nr_pages)
-			atomic_inc(&event->rb->refcount);
-		else
+		if (event->rb->nr_pages != nr_pages) {
 			ret = -EINVAL;
+			goto unlock;
+		}
+
+		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+			/*
+			 * Raced against perf_mmap_close() through
+			 * perf_event_set_output(). Try again, hope for better
+			 * luck.
+			 */
+			mutex_unlock(&event->mmap_mutex);
+			goto again;
+		}
+
 		goto unlock;
 	}
 
@@ -3720,12 +3787,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		ret = -ENOMEM;
 		goto unlock;
 	}
-	rcu_assign_pointer(event->rb, rb);
+
+	atomic_set(&rb->mmap_count, 1);
+	rb->mmap_locked = extra;
+	rb->mmap_user = get_current_user();
 
 	atomic_long_add(user_extra, &user->locked_vm);
-	event->mmap_locked = extra;
-	event->mmap_user = get_current_user();
-	vma->vm_mm->pinned_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += extra;
+
+	ring_buffer_attach(event, rb);
+	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_update_userpage(event);
 
@@ -3734,7 +3805,11 @@ unlock:
 		atomic_inc(&event->mmap_count);
 	mutex_unlock(&event->mmap_mutex);
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	/*
+	 * Since pinned accounting is per vm we cannot allow fork() to copy our
+	 * vma.
+	 */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
 	return ret;
@@ -6412,6 +6487,8 @@ set:
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
 
+	old_rb = event->rb;
+
 	if (output_event) {
 		/* get the rb we want to redirect to */
 		rb = ring_buffer_get(output_event);
@@ -6419,16 +6496,28 @@ set:
 			goto unlock;
 	}
 
-	old_rb = event->rb;
-	rcu_assign_pointer(event->rb, rb);
 	if (old_rb)
 		ring_buffer_detach(event, old_rb);
+
+	if (rb)
+		ring_buffer_attach(event, rb);
+
+	rcu_assign_pointer(event->rb, rb);
+
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
+
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
 
-	if (old_rb)
-		ring_buffer_put(old_rb);
 out:
 	return ret;
 }
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index eb675c4..ca65997 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -31,6 +31,10 @@ struct ring_buffer {
 	spinlock_t			event_lock;
 	struct list_head		event_list;
 
+	atomic_t			mmap_count;
+	unsigned long			mmap_locked;
+	struct user_struct		*mmap_user;
+
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
 };
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3fed7f0..bddf3b2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		BUG_ON(!kprobe_unused(&op->kp));
 		list_del_init(&op->list);
 		free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-	LIST_HEAD(free_list);
-
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 	 * kprobes before waiting for quiesence period.
 	 */
-	do_unoptimize_kprobes(&free_list);
+	do_unoptimize_kprobes();
 
 	/*
 	 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	do_optimize_kprobes();
 
 	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes(&free_list);
+	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
 	mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 	if (!list_empty(&op->list))
 		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
-
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+	if (kprobe_unused(p)) {
+		/* Enqueue if it is unused */
+		list_add(&op->list, &freeing_list);
+		/*
+		 * Remove unused probes from the hash list. After waiting
+		 * for synchronization, this probe is reclaimed.
+		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 */
+		hlist_del_rcu(&op->kp.hlist);
+	}
+
 	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
