[PATCH V2 3/8] perf tools: reuse perf_mmap__read_catchup in perf_mmap__push

From: Kan Liang <kan.liang@xxxxxxxxx>

perf_mmap__push() uses the same code as perf_mmap__read_catchup() to
calculate the ring buffer start, end and size. Reuse
perf_mmap__read_catchup() there and drop the duplicated logic.

No functional change.
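
For reference, the new call assumes the perf_mmap__read_catchup()
interface from earlier in this series. A sketch of the expected
contract, with the return-value semantics inferred from the "rc < 1"
check in the new code (an assumption, not taken verbatim from mmap.h):

	/*
	 * Sketch only: computes the window of ring buffer data to
	 * process.  Assumed to return 1 when [*startp, *endp) holds
	 * data to push, 0 when the buffer is empty, and a negative
	 * value on error.
	 */
	int perf_mmap__read_catchup(struct perf_mmap *md, bool overwrite,
				    u64 *startp, u64 *endp,
				    unsigned long *sizep);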

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxx>
---
 tools/perf/util/mmap.c | 31 ++++++-------------------------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index bf67460..61237eb 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -285,36 +285,16 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 		    void *to, int push(void *to, void *buf, size_t size))
 {
 	u64 head = perf_mmap__read_head(md);
-	u64 old = md->prev;
-	u64 end = head, start = old;
+	u64 end, start;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
 	void *buf;
-	int rc = 0;
+	int rc;
 
-	start = overwrite ? head : old;
-	end = overwrite ? old : head;
 
-	if (start == end)
-		return 0;
-
-	size = end - start;
-	if (size > (unsigned long)(md->mask) + 1) {
-		if (!overwrite) {
-			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
-			md->prev = head;
-			perf_mmap__consume(md, overwrite);
-			return 0;
-		}
-
-		/*
-		 * Backward ring buffer is full. We still have a chance to read
-		 * most of data from it.
-		 */
-		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
-			return -1;
-	}
+	rc = perf_mmap__read_catchup(md, overwrite, &start, &end, &size);
+	if (rc < 1)
+		return rc;
 
 	if ((start & md->mask) + size != (end & md->mask)) {
 		buf = &data[start & md->mask];
@@ -338,6 +318,7 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 
 	md->prev = head;
 	perf_mmap__consume(md, overwrite);
+	rc = 0;
 out:
 	return rc;
 }
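
A note on the wrap-around handling this change leaves in place: when
the [start, end) window crosses the end of the ring buffer, the masked
offsets no longer line up, so the data has to be pushed in two pieces.
A simplified sketch of that split (local variables inlined for
illustration; the actual code lives in perf_mmap__push() above):

	/*
	 * If [start, end) wraps past the buffer end, push the piece up
	 * to the end of the ring first, then the part that wrapped
	 * around to the front.  mask is the ring size minus one.
	 */
	if ((start & mask) + size != (end & mask)) {
		size = mask + 1 - (start & mask);
		if (push(to, &data[start & mask], size) < 0)
			return -1;
		start += size;
	}
	/* the remaining [start, end) region is contiguous */
	if (push(to, &data[start & mask], end - start) < 0)
		return -1;
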
-- 
2.5.5