[lxc-devel] [lxc/master] add more detail for blkio

0x0916 on Github lxc-bot at linuxcontainers.org
Sun May 14 04:18:36 UTC 2017


From 73a19fcc1c13ed91fba3d025cafe704b99ab254d Mon Sep 17 00:00:00 2001
From: 0x0916 <w at laoqinren.net>
Date: Tue, 9 May 2017 19:07:24 +0800
Subject: [PATCH] add more detail for blkio

This patch adds separate Read and Write figures to the blkio byte statistics reported by lxc-top.

Signed-off-by: 0x0916 <w at laoqinren.net>
---
 src/lxc/tools/lxc_top.c | 99 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 84 insertions(+), 15 deletions(-)
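
Note for reviewers (not part of the applied patch): the parsing added in stat_get_blk_stats() below boils down to "sum the per-device Read/Write rows, take the final Total row as-is". A minimal standalone sketch of that idea follows, fed with the sample blkio.throttle.io_service_bytes contents quoted in the patch comment. The helper name parse_blkio_buf() and the use of strtok()/sscanf() here are illustrative only; the patch itself uses the lxc_string_split_and_trim() helpers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct blkio_stats {
	uint64_t read;
	uint64_t write;
	uint64_t total;
};

/* Walk a blkio.throttle.* buffer line by line, summing the per-device
 * "Read"/"Write" rows and taking the trailing "Total <n>" row verbatim.
 */
static void parse_blkio_buf(char *buf, struct blkio_stats *st)
{
	char *line;

	memset(st, 0, sizeof(*st));
	for (line = strtok(buf, "\n"); line; line = strtok(NULL, "\n")) {
		char dev[32], op[32];
		unsigned long long val;

		if (sscanf(line, "%31s %31s %llu", dev, op, &val) == 3) {
			/* per-device row, e.g. "8:0 Read 110309376" */
			if (strcmp(op, "Read") == 0)
				st->read += val;
			else if (strcmp(op, "Write") == 0)
				st->write += val;
		} else if (sscanf(line, "Total %llu", &val) == 1) {
			/* summary row, e.g. "Total 149327872" */
			st->total = val;
		}
	}
}

int main(void)
{
	/* sample blkio.throttle.io_service_bytes contents from the patch */
	char buf[] = "8:0 Read 110309376\n"
		     "8:0 Write 39018496\n"
		     "8:0 Sync 2818048\n"
		     "8:0 Async 146509824\n"
		     "8:0 Total 149327872\n"
		     "Total 149327872\n";
	struct blkio_stats st;

	parse_blkio_buf(buf, &st);
	printf("read=%" PRIu64 " write=%" PRIu64 " total=%" PRIu64 "\n",
	       st.read, st.write, st.total);
	return 0;
}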

diff --git a/src/lxc/tools/lxc_top.c b/src/lxc/tools/lxc_top.c
index 7f3ae4f..0d92141 100644
--- a/src/lxc/tools/lxc_top.c
+++ b/src/lxc/tools/lxc_top.c
@@ -50,6 +50,12 @@ lxc_log_define(lxc_top_ui, lxc);
 #define TERMBOLD  ESC "[1m"
 #define TERMRVRS  ESC "[7m"
 
+struct blkio_stats {
+	uint64_t read;
+	uint64_t write;
+	uint64_t total;
+};
+
 struct stats {
 	uint64_t mem_used;
 	uint64_t mem_limit;
@@ -58,8 +64,8 @@ struct stats {
 	uint64_t cpu_use_nanos;
 	uint64_t cpu_use_user;
 	uint64_t cpu_use_sys;
-	uint64_t blkio;
-	uint64_t blkio_iops;
+	struct blkio_stats io_service_bytes;
+	struct blkio_stats io_serviced;
 };
 
 struct ct {
@@ -273,6 +279,60 @@ static uint64_t stat_match_get_int(struct lxc_container *c, const char *item,
 	return val;
 }
 
+/*
+ * Examples:
+ *	blkio.throttle.io_serviced
+ *	8:0 Read 4259
+ *	8:0 Write 835
+ *	8:0 Sync 292
+ *	8:0 Async 4802
+ *	8:0 Total 5094
+ *	Total 5094
+ *
+ *	blkio.throttle.io_service_bytes
+ *	8:0 Read 110309376
+ *	8:0 Write 39018496
+ *	8:0 Sync 2818048
+ *	8:0 Async 146509824
+ *	8:0 Total 149327872
+ *	Total 149327872
+ */
+static void stat_get_blk_stats(struct lxc_container *c, const char *item,
+			      struct blkio_stats *stats) {
+	char buf[4096];
+	int i, len;
+	char **lines, **cols;
+
+	len = c->get_cgroup_item(c, item, buf, sizeof(buf));
+	if (len <= 0) {
+		ERROR("unable to read cgroup item %s", item);
+		return;
+	}
+
+	lines = lxc_string_split_and_trim(buf, '\n');
+	if (!lines)
+		return;
+
+	memset(stats, 0, sizeof(struct blkio_stats));
+	for (i = 0; lines[i]; i++) {
+		cols = lxc_string_split_and_trim(lines[i], ' ');
+		if (!cols)
+			goto out;
+		if (strcmp(cols[1], "Read") == 0) {
+			stats->read += strtoull(cols[2], NULL, 0);
+		} else if (strcmp(cols[1], "Write") == 0) {
+			stats->write += strtoull(cols[2], NULL, 0);
+		}
+		if (strcmp(cols[0], "Total") == 0) {
+			stats->total = strtoull(cols[1], NULL, 0);
+		}
+		lxc_free_array((void **)cols, free);
+	}
+out:
+	lxc_free_array((void **)lines, free);
+	return;
+}
+
 static void stats_get(struct lxc_container *c, struct ct *ct, struct stats *total)
 {
 	ct->c = c;
@@ -283,8 +343,8 @@ static void stats_get(struct lxc_container *c, struct ct *ct, struct stats *tota
 	ct->stats->cpu_use_nanos = stat_get_int(c, "cpuacct.usage");
 	ct->stats->cpu_use_user  = stat_match_get_int(c, "cpuacct.stat", "user", 1);
 	ct->stats->cpu_use_sys   = stat_match_get_int(c, "cpuacct.stat", "system", 1);
-	ct->stats->blkio         = stat_match_get_int(c, "blkio.throttle.io_service_bytes", "Total", 1);
-	ct->stats->blkio_iops    = stat_match_get_int(c, "blkio.throttle.io_serviced", "Total", 1);
+	stat_get_blk_stats(c, "blkio.throttle.io_service_bytes", &ct->stats->io_service_bytes);
+	stat_get_blk_stats(c, "blkio.throttle.io_serviced", &ct->stats->io_serviced);
 
 	if (total) {
 		total->mem_used      = total->mem_used      + ct->stats->mem_used;
@@ -294,19 +354,21 @@ static void stats_get(struct lxc_container *c, struct ct *ct, struct stats *tota
 		total->cpu_use_nanos = total->cpu_use_nanos + ct->stats->cpu_use_nanos;
 		total->cpu_use_user  = total->cpu_use_user  + ct->stats->cpu_use_user;
 		total->cpu_use_sys   = total->cpu_use_sys   + ct->stats->cpu_use_sys;
-		total->blkio         = total->blkio         + ct->stats->blkio;
+		total->io_service_bytes.total += ct->stats->io_service_bytes.total;
+		total->io_service_bytes.read += ct->stats->io_service_bytes.read;
+		total->io_service_bytes.write += ct->stats->io_service_bytes.write;
 	}
 }
 
 static void stats_print_header(struct stats *stats)
 {
 	printf(TERMRVRS TERMBOLD);
-	printf("%-18s %12s %12s %12s %14s %10s", "Container", "CPU",  "CPU",  "CPU",  "BlkIO", "Mem");
+	printf("%-18s %12s %12s %12s %36s %10s", "Container", "CPU",  "CPU",  "CPU",  "BlkIO", "Mem");
 	if (stats->kmem_used > 0)
 		printf(" %10s", "KMem");
 	printf("\n");
 
-	printf("%-18s %12s %12s %12s %14s %10s", "Name",      "Used", "Sys",  "User", "Total", "Used");
+	printf("%-18s %12s %12s %12s %36s %10s", "Name",      "Used", "Sys",  "User", "Total(Read/Write)", "Used");
 	if (stats->kmem_used > 0)
 		printf(" %10s", "Used");
 	printf("\n");
@@ -316,22 +378,29 @@ static void stats_print_header(struct stats *stats)
 static void stats_print(const char *name, const struct stats *stats,
 			const struct stats *total)
 {
-	char blkio_str[20];
+	char iosb_str[63];
+	char iosb_total_str[20];
+	char iosb_read_str[20];
+	char iosb_write_str[20];
 	char mem_used_str[20];
 	char kmem_used_str[20];
 	struct timeval time_val;
 	unsigned long long time_ms;
 
 	if (!batch) {
-		size_humanize(stats->blkio, blkio_str, sizeof(blkio_str));
+		size_humanize(stats->io_service_bytes.total, iosb_total_str, sizeof(iosb_total_str));
+		size_humanize(stats->io_service_bytes.read, iosb_read_str, sizeof(iosb_read_str));
+		size_humanize(stats->io_service_bytes.write, iosb_write_str, sizeof(iosb_write_str));
 		size_humanize(stats->mem_used, mem_used_str, sizeof(mem_used_str));
 
-		printf("%-18.18s %12.2f %12.2f %12.2f %14s %10s",
+		snprintf(iosb_str, sizeof(iosb_str), "%s(%s/%s)", iosb_total_str, iosb_read_str, iosb_write_str);
+
+		printf("%-18.18s %12.2f %12.2f %12.2f %36s %10s",
 		       name,
 		       (float)stats->cpu_use_nanos / 1000000000,
 		       (float)stats->cpu_use_sys  / USER_HZ,
 		       (float)stats->cpu_use_user / USER_HZ,
-		       blkio_str,
+		       iosb_str,
 		       mem_used_str);
 		if (total->kmem_used > 0) {
 			size_humanize(stats->kmem_used, kmem_used_str, sizeof(kmem_used_str));
@@ -344,8 +413,8 @@ static void stats_print(const char *name, const struct stats *stats,
 		       ",%" PRIu64 ",%" PRIu64 ",%" PRIu64 ",%" PRIu64,
 		       (uint64_t)time_ms, name, (uint64_t)stats->cpu_use_nanos,
 		       (uint64_t)stats->cpu_use_sys,
-		       (uint64_t)stats->cpu_use_user, (uint64_t)stats->blkio,
-		       (uint64_t)stats->blkio_iops, (uint64_t)stats->mem_used,
+		       (uint64_t)stats->cpu_use_user, (uint64_t)stats->io_service_bytes.total,
+		       (uint64_t)stats->io_serviced.total, (uint64_t)stats->mem_used,
 		       (uint64_t)stats->kmem_used);
 	}
 
@@ -377,8 +446,8 @@ static int cmp_blkio(const void *sct1, const void *sct2)
 	const struct ct *ct2 = sct2;
 
 	if (sort_reverse)
-		return ct2->stats->blkio < ct1->stats->blkio;
-	return ct1->stats->blkio < ct2->stats->blkio;
+		return ct2->stats->io_service_bytes.total < ct1->stats->io_service_bytes.total;
+	return ct1->stats->io_service_bytes.total < ct2->stats->io_service_bytes.total;
 }
 
 static int cmp_memory(const void *sct1, const void *sct2)
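
For reference, with this change the single BlkIO column printed by stats_print() becomes a combined total(read/write) field. An illustrative rendering (container name and values are made up, column spacing and the unit strings produced by size_humanize() are only approximated here):

	Container   CPU    CPU    CPU                        BlkIO      Mem
	Name        Used   Sys    User           Total(Read/Write)      Used
	c1          1.23   0.05   0.40  142.41MB(105.20MB/37.21MB)  256.00MB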

