Memory: Add memory mapping tool
When people refer to memory mapping, they usually mean mmap(2).
An mmap(2) mapping is classified along two independent axes:
one axis is SHARED vs PRIVATE, the other is FILE-backed vs ANONYMOUS.
Combining the two axes yields the following four cases:
1. PRIVATE FILE MAPPING
2. SHARED FILE MAPPING
3. PRIVATE ANONYMOUS MAPPING
4. SHARED ANONYMOUS MAPPING
Add a memory mapping tool that covers the above four scenarios,
and update numa_stress to use it.
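
For orientation, here is a minimal sketch of the four flag combinations
(illustrative only, not part of this commit; error handling omitted, and
fd is assumed to be an already-open file descriptor for the FILE cases):

    /* 1. PRIVATE FILE mapping */
    void *p1 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    /* 2. SHARED FILE mapping */
    void *p2 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    /* 3. PRIVATE ANONYMOUS mapping */
    void *p3 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* 4. SHARED ANONYMOUS mapping */
    void *p4 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);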

Signed-off-by: zhenyzha <[email protected]>
zhenyzha committed Jul 11, 2023
1 parent 448acf0 commit f0f9c4a
Showing 4 changed files with 204 additions and 83 deletions.
147 changes: 147 additions & 0 deletions deps/mem_mapping/mem_mapping.c
@@ -0,0 +1,147 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <errno.h>
#include <pthread.h>

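/*
 * Note: not referenced by main() below; presumably scaffolding for a
 * future threaded variant of the tool.
 */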
void *thread_body(void *arg) {
    printf("thread: pid = %d, thread_id = %lu\n", getpid(), pthread_self());
    return NULL;
}

struct mmap_struct {
    int page_size;

    int fd;
    int flags;
    void *buf;
    unsigned long len;

    int key_break;
};

#define MMAP_DEFAULT_SIZE 0x200000 /* 2MB */

static int util_misc_key_press(int enable, const char *prefix, const char *desc)
{
    int c;

    if (!enable)
        return 0;

    fprintf(stdout, "%s%s\n", prefix, desc);
    c = getchar();

    return c;
}

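/*
 * Examples: "2G" -> 2 * 0x40000000 bytes, "512M" -> 512 * 0x100000 bytes,
 * "16384K" -> 16384 * 0x400 bytes; a bare number (decimal or 0x-hex) is
 * taken as bytes.
 */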
static unsigned long util_memory_parse_size(char *args)
{
    char *nptr, *p = NULL;
    unsigned long factor = 1, size = 0;

    if (!strlen(args))
        return 0;

    nptr = (char *)malloc(strlen(args) + 1);
    if (!nptr)
        return 0;

    memset(nptr, 0, strlen(args) + 1);
    strcpy(nptr, args);

    if ((p = strstr(nptr, "G")) || (p = strstr(nptr, "g")))
        factor = 0x40000000;
    else if ((p = strstr(nptr, "M")) || (p = strstr(nptr, "m")))
        factor = 0x100000;
    else if ((p = strstr(nptr, "K")) || (p = strstr(nptr, "k")))
        factor = 0x400;

    if (p)
        *p = '\0';
    size = factor * strtoul(nptr, NULL, 0);

    free(nptr);
    return size;
}

static void usage(void)
{
    fprintf(stdout, "testsuite mmap [-a] [-f <filename>] [-s] [-p] [-l <size>] [-k]\n");
    fprintf(stdout, "\n");
    fprintf(stdout, "-a: Anonymous mapping\n");
    fprintf(stdout, "-f: File based mapping\n");
    fprintf(stdout, "-s: Shared mapping\n");
    fprintf(stdout, "-p: Private mapping\n");
    fprintf(stdout, "-l: Length of memory to be mapped\n");
    fprintf(stdout, "-k: Stop at various stages\n");
    fprintf(stdout, "\n");
}

static void mmap_init_data(struct mmap_struct *m)
{
    m->page_size = getpagesize();

    m->fd = -1;            /* no backing file yet: anonymous unless -f is given */
    m->flags = 0;
    m->buf = MAP_FAILED;
    m->len = MMAP_DEFAULT_SIZE;
    m->key_break = 0;
}

int main(int argc, char **argv)
{
    struct mmap_struct m;
    int opt;

    mmap_init_data(&m);

    while ((opt = getopt(argc, argv, "af:spl:kh")) != -1) {
        switch (opt) {
        case 'a':
            m.flags |= MAP_ANONYMOUS;
            break;
        case 'f':
            m.fd = open(optarg, O_RDWR);
            if (m.fd < 0) {
                fprintf(stderr, "Unable to open <%s>\n", optarg);
                return 1;
            }
            break;
        case 's':
            m.flags |= MAP_SHARED;
            break;
        case 'p':
            m.flags |= MAP_PRIVATE;
            break;
        case 'l':
            m.len = util_memory_parse_size(optarg);
            break;
        case 'k':
            m.key_break = 1;
            break;
        case 'h':
        default:
            usage();
            return opt == 'h' ? 0 : 1;
        }
    }

    /* mmap */
    m.buf = mmap(NULL, m.len, PROT_READ | PROT_WRITE, m.flags, m.fd, 0);
    if (m.buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    util_misc_key_press(m.key_break, "mmap done. ", "press ENTER to continue");

    /* Write: touch every page so the kernel actually allocates the memory */
    memset(m.buf, 0, m.len);
    util_misc_key_press(m.key_break, "write done. ", "press ENTER to continue");

    /* The program will hold the memory for 3600s */
    sleep(3600);

    return 0;
}
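
For reference, once built (the cfg below compiles it with "gcc mem_mapping.c
-o mem_mapping"), an invocation such as "./mem_mapping -a -p -l 1024M" creates
a 1 GiB private anonymous mapping, touches every page with memset(), and then
holds the mapping for 3600 seconds so the caller can inspect its NUMA
placement; the size shown here is illustrative.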

Binary file added deps/mem_mapping/mem_mapping.tar.gz
15 changes: 12 additions & 3 deletions qemu/tests/cfg/numa.cfg
@@ -19,6 +19,15 @@
 no ppc64 ppc64le
 type = numa_stress
 del stress_args
-mem_ratio = 0.8
-# Replace "count" in "dd commands" to avoid lack of disk space
-#tmpfs_size = 1024
+mem_ratio = 0.6
+mem_map_tool = "mem_mapping.c"
+mem_tar_tool = "mem_mapping.tar.gz"
+cmd_cp_mmap_tool = "/bin/cp -rf %s /var/tmp/"
+stress_cmds_mem_mapping = "./mem_mapping"
+make_cmds_mem_mapping = "gcc mem_mapping.c -o mem_mapping"
+uninstall_cmds_mapping = "rm -rf /home/mem_*"
+cmd_mmap_configure = "gcc /var/tmp/mem_mapping.c -o /var/tmp/mem_mapping"
+cmd_mmap = "cd /var/tmp/ && numactl -m %s ./mem_mapping -a -p -l %dK &"
+cmd_mmap_stop = "pkill -9 mem_mapping"
+cmd_mmap_cleanup = "rm -rf /var/tmp/mem_*"
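
Note: at run time numa_stress.py fills the two placeholders in cmd_mmap with
the qemu process's most-used NUMA node and ~90% of that node's MemTotal in
KiB, so the expanded command looks like, e.g., "cd /var/tmp/ && numactl -m 0
./mem_mapping -a -p -l 16384K &" (node and size illustrative). The
stress_cmds_mem_mapping, make_cmds_mem_mapping and uninstall_cmds_mapping
entries appear to be consumed by utils_test.VMStress, which downloads
mem_mapping.tar.gz into the guest and builds the same tool there.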

125 changes: 45 additions & 80 deletions qemu/tests/numa_stress.py
@@ -1,12 +1,10 @@
 import os
-import re
 import math

 from avocado.utils import process

 from virttest import error_context
 from virttest import utils_misc
-from virttest import funcatexit
 from virttest import utils_test
 from virttest import data_dir
 from virttest.staging import utils_memory
@@ -34,34 +32,14 @@ def max_mem_map_node(host_numa_node, qemu_pid):
     return (node_map_most, memory_sz_map_most)


-def get_tmpfs_write_speed():
-    """
-    Get the tmpfs write speed of the host
-    return: The write speed of tmpfs, the unit is kb/s.
-    """
-    process.run("mkdir -p /tmp/test_speed && "
-                "mount -t tmpfs none /tmp/test_speed", shell=True)
-    output = process.run("dd if=/dev/urandom of=/tmp/test_speed/test "
-                         "bs=1k count=1024")
-    try:
-        speed = re.search(r"\s([\w\s\.]+)/s", output.stderr, re.I).group(1)
-        return float(utils_misc.normalize_data_size(speed, 'K', 1024))
-    except Exception:
-        return 3072
-    finally:
-        process.run("umount /tmp/test_speed")
-        os.removedirs("/tmp/test_speed")
-
-
 @error_context.context_aware
 def run(test, params, env):
     """
     Qemu numa stress test:
     1) Boot up a guest and find the node it used
     2) Try to allocate memory in that node
     3) Run memory heavy stress inside guest
-    4) Check the vm is running well after stress,
-       no out of memory or qemu crash.
+    4) Check the memory use status of qemu process
     5) Repeat step 2 ~ 4 several times
@@ -73,69 +51,56 @@ def run(test, params, env):
     if len(host_numa_node.online_nodes) < 2:
         test.cancel("Host only has one NUMA node, skipping test...")

+    mem_map_tool = params.get("mem_map_tool")
+    mem_tar_tool = params.get("mem_tar_tool")
+    cmd_cp_mmap_tool = params.get("cmd_cp_mmap_tool")
+    cmd_mmap_configure = params.get("cmd_mmap_configure")
+    cmd_mmap_stop = params.get("cmd_mmap_stop")
+    cmd_mmap_cleanup = params.get("cmd_mmap_cleanup")
     timeout = float(params.get("login_timeout", 240))
-    test_count = int(params.get("test_count", 4))
+    test_count = int(params.get("test_count", 2))

     vm = env.get_vm(params["main_vm"])
     vm.verify_alive()
     session = vm.wait_for_login(timeout=timeout)

     qemu_pid = vm.get_pid()

     if test_count < len(host_numa_node.online_nodes):
         test_count = len(host_numa_node.online_nodes)

-    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
-    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
-    tmpfs_write_speed = get_tmpfs_write_speed()
-    memory_file = utils_misc.get_path(tmpfs_path, "test")
-
-    utils_memory.drop_caches()
-
-    if not os.path.isdir(tmpfs_path):
-        os.mkdir(tmpfs_path)
-
-    test_mem = float(params.get("mem"))*float(params.get("mem_ratio", 0.8))
-    stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)
-
-    for test_round in range(test_count):
-        most_used_node, _ = max_mem_map_node(host_numa_node, qemu_pid)
-        if os.path.exists(memory_file):
-            os.remove(memory_file)
-        utils_memory.drop_caches()
-        error_context.context("Executing stress test round: %s" % test_round,
-                              test.log.info)
-        numa_node_malloc = most_used_node
-        tmpfs_size = \
-            math.floor(float(host_numa_node.read_from_node_meminfo(numa_node_malloc,
-                                                                   'MemFree')) * 0.9)
-        dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
-        mount_fs_size = "size=%dK" % tmpfs_size
-        dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
-                                                              tmpfs_size)
-        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
-        error_context.context("Try to allocate memory in node %s"
-                              % numa_node_malloc, test.log.info)
-        try:
-            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
-            funcatexit.register(env, params.get("type"), utils_misc.umount,
-                                "none", tmpfs_path, "tmpfs")
-            process.system(numa_dd_cmd, timeout=dd_timeout, shell=True)
-        except Exception as error_msg:
-            if "No space" in str(error_msg):
-                pass
-            else:
-                test.fail("Can not allocate memory in node %s."
-                          " Error message:%s" % (numa_node_malloc,
-                                                 str(error_msg)))
-        error_context.context("Run memory heavy stress in guest", test.log.info)
-        stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args)
-        stress_test.load_stress_tool()
-        stress_test.unload_stress()
-        stress_test.clean()
-        utils_misc.umount("none", tmpfs_path, "tmpfs")
-        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
-                              "none", tmpfs_path, "tmpfs")
-        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
-    session.close()
+    try:
+        test_mem = float(params.get("mem")) * float(params.get("mem_ratio", 0.6))
+        stress_args = "-a -p -l %sM" % int(test_mem)
+        host_path = os.path.join(data_dir.get_deps_dir('mem_mapping'), mem_map_tool)
+        guest_path = os.path.join(data_dir.get_deps_dir('mem_mapping'), mem_tar_tool)
+        cmd_cp_mmap_tool = cmd_cp_mmap_tool % host_path
+        process.run(cmd_cp_mmap_tool, shell=True)
+        test.log.info("Compile the mem_mapping tool")
+        process.system_output(cmd_mmap_configure, shell=True)
+        utils_memory.drop_caches()
+        for test_round in range(test_count):
+            error_context.context("Executing stress test round: %s" % test_round,
+                                  test.log.info)
+            cmd_mmap = params.get("cmd_mmap")
+            stress_test = None  # may stay unset if a step below fails
+            try:
+                error_context.context("Get the qemu process memory use status",
+                                      test.log.info)
+                most_used_node, memory_used = max_mem_map_node(host_numa_node,
+                                                               qemu_pid)
+                numa_node_malloc = most_used_node
+                mmap_size = math.floor(float(host_numa_node.read_from_node_meminfo(
+                    numa_node_malloc, 'MemTotal')) * 0.9)
+                cmd_mmap = cmd_mmap % (numa_node_malloc, mmap_size)
+                error_context.context("Run mem_mapping on host node %s."
+                                      % numa_node_malloc, test.log.info)
+                process.system_output(cmd_mmap, shell=True,
+                                      ignore_bg_processes=True)
+                error_context.context("Run memory heavy stress in guest",
+                                      test.log.info)
+                stress_test = utils_test.VMStress(vm, "mem_mapping", params,
                                                  download_url=guest_path,
                                                  stress_args=stress_args)
+                stress_test.load_stress_tool()
+                error_context.context("Get the qemu process memory use status",
+                                      test.log.info)
+                node_after, memory_after = max_mem_map_node(host_numa_node,
+                                                            qemu_pid)
+                if node_after == most_used_node and memory_after >= memory_used:
+                    test.fail("Memory still sticks in node %s" % numa_node_malloc)
+            finally:
+                if stress_test:
+                    stress_test.unload_stress()
+                    stress_test.clean()
+                process.system_output(cmd_mmap_stop, shell=True,
                                      ignore_status=True)
+                session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
+                utils_memory.drop_caches()
+    finally:
+        process.run(cmd_mmap_cleanup, shell=True)
+        session.close()
