2 * Support for decoding of KVM_* ioctl commands.
4 * Copyright (c) 2017 Masatake YAMATO <yamato@redhat.com>
5 * Copyright (c) 2017 Red Hat, Inc.
6 * Copyright (c) 2017-2019 The strace developers.
9 * SPDX-License-Identifier: LGPL-2.1-or-later
14 #ifdef HAVE_LINUX_KVM_H
15 # include <linux/kvm.h>
16 # include "print_fields.h"
17 # include "arch_kvm.c"
19 # include "mmap_cache.h"
/* Per-vcpu bookkeeping chained off struct tcb (singly-linked list).
 * Only two members of the struct are visible in this excerpt; other
 * fields referenced below include fd, cpuid, mmap_addr and resolved. */
22 struct vcpu_info *next;
26 unsigned long mmap_len;
/* When true (set by kvm_run_structure_decoder_init below), KVM_RUN
 * decoding looks up the vcpu's kvm_run area and attaches the symbolic
 * exit reason to the ioctl output. */
30 static bool dump_kvm_run_structure;
/* Linear scan of tcp->vcpu_info_list for the entry whose fd matches.
 * NOTE(review): the loop condition and the return statements fall
 * outside this excerpt — presumably returns the match or NULL. */
32 static struct vcpu_info *
33 vcpu_find(struct tcb *const tcp, int fd)
35 for (struct vcpu_info *vcpu_info = tcp->vcpu_info_list;
37 vcpu_info = vcpu_info->next)
38 if (vcpu_info->fd == fd)
/* Allocates a zero-initialized vcpu_info, records cpuid, and pushes the
 * node onto the head of tcp->vcpu_info_list.  NOTE(review): the fd
 * assignment and the return statement are outside this excerpt. */
44 static struct vcpu_info *
45 vcpu_alloc(struct tcb *const tcp, int fd, int cpuid)
47 struct vcpu_info *vcpu_info = xzalloc(sizeof(*vcpu_info));
50 vcpu_info->cpuid = cpuid;
/* link at list head */
52 vcpu_info->next = tcp->vcpu_info_list;
53 tcp->vcpu_info_list = vcpu_info;
/* Frees the tcb's entire vcpu_info list and resets the list head.
 * NOTE(review): the loop body (capturing head->next and freeing head)
 * is not visible in this excerpt. */
59 kvm_vcpu_info_free(struct tcb *tcp)
61 struct vcpu_info *head, *next;
63 for (head = tcp->vcpu_info_list; head; head = next) {
68 tcp->vcpu_info_list = NULL;
/* Records the fd -> cpuid association: allocates a fresh vcpu_info when
 * the fd is unknown; when the fd is known but now maps to a different
 * cpuid, updates it and clears `resolved` so vcpu_get_info() will
 * re-resolve the vcpu's mmap region. */
72 vcpu_register(struct tcb *const tcp, int fd, int cpuid)
77 struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
/* fd not seen before: create a new entry */
80 vcpu_alloc(tcp, fd, cpuid);
81 } else if (vcpu_info->cpuid != cpuid) {
/* same fd now refers to a different vcpu: force re-resolution */
82 vcpu_info->cpuid = cpuid;
83 vcpu_info->resolved = false;
/* mmap_cache_search_custom() predicate: matches a mapping whose backing
 * file name equals `data` (a path string) and whose device major number
 * is 0, i.e. an anonymous-inode mapping — see the MKDEV note below. */
88 is_map_for_file(struct mmap_cache_entry_t *map_info, void *data)
90 /* major version for anon inode may be given in get_anon_bdev()
93 * *p = MKDEV(0, dev & MINORMASK);
96 return map_info->binary_filename &&
97 map_info->major == 0 &&
98 strcmp(map_info->binary_filename, data) == 0;
/* Length of a mapping, guarding against start_addr >= end_addr.
 * NOTE(review): the false branch of the conditional is outside this
 * excerpt — presumably 0. */
102 map_len(struct mmap_cache_entry_t *map_info)
104 return map_info->start_addr < map_info->end_addr
105 ? map_info->end_addr - map_info->start_addr
/* Dentry-name prefix reported by getfdpath() for KVM vcpu anonymous
 * inodes; the trailing part is the decimal cpuid. */
109 # define VCPU_DENTRY_PREFIX "anon_inode:kvm-vcpu:"
/* Resolves the vcpu_info for fd, locating the vcpu's mmap'ed kvm_run
 * region through the mmap cache.
 *
 * Fast path: an entry already marked `resolved` is re-validated against
 * the (possibly rebuilt) mmap cache by checking that the cached
 * mmap_addr still maps to a dentry named for the same cpuid.
 * Slow path: the region is found by matching the fd's
 * "anon_inode:kvm-vcpu:<cpuid>" dentry name against the cache.
 *
 * NOTE(review): several lines (early returns, NULL checks, closing
 * braces) fall outside this excerpt. */
111 static struct vcpu_info*
112 vcpu_get_info(struct tcb *const tcp, int fd)
114 struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
115 struct mmap_cache_entry_t *map_info;
116 const char *cpuid_str;
118 enum mmap_cache_rebuild_result mc_stat =
119 mmap_cache_rebuild_if_invalid(tcp, __func__);
/* without an mmap cache the kvm_run area cannot be located */
120 if (mc_stat == MMAP_CACHE_REBUILD_NOCACHE)
123 if (vcpu_info && vcpu_info->resolved) {
124 if (mc_stat == MMAP_CACHE_REBUILD_READY)
/* cache was rebuilt: confirm the cached address still belongs to
 * a mapping named for this vcpu's cpuid */
127 map_info = mmap_cache_search(tcp, vcpu_info->mmap_addr);
130 STR_STRIP_PREFIX(map_info->binary_filename,
132 if (cpuid_str != map_info->binary_filename) {
133 int cpuid = string_to_uint(cpuid_str);
136 if (vcpu_info->cpuid == cpuid)
141 /* The vcpu vma may be mremap'ed. */
142 vcpu_info->resolved = false;
146 /* Slow path: !vcpu_info || !vcpu_info->resolved */
147 char path[PATH_MAX + 1];
149 if (getfdpath(tcp, fd, path, sizeof(path)) >= 0)
150 cpuid_str = STR_STRIP_PREFIX(path, VCPU_DENTRY_PREFIX);
/* prefix absent: fd does not look like a kvm vcpu */
151 if (cpuid_str == path)
154 map_info = mmap_cache_search_custom(tcp, is_map_for_file, path);
157 int cpuid = string_to_uint(cpuid_str);
161 vcpu_info = vcpu_alloc(tcp, fd, cpuid);
162 else if (vcpu_info->cpuid != cpuid)
163 vcpu_info->cpuid = cpuid;
/* cache the located kvm_run region and mark the entry trusted */
164 vcpu_info->mmap_addr = map_info->start_addr;
165 vcpu_info->mmap_len = map_len(map_info);
166 vcpu_info->resolved = true;
/* Decoder for KVM_CREATE_VCPU: prints the cpuid argument; on a
 * successful exit registers the returned vcpu fd so a later KVM_RUN can
 * be annotated (only needed when kvm_run dumping is enabled). */
174 kvm_ioctl_create_vcpu(struct tcb *const tcp, const kernel_ulong_t arg)
176 uint32_t cpuid = arg;
179 tprintf(", %u", cpuid);
/* registration is only useful when KVM_RUN decoding will consume it */
180 if (dump_kvm_run_structure)
182 } else if (!syserror(tcp)) {
/* on success u_rval is the newly created vcpu fd */
183 vcpu_register(tcp, tcp->u_rval, cpuid);
186 return RVAL_IOCTL_DECODED | RVAL_FD;
189 # ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
190 # include "xlat/kvm_mem_flags.h"
/* Decoder for KVM_SET_USER_MEMORY_REGION: fetches the tracee's
 * struct kvm_userspace_memory_region and prints its fields (slot,
 * flags, guest_phys_addr, memory_size, userspace_addr). */
192 kvm_ioctl_set_user_memory_region(struct tcb *const tcp, const kernel_ulong_t arg)
194 struct kvm_userspace_memory_region u_memory_region;
/* prints the raw address and bails out if the fetch fails */
197 if (umove_or_printaddr(tcp, arg, &u_memory_region))
198 return RVAL_IOCTL_DECODED;
200 PRINT_FIELD_U("{", u_memory_region, slot);
201 PRINT_FIELD_FLAGS(", ", u_memory_region, flags, kvm_mem_flags,
203 PRINT_FIELD_X(", ", u_memory_region, guest_phys_addr);
204 PRINT_FIELD_U(", ", u_memory_region, memory_size);
205 PRINT_FIELD_X(", ", u_memory_region, userspace_addr);
208 return RVAL_IOCTL_DECODED;
210 # endif /* HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION */
212 # ifdef HAVE_STRUCT_KVM_REGS
/* Decoder for KVM_GET_REGS / KVM_SET_REGS: fetches the tracee's
 * struct kvm_regs and delegates printing to the per-arch
 * arch_print_kvm_regs().  KVM_GET_REGS is output data, so it is
 * decoded on exiting, not entering.
 *
 * Fix: "&regs" had been corrupted into the mojibake "®s" (an HTML
 * "&reg;" entity decoded as U+00AE) on the two lines below; restored. */
214 kvm_ioctl_decode_regs(struct tcb *const tcp, const unsigned int code,
215 		      const kernel_ulong_t arg)
217 struct kvm_regs regs;
219 if (code == KVM_GET_REGS && entering(tcp))
223 if (!umove_or_printaddr(tcp, arg, &regs))
224 arch_print_kvm_regs(tcp, arg, &regs);
226 return RVAL_IOCTL_DECODED;
228 # endif /* HAVE_STRUCT_KVM_REGS */
230 # ifdef HAVE_STRUCT_KVM_CPUID2
231 # include "xlat/kvm_cpuid_flags.h"
/* print_array() callback: prints one struct kvm_cpuid_entry2
 * (function, index, flags, and the four CPUID result registers). */
233 print_kvm_cpuid_entry(struct tcb *const tcp,
234 		       void* elem_buf, size_t elem_size, void* data)
236 const struct kvm_cpuid_entry2 *entry = elem_buf;
237 PRINT_FIELD_X("{", *entry, function);
238 PRINT_FIELD_X(", ", *entry, index);
239 PRINT_FIELD_FLAGS(", ", *entry, flags, kvm_cpuid_flags,
240 		   "KVM_CPUID_FLAG_???");
241 PRINT_FIELD_X(", ", *entry, eax);
242 PRINT_FIELD_X(", ", *entry, ebx);
243 PRINT_FIELD_X(", ", *entry, ecx);
244 PRINT_FIELD_X(", ", *entry, edx);
/* Decoder for KVM_GET_SUPPORTED_CPUID / KVM_GET_EMULATED_CPUID (output,
 * decoded on exiting) and KVM_SET_CPUID2: prints the struct kvm_cpuid2
 * header (nent) followed by the trailing kvm_cpuid_entry2 array. */
251 kvm_ioctl_decode_cpuid2(struct tcb *const tcp, const unsigned int code,
252 			 const kernel_ulong_t arg)
254 struct kvm_cpuid2 cpuid;
256 if (entering(tcp) && (code == KVM_GET_SUPPORTED_CPUID
257 # ifdef KVM_GET_EMULATED_CPUID
258 		      || code == KVM_GET_EMULATED_CPUID
264 if (!umove_or_printaddr(tcp, arg, &cpuid)) {
265 PRINT_FIELD_U("{", cpuid, nent);
267 tprints(", entries=");
275 struct kvm_cpuid_entry2 entry;
/* the entries array immediately follows the fixed-size header */
276 print_array(tcp, arg + sizeof(cpuid), cpuid.nent,
277 	    &entry, sizeof(entry), tfetch_mem,
278 	    print_kvm_cpuid_entry, NULL);
283 return RVAL_IOCTL_DECODED;
285 # endif /* HAVE_STRUCT_KVM_CPUID2 */
287 # ifdef HAVE_STRUCT_KVM_SREGS
/* Decoder for KVM_GET_SREGS / KVM_SET_SREGS: fetches the tracee's
 * struct kvm_sregs and delegates printing to the per-arch
 * arch_print_kvm_sregs().  KVM_GET_SREGS is output data, so it is
 * decoded on exiting, not entering. */
289 kvm_ioctl_decode_sregs(struct tcb *const tcp, const unsigned int code,
290 			const kernel_ulong_t arg)
292 struct kvm_sregs sregs;
294 if (code == KVM_GET_SREGS && entering(tcp))
298 if (!umove_or_printaddr(tcp, arg, &sregs))
299 arch_print_kvm_sregs(tcp, arg, &sregs);
301 return RVAL_IOCTL_DECODED;
303 # endif /* HAVE_STRUCT_KVM_SREGS */
305 # include "xlat/kvm_cap.h"
/* Decoder for KVM_CHECK_EXTENSION: the argument is a KVM_CAP_*
 * capability constant, printed symbolically via the kvm_cap xlat. */
307 kvm_ioctl_decode_check_extension(struct tcb *const tcp, const unsigned int code,
308 				  const kernel_ulong_t arg)
311 printxval64(kvm_cap, arg, "KVM_CAP_???");
312 return RVAL_IOCTL_DECODED;
315 # include "xlat/kvm_exit_reason.h"
/* Reads the vcpu's struct kvm_run from the tracee's mmap'ed region and
 * sets tcp->auxstr to the symbolic KVM_EXIT_* name of exit_reason,
 * falling back to "KVM_EXIT_???" when the value is unknown.
 * NOTE(review): vcpu_run_struct is a static buffer — presumably safe
 * because the decoder runs single-threaded; confirm before changing. */
317 kvm_ioctl_run_attach_auxstr(struct tcb *const tcp,
318 			    struct vcpu_info *info)
321 static struct kvm_run vcpu_run_struct;
/* region too small to hold a kvm_run: nothing to decode */
323 if (info->mmap_len < sizeof(vcpu_run_struct))
326 if (umove(tcp, info->mmap_addr, &vcpu_run_struct) < 0)
329 tcp->auxstr = xlookup(kvm_exit_reason, vcpu_run_struct.exit_reason);
331 tcp->auxstr = "KVM_EXIT_???";
/* Decoder for KVM_RUN (takes no decodable argument).  When kvm_run
 * dumping is enabled, resolves the vcpu for arg0 (the vcpu fd) and
 * attaches the exit reason as auxstr.  NOTE(review): the lines updating
 * r (e.g. adding RVAL_STR) and returning it are outside this excerpt. */
335 kvm_ioctl_decode_run(struct tcb *const tcp)
341 int r = RVAL_DECODED;
346 if (dump_kvm_run_structure) {
348 int fd = tcp->u_arg[0];
349 struct vcpu_info *info = vcpu_get_info(tcp, fd);
352 kvm_ioctl_run_attach_auxstr(tcp, info);
/* Entry point for decoding KVM_* ioctl commands: dispatches on the
 * ioctl code to the per-command decoders above.  Argument-less commands
 * (KVM_GET_VCPU_MMAP_SIZE, KVM_GET_API_VERSION, ...) are grouped at the
 * bottom.  NOTE(review): the switch statement itself, several case
 * labels, #endif lines and the default branch are outside this
 * excerpt. */
362 kvm_ioctl(struct tcb *const tcp, const unsigned int code, const kernel_ulong_t arg)
365 case KVM_CREATE_VCPU:
366 return kvm_ioctl_create_vcpu(tcp, arg);
368 # ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
369 case KVM_SET_USER_MEMORY_REGION:
370 return kvm_ioctl_set_user_memory_region(tcp, arg);
373 # ifdef HAVE_STRUCT_KVM_REGS
376 return kvm_ioctl_decode_regs(tcp, code, arg);
379 # ifdef HAVE_STRUCT_KVM_SREGS
382 return kvm_ioctl_decode_sregs(tcp, code, arg);
385 # ifdef HAVE_STRUCT_KVM_CPUID2
387 case KVM_GET_SUPPORTED_CPUID:
388 # ifdef KVM_GET_EMULATED_CPUID
389 case KVM_GET_EMULATED_CPUID:
391 return kvm_ioctl_decode_cpuid2(tcp, code, arg);
394 case KVM_CHECK_EXTENSION:
395 return kvm_ioctl_decode_check_extension(tcp, code, arg);
/* fd-producing command decoded without further argument printing */
398 return RVAL_DECODED | RVAL_FD;
401 return kvm_ioctl_decode_run(tcp);
403 case KVM_GET_VCPU_MMAP_SIZE:
404 case KVM_GET_API_VERSION:
/* Enables attaching kvm_run exit-reason details to KVM_RUN decoding;
 * called from option handling elsewhere (not visible in this excerpt). */
411 kvm_run_structure_decoder_init(void)
413 dump_kvm_run_structure = true;
417 #endif /* HAVE_LINUX_KVM_H */