/*
 * Support for decoding of KVM_* ioctl commands.
 *
 * Copyright (c) 2017 Masatake YAMATO <yamato@redhat.com>
 * Copyright (c) 2017 Red Hat, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "defs.h"

#ifdef HAVE_LINUX_KVM_H
# include <linux/kvm.h>
# include "print_fields.h"
# include "arch_kvm.c"
# include "mmap_cache.h"
41 struct vcpu_info *next;
45 unsigned long mmap_len;
49 static bool dump_kvm_run_structure;
51 static struct vcpu_info *
52 vcpu_find(struct tcb *const tcp, int fd)
54 for (struct vcpu_info *vcpu_info = tcp->vcpu_info_list;
56 vcpu_info = vcpu_info->next)
57 if (vcpu_info->fd == fd)
63 static struct vcpu_info *
64 vcpu_alloc(struct tcb *const tcp, int fd, int cpuid)
66 struct vcpu_info *vcpu_info = xcalloc(1, sizeof(*vcpu_info));
69 vcpu_info->cpuid = cpuid;
71 vcpu_info->next = tcp->vcpu_info_list;
72 tcp->vcpu_info_list = vcpu_info;
78 kvm_vcpu_info_free(struct tcb *tcp)
80 struct vcpu_info *head, *next;
82 for (head = tcp->vcpu_info_list; head; head = next) {
87 tcp->vcpu_info_list = NULL;
91 vcpu_register(struct tcb *const tcp, int fd, int cpuid)
96 struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
99 vcpu_info = vcpu_alloc(tcp, fd, cpuid);
100 else if (vcpu_info->cpuid != cpuid)
102 vcpu_info->cpuid = cpuid;
103 vcpu_info->resolved = false;
108 is_map_for_file(struct mmap_cache_entry_t *map_info, void *data)
110 /* major version for anon inode may be given in get_anon_bdev()
113 * *p = MKDEV(0, dev & MINORMASK);
116 return map_info->binary_filename &&
117 map_info->major == 0 &&
118 strcmp(map_info->binary_filename, data) == 0;
122 map_len(struct mmap_cache_entry_t *map_info)
124 return map_info->start_addr < map_info->end_addr
125 ? map_info->end_addr - map_info->start_addr
129 #define VCPU_DENTRY_PREFIX "anon_inode:kvm-vcpu:"
131 static struct vcpu_info*
132 vcpu_get_info(struct tcb *const tcp, int fd)
134 struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
135 struct mmap_cache_entry_t *map_info;
136 const char *cpuid_str;
138 enum mmap_cache_rebuild_result mc_stat =
139 mmap_cache_rebuild_if_invalid(tcp, __func__);
140 if (mc_stat == MMAP_CACHE_REBUILD_NOCACHE)
143 if (vcpu_info && vcpu_info->resolved) {
144 if (mc_stat == MMAP_CACHE_REBUILD_READY)
147 map_info = mmap_cache_search(tcp, vcpu_info->mmap_addr);
150 STR_STRIP_PREFIX(map_info->binary_filename,
152 if (cpuid_str != map_info->binary_filename) {
153 int cpuid = string_to_uint(cpuid_str);
156 if (vcpu_info->cpuid == cpuid)
161 /* The vcpu vma may be mremap'ed. */
162 vcpu_info->resolved = false;
166 /* Slow path: !vcpu_info || !vcpu_info->resolved */
167 char path[PATH_MAX + 1];
169 if (getfdpath(tcp, fd, path, sizeof(path)) >= 0)
170 cpuid_str = STR_STRIP_PREFIX(path, VCPU_DENTRY_PREFIX);
171 if (cpuid_str == path)
174 map_info = mmap_cache_search_custom(tcp, is_map_for_file, path);
177 int cpuid = string_to_uint(cpuid_str);
181 vcpu_info = vcpu_alloc(tcp, fd, cpuid);
182 else if (vcpu_info->cpuid != cpuid)
183 vcpu_info->cpuid = cpuid;
184 vcpu_info->mmap_addr = map_info->start_addr;
185 vcpu_info->mmap_len = map_len(map_info);
186 vcpu_info->resolved = true;
194 kvm_ioctl_create_vcpu(struct tcb *const tcp, const kernel_ulong_t arg)
196 uint32_t cpuid = arg;
199 tprintf(", %u", cpuid);
200 if (dump_kvm_run_structure)
202 } else if (!syserror(tcp)) {
203 vcpu_register(tcp, tcp->u_rval, cpuid);
206 return RVAL_IOCTL_DECODED | RVAL_FD;
209 # ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
210 # include "xlat/kvm_mem_flags.h"
212 kvm_ioctl_set_user_memory_region(struct tcb *const tcp, const kernel_ulong_t arg)
214 struct kvm_userspace_memory_region u_memory_region;
217 if (umove_or_printaddr(tcp, arg, &u_memory_region))
218 return RVAL_IOCTL_DECODED;
220 PRINT_FIELD_U("{", u_memory_region, slot);
221 PRINT_FIELD_FLAGS(", ", u_memory_region, flags, kvm_mem_flags,
223 PRINT_FIELD_X(", ", u_memory_region, guest_phys_addr);
224 PRINT_FIELD_U(", ", u_memory_region, memory_size);
225 PRINT_FIELD_X(", ", u_memory_region, userspace_addr);
228 return RVAL_IOCTL_DECODED;
230 # endif /* HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION */
232 # ifdef HAVE_STRUCT_KVM_REGS
234 kvm_ioctl_decode_regs(struct tcb *const tcp, const unsigned int code,
235 const kernel_ulong_t arg)
237 struct kvm_regs regs;
239 if (code == KVM_GET_REGS && entering(tcp))
243 if (!umove_or_printaddr(tcp, arg, ®s))
244 arch_print_kvm_regs(tcp, arg, ®s);
246 return RVAL_IOCTL_DECODED;
248 # endif /* HAVE_STRUCT_KVM_REGS */
250 # ifdef HAVE_STRUCT_KVM_CPUID2
251 # include "xlat/kvm_cpuid_flags.h"
253 print_kvm_cpuid_entry(struct tcb *const tcp,
254 void* elem_buf, size_t elem_size, void* data)
256 const struct kvm_cpuid_entry2 *entry = elem_buf;
257 PRINT_FIELD_X("{", *entry, function);
258 PRINT_FIELD_X(", ", *entry, index);
259 PRINT_FIELD_FLAGS(", ", *entry, flags, kvm_cpuid_flags,
260 "KVM_CPUID_FLAG_???");
261 PRINT_FIELD_X(", ", *entry, eax);
262 PRINT_FIELD_X(", ", *entry, ebx);
263 PRINT_FIELD_X(", ", *entry, ecx);
264 PRINT_FIELD_X(", ", *entry, edx);
271 kvm_ioctl_decode_cpuid2(struct tcb *const tcp, const unsigned int code,
272 const kernel_ulong_t arg)
274 struct kvm_cpuid2 cpuid;
276 if (entering(tcp) && (code == KVM_GET_SUPPORTED_CPUID
277 # ifdef KVM_GET_EMULATED_CPUID
278 || code == KVM_GET_EMULATED_CPUID
284 if (!umove_or_printaddr(tcp, arg, &cpuid)) {
285 PRINT_FIELD_U("{", cpuid, nent);
287 tprints(", entries=");
295 struct kvm_cpuid_entry2 entry;
296 print_array(tcp, arg + sizeof(cpuid), cpuid.nent,
297 &entry, sizeof(entry), tfetch_mem,
298 print_kvm_cpuid_entry, NULL);
303 return RVAL_IOCTL_DECODED;
305 # endif /* HAVE_STRUCT_KVM_CPUID2 */
307 # ifdef HAVE_STRUCT_KVM_SREGS
309 kvm_ioctl_decode_sregs(struct tcb *const tcp, const unsigned int code,
310 const kernel_ulong_t arg)
312 struct kvm_sregs sregs;
314 if (code == KVM_GET_SREGS && entering(tcp))
318 if (!umove_or_printaddr(tcp, arg, &sregs))
319 arch_print_kvm_sregs(tcp, arg, &sregs);
321 return RVAL_IOCTL_DECODED;
323 # endif /* HAVE_STRUCT_KVM_SREGS */
325 # include "xlat/kvm_exit_reason.h"
327 kvm_ioctl_run_attach_auxstr(struct tcb *const tcp,
328 struct vcpu_info *info)
331 static struct kvm_run vcpu_run_struct;
333 if (info->mmap_len < sizeof(vcpu_run_struct))
336 if (umove(tcp, info->mmap_addr, &vcpu_run_struct) < 0)
339 tcp->auxstr = xlat_idx(kvm_exit_reason, ARRAY_SIZE(kvm_exit_reason) - 1,
340 vcpu_run_struct.exit_reason);
342 tcp->auxstr = "KVM_EXIT_???";
346 kvm_ioctl_decode_run(struct tcb *const tcp)
352 int r = RVAL_DECODED;
357 if (dump_kvm_run_structure) {
359 int fd = tcp->u_arg[0];
360 struct vcpu_info *info = vcpu_get_info(tcp, fd);
363 kvm_ioctl_run_attach_auxstr(tcp, info);
373 kvm_ioctl(struct tcb *const tcp, const unsigned int code, const kernel_ulong_t arg)
376 case KVM_CREATE_VCPU:
377 return kvm_ioctl_create_vcpu(tcp, arg);
379 # ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
380 case KVM_SET_USER_MEMORY_REGION:
381 return kvm_ioctl_set_user_memory_region(tcp, arg);
384 # ifdef HAVE_STRUCT_KVM_REGS
387 return kvm_ioctl_decode_regs(tcp, code, arg);
390 # ifdef HAVE_STRUCT_KVM_SREGS
393 return kvm_ioctl_decode_sregs(tcp, code, arg);
396 # ifdef HAVE_STRUCT_KVM_CPUID2
398 case KVM_GET_SUPPORTED_CPUID:
399 # ifdef KVM_GET_EMULATED_CPUID
400 case KVM_GET_EMULATED_CPUID:
402 return kvm_ioctl_decode_cpuid2(tcp, code, arg);
406 return RVAL_DECODED | RVAL_FD;
409 return kvm_ioctl_decode_run(tcp);
411 case KVM_GET_VCPU_MMAP_SIZE:
412 case KVM_GET_API_VERSION:
419 kvm_run_structure_decoder_init(void)
421 dump_kvm_run_structure = true;
425 #endif /* HAVE_LINUX_KVM_H */