[LTP] [PATCH 02/10] kvm_svm_vmrun(): Simplify VM state save/load with macros
Martin Doucha
mdoucha@suse.cz
Tue Jan 21 17:44:16 CET 2025
Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
testcases/kernel/kvm/bootstrap_x86.S | 57 +++++++++-----
testcases/kernel/kvm/bootstrap_x86_64.S | 99 +++++++++++++++----------
2 files changed, 98 insertions(+), 58 deletions(-)
diff --git a/testcases/kernel/kvm/bootstrap_x86.S b/testcases/kernel/kvm/bootstrap_x86.S
index 79d2218d3..f08282461 100644
--- a/testcases/kernel/kvm/bootstrap_x86.S
+++ b/testcases/kernel/kvm/bootstrap_x86.S
@@ -361,6 +361,34 @@ kvm_svm_guest_entry:
1: hlt
jmp 1b
+/* vcpu structure address must be in %edi */
+.macro load_vcpu_regs
+ movl 0x04(%edi), %eax
+ movl 0x0c(%edi), %ebx
+ movl 0x14(%edi), %ecx
+ movl 0x1c(%edi), %edx
+	/* load %edi last */
+ movl 0x2c(%edi), %esi
+ movl 0x34(%edi), %ebp
+ /* skip %esp */
+ movl 0x24(%edi), %edi
+.endm
+
+/* vcpu structure address must be on top of the stack */
+.macro save_vcpu_regs
+ push %edi
+ movl 4(%esp), %edi
+ movl %eax, 0x04(%edi)
+ movl %ebx, 0x0c(%edi)
+ movl %ecx, 0x14(%edi)
+ movl %edx, 0x1c(%edi)
+ pop %eax
+ movl %eax, 0x24(%edi)
+ movl %esi, 0x2c(%edi)
+ movl %ebp, 0x34(%edi)
+ /* skip %esp */
+.endm
+
.global kvm_svm_vmrun
kvm_svm_vmrun:
push %edi
@@ -377,17 +405,11 @@ kvm_svm_vmrun:
vmsave
push %eax
- /* Load guest registers */
push %edi
- movl (%edi), %eax
- /* %eax is loaded by vmrun from VMCB */
- movl 0x0c(%edi), %ebx
- movl 0x14(%edi), %ecx
- movl 0x1c(%edi), %edx
- movl 0x2c(%edi), %esi
- movl 0x34(%edi), %ebp
- /* %esp is loaded by vmrun from VMCB */
- movl 0x24(%edi), %edi
+ load_vcpu_regs
+ /* %eax = vcpu->vmcb; */
+ movl (%esp), %eax
+ movl (%eax), %eax
vmload
vmrun
@@ -395,8 +417,9 @@ kvm_svm_vmrun:
/* Clear guest register buffer */
push %edi
+ push %eax
push %ecx
- movl 8(%esp), %edi
+ movl 12(%esp), %edi
addl $4, %edi
xorl %eax, %eax
mov $32, %ecx
@@ -404,17 +427,13 @@ kvm_svm_vmrun:
cld
rep stosl
popfl
-
- /* Save guest registers */
pop %ecx
pop %eax
pop %edi
- movl %ebx, 0x0c(%edi)
- movl %ecx, 0x14(%edi)
- movl %edx, 0x1c(%edi)
- movl %eax, 0x24(%edi)
- movl %esi, 0x2c(%edi)
- movl %ebp, 0x34(%edi)
+
+ save_vcpu_regs
+ pop %edi
+
/* Copy %eax and %esp from VMCB */
movl (%edi), %esi
movl 0x5f8(%esi), %eax
diff --git a/testcases/kernel/kvm/bootstrap_x86_64.S b/testcases/kernel/kvm/bootstrap_x86_64.S
index 32170f7c9..1e0a2952d 100644
--- a/testcases/kernel/kvm/bootstrap_x86_64.S
+++ b/testcases/kernel/kvm/bootstrap_x86_64.S
@@ -484,35 +484,16 @@ kvm_svm_guest_entry:
1: hlt
jmp 1b
-.global kvm_svm_vmrun
-kvm_svm_vmrun:
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-
- clgi
-
- /* Save full host state */
- movq $MSR_VM_HSAVE_PA, %rcx
- rdmsr
- shlq $32, %rdx
- orq %rdx, %rax
- vmsave
- pushq %rax
-
- /* Load guest registers */
- pushq %rdi
- movq (%rdi), %rax
- /* %rax is loaded by vmrun from VMCB */
+/* vcpu structure address must be in %rdi */
+.macro load_vcpu_regs
+ movq 0x08(%rdi), %rax
movq 0x10(%rdi), %rbx
movq 0x18(%rdi), %rcx
movq 0x20(%rdi), %rdx
+ /* load %rdi last */
movq 0x30(%rdi), %rsi
movq 0x38(%rdi), %rbp
- /* %rsp is loaded by vmrun from VMCB */
+ /* skip %rsp */
movq 0x48(%rdi), %r8
movq 0x50(%rdi), %r9
movq 0x58(%rdi), %r10
@@ -522,21 +503,21 @@ kvm_svm_vmrun:
movq 0x78(%rdi), %r14
movq 0x80(%rdi), %r15
movq 0x28(%rdi), %rdi
+.endm
- vmload
- vmrun
- vmsave
-
- /* Save guest registers */
- movq %rdi, %rax
- popq %rdi
+/* vcpu structure address must be on top of the stack */
+.macro save_vcpu_regs
+ pushq %rdi
+ movq 8(%rsp), %rdi
+ movq %rax, 0x08(%rdi)
movq %rbx, 0x10(%rdi)
movq %rcx, 0x18(%rdi)
movq %rdx, 0x20(%rdi)
- /* %rax contains guest %rdi */
+ popq %rax
movq %rax, 0x28(%rdi)
movq %rsi, 0x30(%rdi)
movq %rbp, 0x38(%rdi)
+ /* skip %rsp */
movq %r8, 0x48(%rdi)
movq %r9, 0x50(%rdi)
movq %r10, 0x58(%rdi)
@@ -545,6 +526,52 @@ kvm_svm_vmrun:
movq %r13, 0x70(%rdi)
movq %r14, 0x78(%rdi)
movq %r15, 0x80(%rdi)
+.endm
+
+.macro push_local
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+.endm
+
+.macro pop_local
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+.endm
+
+.global kvm_svm_vmrun
+kvm_svm_vmrun:
+ push_local
+ clgi
+
+ /* Save full host state */
+ movq $MSR_VM_HSAVE_PA, %rcx
+ rdmsr
+ shlq $32, %rdx
+ orq %rdx, %rax
+ vmsave
+ pushq %rax
+
+ pushq %rdi
+ load_vcpu_regs
+ /* %rax = vcpu->vmcb; */
+ movq (%rsp), %rax
+ movq (%rax), %rax
+
+ vmload
+ vmrun
+ vmsave
+
+ save_vcpu_regs
+ popq %rdi
+
/* copy guest %rax and %rsp from VMCB*/
movq (%rdi), %rsi
movq 0x5f8(%rsi), %rax
@@ -557,13 +584,7 @@ kvm_svm_vmrun:
vmload
stgi
-
- popq %r15
- popq %r14
- popq %r13
- popq %r12
- popq %rbp
- popq %rbx
+ pop_local
retq
.section .bss.pgtables, "aw", @nobits
--
2.47.0
More information about the ltp
mailing list