/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vCPU state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};

/*
 * Holds the relevant data for running a protected VM.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/*
	 * The number of vCPUs initialized and ready to run.
	 * Modifying this is protected by 'vm_table_lock'.
	 */
	unsigned int nr_vcpus;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};

/* Serialises updates to the hypervisor's VM handle table. */
extern hyp_spinlock_t vm_table_lock;

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_vm_is_protected(&hyp_vm->kvm);
}

void pkvm_hyp_vm_table_init(void *tbl);

/*
 * VM/vCPU lifecycle, driven by hypercalls from the host: the '_hva'
 * arguments are host virtual addresses of the memory donated to hyp.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);

/* Loading/putting of hyp vCPUs on the current physical CPU. */
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);

/* Handle-based VM lookup; the getters take a reference dropped by the put. */
struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);

/* System register trap handling and ID register setup for protected VMs. */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
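
/*
 * Illustrative usage (an editorial sketch, not part of the upstream header):
 * the loaded-vCPU API is meant to be used in load/put pairs on the hyp side;
 * 'handle' and 'vcpu_idx' below are placeholders for the values the host
 * passes down via hypercall.
 *
 *	struct pkvm_hyp_vcpu *hyp_vcpu;
 *
 *	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
 *	if (!hyp_vcpu)
 *		return;		(lookup failed, or a vCPU is already loaded)
 *
 *	... operate on the trusted vCPU state at EL2; while loaded,
 *	pkvm_get_loaded_hyp_vcpu() returns this vCPU on this CPU ...
 *
 *	pkvm_put_hyp_vcpu(hyp_vcpu);
 */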