/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)

#define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_NV_GPRS	16
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + 4) /* In caller stack frame. */

#define NEED_INST_MASK ((1<

>2
	/* Zero the VCPU_SHADOW_MOD array; CTR holds the word count. */
	mtctr	r5
	addi	r5, r4, VCPU_SHADOW_MOD - 4
	li	r6, 0
1:	stwu	r6, 4(r5)
	bdnz	1b

	iccci	0, 0 /* XXX hack */

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(r0)(r4)
	lwz	r2, VCPU_GPR(r2)(r4)
	lwz	r9, VCPU_GPR(r9)(r4)
	lwz	r10, VCPU_GPR(r10)(r4)
	lwz	r11, VCPU_GPR(r11)(r4)
	lwz	r12, VCPU_GPR(r12)(r4)
	lwz	r13, VCPU_GPR(r13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure the vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	/* Save vcpu pointer for the exception handlers. */
	mtspr	SPRN_SPRG1, r4

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(r1)(r4)

	/* XXX handle USPRG0 */
	/* Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values. */
	lwz	r3, VCPU_SPRG4(r4)
	mtspr	SPRN_SPRG4, r3
	lwz	r3, VCPU_SPRG5(r4)
	mtspr	SPRN_SPRG5, r3
	lwz	r3, VCPU_SPRG6(r4)
	mtspr	SPRN_SPRG6, r3
	lwz	r3, VCPU_SPRG7(r4)
	mtspr	SPRN_SPRG7, r3

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	mtctr	r3
	lwz	r3, VCPU_CR(r4)
	mtcr	r3
	lwz	r5, VCPU_GPR(r5)(r4)
	lwz	r6, VCPU_GPR(r6)(r4)
	lwz	r7, VCPU_GPR(r7)(r4)
	lwz	r8, VCPU_GPR(r8)(r4)
	lwz	r3, VCPU_PC(r4)
	mtsrr0	r3
	lwz	r3, VCPU_MSR(r4)
	oris	r3, r3, KVMPPC_MSR_MASK@h
	ori	r3, r3, KVMPPC_MSR_MASK@l
	mtsrr1	r3

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(r3)(r4)
	lwz	r4, VCPU_GPR(r4)(r4)
	rfi