author    Linus Torvalds <torvalds@linux-foundation.org>    2022-12-19 07:13:33 -0600
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-12-19 07:13:33 -0600
commit    5f6e430f931d245da838db3e10e918681207029b (patch)
tree      6adc54a582652ae470ce95d9205f798568339ff0 /arch/powerpc/kvm
parent    a6e3e6f138058ff184d8ef5064a033b3f5fee8f8 (diff)
parent    980411a4d1bb925d28cd9e8d8301dc982ece788d (diff)
Merge tag 'powerpc-6.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - Add powerpc qspinlock implementation optimised for large system
   scalability and paravirt. See the merge message for more details

 - Enable objtool to be built on powerpc to generate mcount locations

 - Use a temporary mm for code patching with the Radix MMU, so the
   writable mapping is restricted to the patching CPU

 - Add an option to build the 64-bit big-endian kernel with the ELFv2 ABI

 - Sanitise user registers on interrupt entry on 64-bit Book3S

 - Many other small features and fixes

Thanks to Aboorva Devarajan, Angel Iglesias, Benjamin Gray, Bjorn Helgaas, Bo Liu, Chen Lifu, Christoph Hellwig, Christophe JAILLET, Christophe Leroy, Christopher M. Riedl, Colin Ian King, Deming Wang, Disha Goel, Dmitry Torokhov, Finn Thain, Geert Uytterhoeven, Gustavo A. R. Silva, Haowen Bai, Joel Stanley, Jordan Niethe, Julia Lawall, Kajol Jain, Laurent Dufour, Li zeming, Miaoqian Lin, Michael Jeanson, Nathan Lynch, Naveen N. Rao, Nayna Jain, Nicholas Miehlbradt, Nicholas Piggin, Pali Rohár, Randy Dunlap, Rohan McLure, Russell Currey, Sathvika Vasireddy, Shaomin Deng, Stephen Kitt, Stephen Rothwell, Thomas Weißschuh, Tiezhu Yang, Uwe Kleine-König, Xie Shaowen, Xiu Jianfeng, XueBing Chen, Yang Yingliang, Zhang Jiaming, ruanjinjie, Jessica Yu, and Wolfram Sang.

* tag 'powerpc-6.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (181 commits)
  powerpc/code-patching: Fix oops with DEBUG_VM enabled
  powerpc/qspinlock: Fix 32-bit build
  powerpc/prom: Fix 32-bit build
  powerpc/rtas: mandate RTAS syscall filtering
  powerpc/rtas: define pr_fmt and convert printk call sites
  powerpc/rtas: clean up includes
  powerpc/rtas: clean up rtas_error_log_max initialization
  powerpc/pseries/eeh: use correct API for error log size
  powerpc/rtas: avoid scheduling in rtas_os_term()
  powerpc/rtas: avoid device tree lookups in rtas_os_term()
  powerpc/rtasd: use correct OF API for event scan rate
  powerpc/rtas: document rtas_call()
  powerpc/pseries: unregister VPA when hot unplugging a CPU
  powerpc/pseries: reset the RCU watchdogs after a LPM
  powerpc: Take in account addition CPU node when building kexec FDT
  powerpc: export the CPU node count
  powerpc/cpuidle: Set CPUIDLE_FLAG_POLLING for snooze state
  powerpc/dts/fsl: Fix pca954x i2c-mux node names
  cxl: Remove unnecessary cxl_pci_window_alignment()
  selftests/powerpc: Fix resource leaks
  ...
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S27
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c5
-rw-r--r--arch/powerpc/kvm/book3s_xive.c12
-rw-r--r--arch/powerpc/kvm/book3s_xive.h3
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c3
-rw-r--r--arch/powerpc/kvm/booke.c3
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/fpu.S17
11 files changed, 52 insertions, 44 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 4939f57b6f6a..7006bcbc2e37 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1202,7 +1202,7 @@ static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
if (rc < 0)
return rc;
- resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
+ resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__,
resize->hpt.virt);
return 0;
@@ -1443,7 +1443,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
*/
mutex_unlock(&kvm->arch.mmu_setup_lock);
- resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+ resize_hpt_debug(resize, "%s(): order = %d\n", __func__,
resize->order);
err = resize_hpt_allocate(resize);
@@ -1887,8 +1887,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
tmp);
if (ret != H_SUCCESS) {
- pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
- "r=%lx\n", ret, i, v, r);
+ pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
goto out;
}
if (!mmu_ready && is_vrma_hpte(v)) {
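
All three hunks in this file apply the same cleanup: a function name hard-coded into a format string is replaced with the standard C99 __func__ identifier, so a later rename cannot leave a stale name in the log output. A minimal standalone sketch of the idea, with plain fprintf() standing in for the kernel's pr_err():

#include <stdio.h>

static void demo(void)
{
	/* The function name is not hard-coded in the string, so renaming
	 * demo() cannot leave a stale name in the log output. */
	fprintf(stderr, "%s: something went wrong\n", __func__);
}

int main(void)
{
	demo();
	return 0;
}
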
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 40864373ef87..95e738ef9062 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -294,14 +294,14 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
struct mm_struct *mm = kvm->mm;
- unsigned long npages, size = args->size;
+ unsigned long npages;
int ret;
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
return -EINVAL;
- npages = kvmppc_tce_pages(size);
+ npages = kvmppc_tce_pages(args->size);
ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
if (ret)
return ret;
@@ -314,7 +314,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
stt->liobn = args->liobn;
stt->page_shift = args->page_shift;
stt->offset = args->offset;
- stt->size = size;
+ stt->size = args->size;
stt->kvm = kvm;
mutex_init(&stt->alloc_lock);
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
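
This hunk drops the local size alias, which was captured from args->size before the argument validation ran, and reads the validated field directly instead. A standalone sketch of the bounds check this function performs (the struct and function names here are hypothetical stand-ins):

#include <errno.h>
#include <stdint.h>

/* Hypothetical stand-in for the ioctl argument struct. */
struct tce_create_args {
	uint32_t page_shift;
	uint64_t offset;	/* window offset, in pages */
	uint64_t size;		/* window size, in pages */
};

/* Mirrors the check above: page_shift must describe a 4KB..16GB page,
 * and offset + size must stay within the range addressable at that
 * page size. */
static int validate_tce_args(const struct tce_create_args *args)
{
	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
	    (args->offset + args->size > (UINT64_MAX >> args->page_shift)))
		return -EINVAL;
	return 0;	/* args->size can now be used directly */
}
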
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 59d89e4b154a..c0deeea7eef3 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -9,6 +9,7 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
+#include <linux/linkage.h>
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
@@ -107,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* void kvmhv_save_host_pmu(void)
*/
-kvmhv_save_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_host_pmu)
BEGIN_FTR_SECTION
/* Work around P8 PMAE bug */
li r3, -1
@@ -154,3 +155,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
stw r8, HSTATE_PMC5(r13)
stw r9, HSTATE_PMC6(r13)
31: blr
+SYM_FUNC_END(kvmhv_save_host_pmu)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 37f50861dd98..acf80915f406 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -10,6 +10,8 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
+#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
@@ -1522,12 +1524,14 @@ kvm_flush_link_stack:
/* Flush the link stack. On Power8 it's up to 32 entries in size. */
.rept 32
+ ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
.rept 32
+ ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2358,7 +2362,7 @@ hmi_realmode:
* This routine calls kvmppc_read_intr, a C function, if an external
* interrupt is pending.
*/
-kvmppc_check_wake_reason:
+SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason)
mfspr r6, SPRN_SRR1
BEGIN_FTR_SECTION
rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
@@ -2427,6 +2431,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
addi r1, r1, PPC_MIN_STKFRM
mtlr r0
blr
+SYM_FUNC_END(kvmppc_check_wake_reason)
/*
* Save away FP, VMX and VSX registers.
@@ -2434,7 +2439,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* N.B. r30 and r31 are volatile across this function,
* thus it is not callable from C.
*/
-kvmppc_save_fp:
+SYM_FUNC_START_LOCAL(kvmppc_save_fp)
mflr r30
mr r31,r3
mfmsr r5
@@ -2462,6 +2467,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
stw r6,VCPU_VRSAVE(r31)
mtlr r30
blr
+SYM_FUNC_END(kvmppc_save_fp)
/*
* Load up FP, VMX and VSX registers
@@ -2469,7 +2475,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* N.B. r30 and r31 are volatile across this function,
* thus it is not callable from C.
*/
-kvmppc_load_fp:
+SYM_FUNC_START_LOCAL(kvmppc_load_fp)
mflr r30
mr r31,r4
mfmsr r9
@@ -2498,6 +2504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtlr r30
mr r4,r31
blr
+SYM_FUNC_END(kvmppc_load_fp)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
@@ -2729,7 +2736,7 @@ kvmppc_bad_host_intr:
std r6, SOFTE(r1)
LOAD_PACA_TOC()
LOAD_REG_IMMEDIATE(3, STACK_FRAME_REGS_MARKER)
- std r3, STACK_FRAME_OVERHEAD-16(r1)
+ std r3, STACK_INT_FRAME_MARKER(r1)
/*
* XXX On POWER7 and POWER8, we just spin here since we don't
@@ -2746,7 +2753,7 @@ kvmppc_bad_host_intr:
* r9 has a vcpu pointer (in)
* r0 is used as a scratch register
*/
-kvmppc_msr_interrupt:
+SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt)
rldicl r0, r11, 64 - MSR_TS_S_LG, 62
cmpwi r0, 2 /* Check if we are in transactional state.. */
ld r11, VCPU_INTR_MSR(r9)
@@ -2755,13 +2762,14 @@ kvmppc_msr_interrupt:
li r0, 1
1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
blr
+SYM_FUNC_END(kvmppc_msr_interrupt)
/*
* void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
*
* Load up guest PMU state. R3 points to the vcpu struct.
*/
-kvmhv_load_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu)
mr r4, r3
mflr r0
li r3, 1
@@ -2811,13 +2819,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
isync
mtlr r0
blr
+SYM_FUNC_END(kvmhv_load_guest_pmu)
/*
* void kvmhv_load_host_pmu(void)
*
* Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
*/
-kvmhv_load_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu)
mflr r0
lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
cmpwi r4, 0
@@ -2859,6 +2868,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
isync
mtlr r0
23: blr
+SYM_FUNC_END(kvmhv_load_host_pmu)
/*
* void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
@@ -2866,7 +2876,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* Save guest PMU state into the vcpu struct.
* r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
*/
-kvmhv_save_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu)
mr r9, r3
mr r8, r4
BEGIN_FTR_SECTION
@@ -2942,6 +2952,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22: blr
+SYM_FUNC_END(kvmhv_save_guest_pmu)
/*
* This works around a hardware bug on POWER8E processors, where
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2f11f9c3f2a..1d67baa5557a 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -1190,8 +1190,7 @@ int kvmppc_uvmem_init(void)
pfn_first = res->start >> PAGE_SHIFT;
pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
- kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
- sizeof(unsigned long), GFP_KERNEL);
+ kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL);
if (!kvmppc_uvmem_bitmap) {
ret = -ENOMEM;
goto out_unmap;
@@ -1215,5 +1214,5 @@ void kvmppc_uvmem_free(void)
memunmap_pages(&kvmppc_uvmem_pgmap);
release_mem_region(kvmppc_uvmem_pgmap.range.start,
range_len(&kvmppc_uvmem_pgmap.range));
- kfree(kvmppc_uvmem_bitmap);
+ bitmap_free(kvmppc_uvmem_bitmap);
}
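
bitmap_zalloc() and bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(...), sizeof(unsigned long), ...)/kfree() pair, moving the bits-to-longs size arithmetic behind the bitmap API. A rough userspace approximation of the sizing the helper performs (the real kernel functions live in <linux/bitmap.h> and additionally take a gfp_t allocation-flags argument):

#include <limits.h>
#include <stdlib.h>

/* Number of unsigned longs needed to hold nbits bits, rounded up;
 * approximates the kernel's BITS_TO_LONGS(). */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits) \
	(((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Sketch of bitmap_zalloc(): a zero-filled bitmap of nbits bits. */
static unsigned long *bitmap_zalloc_sketch(unsigned long nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

/* Sketch of bitmap_free(). */
static void bitmap_free_sketch(unsigned long *bitmap)
{
	free(bitmap);
}
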
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 4ca23644f752..f4115819e738 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -539,7 +539,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
if (irq == XICS_IPI || irq == 0) {
/*
* This barrier orders the setting of xc->cppr vs.
- * subsquent test of xc->mfrr done inside
+ * subsequent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
@@ -563,7 +563,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
/*
* This barrier orders both setting of in_eoi above vs,
* subsequent test of guest_priority, and the setting
- * of xc->cppr vs. subsquent test of xc->mfrr done inside
+ * of xc->cppr vs. subsequent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
@@ -1785,8 +1785,7 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
* stale_p (because it has no easy way to address it). Hence we have
* to adjust stale_p before shutting down the interrupt.
*/
-void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
- struct kvmppc_xive_vcpu *xc, int irq)
+void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -1827,8 +1826,7 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
if (xc->esc_virq[i]) {
if (kvmppc_xive_has_single_escalation(xc->xive))
- xive_cleanup_single_escalation(vcpu, xc,
- xc->esc_virq[i]);
+ xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]);
free_irq(xc->esc_virq[i], vcpu);
irq_dispose_mapping(xc->esc_virq[i]);
kfree(xc->esc_virq_names[i]);
@@ -2392,7 +2390,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
/*
* Now, we select a target if we have one. If we don't we
* leave the interrupt untargetted. It means that an interrupt
- * can become "untargetted" accross migration if it was masked
+ * can become "untargetted" across migration if it was masked
* by set_xive() but there is little we can do about it.
*/
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 1e48f72e8aa5..62bf39f53783 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -299,8 +299,7 @@ int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
bool single_escalation);
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
-void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
- struct kvmppc_xive_vcpu *xc, int irq);
+void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);
bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 5271c33fe79e..4f566bea5e10 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -93,8 +93,7 @@ void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
/* Free the escalation irq */
if (xc->esc_virq[i]) {
if (kvmppc_xive_has_single_escalation(xc->xive))
- xive_cleanup_single_escalation(vcpu, xc,
- xc->esc_virq[i]);
+ xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]);
free_irq(xc->esc_virq[i], vcpu);
irq_dispose_mapping(xc->esc_virq[i]);
kfree(xc->esc_virq_names[i]);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7b4920e9fd26..0dce93ccaadf 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1015,6 +1015,9 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
u32 last_inst = KVM_INST_FETCH_FAILED;
enum emulation_result emulated = EMULATE_DONE;
+ /* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
+ kvmppc_fix_ee_after_exit();
+
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 8262c14fc9e6..b5fe6fb53c66 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -424,15 +424,6 @@ _GLOBAL(kvmppc_resume_host)
mtspr SPRN_EPCR, r3
isync
-#ifdef CONFIG_64BIT
- /*
- * We enter with interrupts disabled in hardware, but
- * we need to call RECONCILE_IRQ_STATE to ensure
- * that the software state is kept in sync.
- */
- RECONCILE_IRQ_STATE(r3,r5)
-#endif
-
/* Switch to kernel stack and jump to handler. */
mr r3, r4
mr r5, r14 /* intno */
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 315c94946bad..b68e7f26a81f 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -6,6 +6,8 @@
*/
#include <linux/pgtable.h>
+#include <linux/linkage.h>
+
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -110,18 +112,22 @@ FPS_THREE_IN(fsel)
* R8 = (double*)&param3 [load_three]
* LR = instruction call function
*/
-fpd_load_three:
+SYM_FUNC_START_LOCAL(fpd_load_three)
lfd 2,0(r8) /* load param3 */
-fpd_load_two:
+SYM_FUNC_START_LOCAL(fpd_load_two)
lfd 1,0(r7) /* load param2 */
-fpd_load_one:
+SYM_FUNC_START_LOCAL(fpd_load_one)
lfd 0,0(r6) /* load param1 */
-fpd_load_none:
+SYM_FUNC_START_LOCAL(fpd_load_none)
lfd 3,0(r3) /* load up fpscr value */
MTFSF_L(3)
lwz r6, 0(r4) /* load cr */
mtcr r6
blr
+SYM_FUNC_END(fpd_load_none)
+SYM_FUNC_END(fpd_load_one)
+SYM_FUNC_END(fpd_load_two)
+SYM_FUNC_END(fpd_load_three)
/*
* End of double instruction processing
@@ -131,13 +137,14 @@ fpd_load_none:
* R5 = (double*)&result
* LR = caller of instruction call function
*/
-fpd_return:
+SYM_FUNC_START_LOCAL(fpd_return)
mfcr r6
stfd 0,0(r5) /* save result */
mffs 0
stfd 0,0(r3) /* save new fpscr value */
stw r6,0(r4) /* save new cr value */
blr
+SYM_FUNC_END(fpd_return)
/*
* Double operation with no input operand