/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018/10/28     Bernard      The unified RISC-V porting implementation
 * 2018/12/27     Jesven       Add SMP support
 */

#include "cpuport.h"

#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable  rt_hw_local_irq_enable
#endif

/*
 * rt_base_t rt_hw_interrupt_disable(void);
 */
    .globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    csrrci a0, mstatus, 8   /* clear mstatus.MIE and return the previous mstatus */
    ret

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
    .globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    csrw mstatus, a0        /* write back the mstatus value saved by the disable call */
    ret

/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * #endif
 *
 * a0 --> to
 * a1 --> to_thread
 */
    .globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    LOAD sp, (a0)

#ifdef RT_USING_SMP
    mv   a0,   a1
    jal  rt_cpus_lock_status_restore
#endif

    LOAD a0,   2 * REGBYTES(sp)
    csrw mstatus, a0

    j    rt_hw_context_switch_exit

/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * #endif
 *
 * a0 --> from
 * a1 --> to
 * a2 --> to_thread
 */
    .globl rt_hw_context_switch
rt_hw_context_switch:
    /* save the "from" thread context
     *     x1/ra        -> sp(0)  (resume address)
     *     x1/ra        -> sp(1)
     *     mstatus MPIE -> sp(2)
     *     x(i)         -> sp(i), i = 4..31
     */
    addi  sp,  sp, -32 * REGBYTES
    STORE sp,  (a0)

    STORE x1,   0 * REGBYTES(sp)
    STORE x1,   1 * REGBYTES(sp)

    /* encode the current MIE state as an MPIE value for the final mret:
     * 0x80 if interrupts were enabled, 0 otherwise */
    csrr a0, mstatus
    andi a0, a0, 8
    beqz a0, save_mpie
    li   a0, 0x80
save_mpie:
    STORE a0,   2 * REGBYTES(sp)

    STORE x4,   4 * REGBYTES(sp)
    STORE x5,   5 * REGBYTES(sp)
    STORE x6,   6 * REGBYTES(sp)
    STORE x7,   7 * REGBYTES(sp)
    STORE x8,   8 * REGBYTES(sp)
    STORE x9,   9 * REGBYTES(sp)
    STORE x10, 10 * REGBYTES(sp)
    STORE x11, 11 * REGBYTES(sp)
    STORE x12, 12 * REGBYTES(sp)
    STORE x13, 13 * REGBYTES(sp)
    STORE x14, 14 * REGBYTES(sp)
    STORE x15, 15 * REGBYTES(sp)
    STORE x16, 16 * REGBYTES(sp)
    STORE x17, 17 * REGBYTES(sp)
    STORE x18, 18 * REGBYTES(sp)
    STORE x19, 19 * REGBYTES(sp)
    STORE x20, 20 * REGBYTES(sp)
    STORE x21, 21 * REGBYTES(sp)
    STORE x22, 22 * REGBYTES(sp)
    STORE x23, 23 * REGBYTES(sp)
    STORE x24, 24 * REGBYTES(sp)
    STORE x25, 25 * REGBYTES(sp)
    STORE x26, 26 * REGBYTES(sp)
    STORE x27, 27 * REGBYTES(sp)
    STORE x28, 28 * REGBYTES(sp)
    STORE x29, 29 * REGBYTES(sp)
    STORE x30, 30 * REGBYTES(sp)
    STORE x31, 31 * REGBYTES(sp)

    /* restore the "to" thread context
     * sp(0) -> mepc
     * sp(1) -> ra
     * sp(i) -> x(i), i = 4..31
     */
    LOAD sp,  (a1)

#ifdef RT_USING_SMP
    mv   a0,   a2
    jal  rt_cpus_lock_status_restore
#endif /* RT_USING_SMP */

    j rt_hw_context_switch_exit
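
/*
 * Context frame shared by the switch routines in this file, sketched from
 * the save/restore sequences above and below (32 * REGBYTES in total):
 *
 *   offset (in REGBYTES)   contents
 *   0                      resume address, written to mepc on exit
 *   1                      x1 (ra)
 *   2                      interrupt state, encoded as an mstatus.MPIE value
 *   3                      unused
 *   4 .. 31                x4 .. x31
 *
 * x2 (sp) itself is not stored in the frame; it is kept in the location that
 * "from"/"to" point at (the thread's sp field). A newly created thread is
 * assumed to receive an identical frame from rt_hw_stack_init() in the
 * companion cpuport.c, so that rt_hw_context_switch_to can start it through
 * the same exit path.
 */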

#ifdef RT_USING_SMP
/*
 * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 *
 * a0 --> context
 * a1 --> from
 * a2 --> to
 * a3 --> to_thread
 */
    .globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    STORE a0, 0(a1)    /* store the interrupted context pointer into the "from" thread's sp field */
    LOAD  sp, 0(a2)    /* pick up the "to" thread's saved stack pointer */
    move  a0, a3
    call  rt_cpus_lock_status_restore

    j rt_hw_context_switch_exit
#endif

    .globl rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
    mv a0, sp

    csrr  t0, mhartid
    /* switch to the interrupt stack of the current cpu */
    la    sp, __stack_start__
    addi  t1, t0, 1
    li    t2, __STACKSIZE__
    mul   t1, t1, t2
    add   sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */

    call rt_signal_check
    mv sp, a0
#endif
#endif
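
    /*
     * mstatus is rebuilt rather than reloaded verbatim: 0x00001800 sets
     * MPP = 0b11 (machine mode), and the value saved at sp(2) is either
     * 0x80 (MPIE) or 0, so the mret below returns to machine mode with
     * MIE restored to the state captured when the context was saved.
     */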
    /* restore ra (the saved resume address) to mepc */
    LOAD a0,   0 * REGBYTES(sp)
    csrw mepc, a0

    LOAD x1,   1 * REGBYTES(sp)

    li    t0, 0x00001800
    csrw  mstatus, t0
    LOAD a0,   2 * REGBYTES(sp)
    csrs mstatus, a0

    LOAD x4,   4 * REGBYTES(sp)
    LOAD x5,   5 * REGBYTES(sp)
    LOAD x6,   6 * REGBYTES(sp)
    LOAD x7,   7 * REGBYTES(sp)
    LOAD x8,   8 * REGBYTES(sp)
    LOAD x9,   9 * REGBYTES(sp)
    LOAD x10, 10 * REGBYTES(sp)
    LOAD x11, 11 * REGBYTES(sp)
    LOAD x12, 12 * REGBYTES(sp)
    LOAD x13, 13 * REGBYTES(sp)
    LOAD x14, 14 * REGBYTES(sp)
    LOAD x15, 15 * REGBYTES(sp)
    LOAD x16, 16 * REGBYTES(sp)
    LOAD x17, 17 * REGBYTES(sp)
    LOAD x18, 18 * REGBYTES(sp)
    LOAD x19, 19 * REGBYTES(sp)
    LOAD x20, 20 * REGBYTES(sp)
    LOAD x21, 21 * REGBYTES(sp)
    LOAD x22, 22 * REGBYTES(sp)
    LOAD x23, 23 * REGBYTES(sp)
    LOAD x24, 24 * REGBYTES(sp)
    LOAD x25, 25 * REGBYTES(sp)
    LOAD x26, 26 * REGBYTES(sp)
    LOAD x27, 27 * REGBYTES(sp)
    LOAD x28, 28 * REGBYTES(sp)
    LOAD x29, 29 * REGBYTES(sp)
    LOAD x30, 30 * REGBYTES(sp)
    LOAD x31, 31 * REGBYTES(sp)

    addi sp,  sp, 32 * REGBYTES

    mret