@@ -649,6 +649,81 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+	const s32 imm = insn->imm;
+	const s16 off = insn->off;
+	const u8 code = insn->code;
+	const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	u8 reg;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		reg = src;
+		break;
+	case BPF_STORE_REL:
+		reg = dst;
+		break;
+	default:
+		pr_err_once("unknown atomic load/store op code %02x\n", imm);
+		return -EINVAL;
+	}
+
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_LDARB(dst, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDARH(dst, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_LDAR32(dst, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDAR64(dst, reg), ctx);
+			break;
+		}
+		break;
+	case BPF_STORE_REL:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_STLRB(src, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STLRH(src, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_STLR32(src, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STLR64(src, reg), ctx);
+			break;
+		}
+		break;
+	default:
+		pr_err_once("unexpected atomic load/store op code %02x\n",
+			    imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
@@ -1690,11 +1765,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
-		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (bpf_atomic_is_load_store(insn))
+			ret = emit_atomic_ld_st(insn, ctx);
+		else if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			ret = emit_lse_atomic(insn, ctx);
 		else
 			ret = emit_ll_sc_atomic(insn, ctx);
@@ -2716,13 +2797,10 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	if (!in_arena)
 		return true;
 	switch (insn->code) {
-	case BPF_STX | BPF_ATOMIC | BPF_B:
-	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (bpf_atomic_is_load_store(insn))
-			return false;
-		if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (!bpf_atomic_is_load_store(insn) &&
+		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
 	}
 	return true;
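
For context, and not part of the diff above: the new emit_atomic_ld_st() path handles BPF_STX | BPF_ATOMIC instructions whose imm field carries BPF_LOAD_ACQ or BPF_STORE_REL. Below is a minimal sketch of how such instructions could be constructed with the existing BPF_ATOMIC_OP() helper from include/linux/filter.h; the helper name build_ld_st_examples() is illustrative, and the sketch assumes the BPF_LOAD_ACQ/BPF_STORE_REL opcode values from this series are present in the uapi header.

#include <linux/filter.h>	/* BPF_ATOMIC_OP(), struct bpf_insn */

/* Sketch only. Register roles mirror the JIT above: a load-acquire reads
 * its address from src_reg and writes the result to dst_reg, while a
 * store-release reads its address from dst_reg and stores src_reg.
 */
static void build_ld_st_examples(struct bpf_insn *out)
{
	/* r0 = load_acquire((u64 *)(r1 + 0)) */
	out[0] = BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0);
	/* store_release((u32 *)(r1 + 0), w2) */
	out[1] = BPF_ATOMIC_OP(BPF_W, BPF_STORE_REL, BPF_REG_1, BPF_REG_2, 0);
}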