@@ -113,6 +113,7 @@ struct jit_ctx {
113
113
u64 * reg_val_types ;
114
114
unsigned int long_b_conversion :1 ;
115
115
unsigned int gen_b_offsets :1 ;
116
+ unsigned int use_bbit_insns :1 ;
116
117
};
117
118
118
119
static void set_reg_val_type (u64 * rvt , int reg , enum reg_val_type type )
@@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
655
656
return build_int_epilogue (ctx , MIPS_R_T9 );
656
657
}
657
658
658
- static bool use_bbit_insns (void )
659
- {
660
- switch (current_cpu_type ()) {
661
- case CPU_CAVIUM_OCTEON :
662
- case CPU_CAVIUM_OCTEON_PLUS :
663
- case CPU_CAVIUM_OCTEON2 :
664
- case CPU_CAVIUM_OCTEON3 :
665
- return true;
666
- default :
667
- return false;
668
- }
669
- }
670
-
671
659
/*
 * Return true when @b_off cannot be encoded in a MIPS branch
 * displacement.  The encodable range is the signed 18-bit window
 * [-0x20000, 0x1ffff] (presumably a byte offset: the 16-bit branch
 * immediate counts instructions and is shifted left by 2).
 */
static bool is_bad_offset(int b_off)
{
	return !(b_off >= -0x20000 && b_off <= 0x1ffff);
}
@@ -682,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
682
670
unsigned int target ;
683
671
u64 t64 ;
684
672
s64 t64s ;
673
+ int bpf_op = BPF_OP (insn -> code );
685
674
686
675
switch (insn -> code ) {
687
676
case BPF_ALU64 | BPF_ADD | BPF_K : /* ALU64_IMM */
@@ -770,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
770
759
emit_instr (ctx , sll , dst , dst , 0 );
771
760
if (insn -> imm == 1 ) {
772
761
/* div by 1 is a nop, mod by 1 is zero */
773
- if (BPF_OP ( insn -> code ) == BPF_MOD )
762
+ if (bpf_op == BPF_MOD )
774
763
emit_instr (ctx , addu , dst , MIPS_R_ZERO , MIPS_R_ZERO );
775
764
break ;
776
765
}
777
766
gen_imm_to_reg (insn , MIPS_R_AT , ctx );
778
767
emit_instr (ctx , divu , dst , MIPS_R_AT );
779
- if (BPF_OP ( insn -> code ) == BPF_DIV )
768
+ if (bpf_op == BPF_DIV )
780
769
emit_instr (ctx , mflo , dst );
781
770
else
782
771
emit_instr (ctx , mfhi , dst );
@@ -798,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
798
787
799
788
if (insn -> imm == 1 ) {
800
789
/* div by 1 is a nop, mod by 1 is zero */
801
- if (BPF_OP ( insn -> code ) == BPF_MOD )
790
+ if (bpf_op == BPF_MOD )
802
791
emit_instr (ctx , addu , dst , MIPS_R_ZERO , MIPS_R_ZERO );
803
792
break ;
804
793
}
805
794
gen_imm_to_reg (insn , MIPS_R_AT , ctx );
806
795
emit_instr (ctx , ddivu , dst , MIPS_R_AT );
807
- if (BPF_OP ( insn -> code ) == BPF_DIV )
796
+ if (bpf_op == BPF_DIV )
808
797
emit_instr (ctx , mflo , dst );
809
798
else
810
799
emit_instr (ctx , mfhi , dst );
@@ -829,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
829
818
emit_instr (ctx , dinsu , dst , MIPS_R_ZERO , 32 , 32 );
830
819
did_move = false;
831
820
if (insn -> src_reg == BPF_REG_10 ) {
832
- if (BPF_OP ( insn -> code ) == BPF_MOV ) {
821
+ if (bpf_op == BPF_MOV ) {
833
822
emit_instr (ctx , daddiu , dst , MIPS_R_SP , MAX_BPF_STACK );
834
823
did_move = true;
835
824
} else {
@@ -839,15 +828,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
839
828
} else if (get_reg_val_type (ctx , this_idx , insn -> src_reg ) == REG_32BIT ) {
840
829
int tmp_reg = MIPS_R_AT ;
841
830
842
- if (BPF_OP ( insn -> code ) == BPF_MOV ) {
831
+ if (bpf_op == BPF_MOV ) {
843
832
tmp_reg = dst ;
844
833
did_move = true;
845
834
}
846
835
emit_instr (ctx , daddu , tmp_reg , src , MIPS_R_ZERO );
847
836
emit_instr (ctx , dinsu , tmp_reg , MIPS_R_ZERO , 32 , 32 );
848
837
src = MIPS_R_AT ;
849
838
}
850
- switch (BPF_OP ( insn -> code ) ) {
839
+ switch (bpf_op ) {
851
840
case BPF_MOV :
852
841
if (!did_move )
853
842
emit_instr (ctx , daddu , dst , src , MIPS_R_ZERO );
@@ -879,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
879
868
emit_instr (ctx , beq , src , MIPS_R_ZERO , b_off );
880
869
emit_instr (ctx , movz , MIPS_R_V0 , MIPS_R_ZERO , src );
881
870
emit_instr (ctx , ddivu , dst , src );
882
- if (BPF_OP ( insn -> code ) == BPF_DIV )
871
+ if (bpf_op == BPF_DIV )
883
872
emit_instr (ctx , mflo , dst );
884
873
else
885
874
emit_instr (ctx , mfhi , dst );
@@ -923,15 +912,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
923
912
if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX ) {
924
913
int tmp_reg = MIPS_R_AT ;
925
914
926
- if (BPF_OP ( insn -> code ) == BPF_MOV ) {
915
+ if (bpf_op == BPF_MOV ) {
927
916
tmp_reg = dst ;
928
917
did_move = true;
929
918
}
930
919
/* sign extend */
931
920
emit_instr (ctx , sll , tmp_reg , src , 0 );
932
921
src = MIPS_R_AT ;
933
922
}
934
- switch (BPF_OP ( insn -> code ) ) {
923
+ switch (bpf_op ) {
935
924
case BPF_MOV :
936
925
if (!did_move )
937
926
emit_instr (ctx , addu , dst , src , MIPS_R_ZERO );
@@ -962,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
962
951
emit_instr (ctx , beq , src , MIPS_R_ZERO , b_off );
963
952
emit_instr (ctx , movz , MIPS_R_V0 , MIPS_R_ZERO , src );
964
953
emit_instr (ctx , divu , dst , src );
965
- if (BPF_OP ( insn -> code ) == BPF_DIV )
954
+ if (bpf_op == BPF_DIV )
966
955
emit_instr (ctx , mflo , dst );
967
956
else
968
957
emit_instr (ctx , mfhi , dst );
@@ -989,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
989
978
break ;
990
979
case BPF_JMP | BPF_JEQ | BPF_K : /* JMP_IMM */
991
980
case BPF_JMP | BPF_JNE | BPF_K : /* JMP_IMM */
992
- cmp_eq = (BPF_OP ( insn -> code ) == BPF_JEQ );
981
+ cmp_eq = (bpf_op == BPF_JEQ );
993
982
dst = ebpf_to_mips_reg (ctx , insn , dst_reg_fp_ok );
994
983
if (dst < 0 )
995
984
return dst ;
@@ -1002,8 +991,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1002
991
goto jeq_common ;
1003
992
case BPF_JMP | BPF_JEQ | BPF_X : /* JMP_REG */
1004
993
case BPF_JMP | BPF_JNE | BPF_X :
994
+ case BPF_JMP | BPF_JSLT | BPF_X :
995
+ case BPF_JMP | BPF_JSLE | BPF_X :
1005
996
case BPF_JMP | BPF_JSGT | BPF_X :
1006
997
case BPF_JMP | BPF_JSGE | BPF_X :
998
+ case BPF_JMP | BPF_JLT | BPF_X :
999
+ case BPF_JMP | BPF_JLE | BPF_X :
1007
1000
case BPF_JMP | BPF_JGT | BPF_X :
1008
1001
case BPF_JMP | BPF_JGE | BPF_X :
1009
1002
case BPF_JMP | BPF_JSET | BPF_X :
@@ -1020,50 +1013,56 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1020
1013
emit_instr (ctx , sll , MIPS_R_AT , dst , 0 );
1021
1014
dst = MIPS_R_AT ;
1022
1015
}
1023
- if (BPF_OP ( insn -> code ) == BPF_JSET ) {
1016
+ if (bpf_op == BPF_JSET ) {
1024
1017
emit_instr (ctx , and , MIPS_R_AT , dst , src );
1025
1018
cmp_eq = false;
1026
1019
dst = MIPS_R_AT ;
1027
1020
src = MIPS_R_ZERO ;
1028
- } else if (BPF_OP ( insn -> code ) == BPF_JSGT ) {
1021
+ } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE ) {
1029
1022
emit_instr (ctx , dsubu , MIPS_R_AT , dst , src );
1030
1023
if ((insn + 1 )-> code == (BPF_JMP | BPF_EXIT ) && insn -> off == 1 ) {
1031
1024
b_off = b_imm (exit_idx , ctx );
1032
1025
if (is_bad_offset (b_off ))
1033
1026
return - E2BIG ;
1034
- emit_instr (ctx , blez , MIPS_R_AT , b_off );
1027
+ if (bpf_op == BPF_JSGT )
1028
+ emit_instr (ctx , blez , MIPS_R_AT , b_off );
1029
+ else
1030
+ emit_instr (ctx , bgtz , MIPS_R_AT , b_off );
1035
1031
emit_instr (ctx , nop );
1036
1032
return 2 ; /* We consumed the exit. */
1037
1033
}
1038
1034
b_off = b_imm (this_idx + insn -> off + 1 , ctx );
1039
1035
if (is_bad_offset (b_off ))
1040
1036
return - E2BIG ;
1041
- emit_instr (ctx , bgtz , MIPS_R_AT , b_off );
1037
+ if (bpf_op == BPF_JSGT )
1038
+ emit_instr (ctx , bgtz , MIPS_R_AT , b_off );
1039
+ else
1040
+ emit_instr (ctx , blez , MIPS_R_AT , b_off );
1042
1041
emit_instr (ctx , nop );
1043
1042
break ;
1044
- } else if (BPF_OP ( insn -> code ) == BPF_JSGE ) {
1043
+ } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT ) {
1045
1044
emit_instr (ctx , slt , MIPS_R_AT , dst , src );
1046
- cmp_eq = true ;
1045
+ cmp_eq = bpf_op == BPF_JSGE ;
1047
1046
dst = MIPS_R_AT ;
1048
1047
src = MIPS_R_ZERO ;
1049
- } else if (BPF_OP ( insn -> code ) == BPF_JGT ) {
1048
+ } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE ) {
1050
1049
/* dst or src could be AT */
1051
1050
emit_instr (ctx , dsubu , MIPS_R_T8 , dst , src );
1052
1051
emit_instr (ctx , sltu , MIPS_R_AT , dst , src );
1053
1052
/* SP known to be non-zero, movz becomes boolean not */
1054
1053
emit_instr (ctx , movz , MIPS_R_T9 , MIPS_R_SP , MIPS_R_T8 );
1055
1054
emit_instr (ctx , movn , MIPS_R_T9 , MIPS_R_ZERO , MIPS_R_T8 );
1056
1055
emit_instr (ctx , or , MIPS_R_AT , MIPS_R_T9 , MIPS_R_AT );
1057
- cmp_eq = true ;
1056
+ cmp_eq = bpf_op == BPF_JGT ;
1058
1057
dst = MIPS_R_AT ;
1059
1058
src = MIPS_R_ZERO ;
1060
- } else if (BPF_OP ( insn -> code ) == BPF_JGE ) {
1059
+ } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT ) {
1061
1060
emit_instr (ctx , sltu , MIPS_R_AT , dst , src );
1062
- cmp_eq = true ;
1061
+ cmp_eq = bpf_op == BPF_JGE ;
1063
1062
dst = MIPS_R_AT ;
1064
1063
src = MIPS_R_ZERO ;
1065
1064
} else { /* JNE/JEQ case */
1066
- cmp_eq = (BPF_OP ( insn -> code ) == BPF_JEQ );
1065
+ cmp_eq = (bpf_op == BPF_JEQ );
1067
1066
}
1068
1067
jeq_common :
1069
1068
/*
@@ -1122,7 +1121,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1122
1121
break ;
1123
1122
case BPF_JMP | BPF_JSGT | BPF_K : /* JMP_IMM */
1124
1123
case BPF_JMP | BPF_JSGE | BPF_K : /* JMP_IMM */
1125
- cmp_eq = (BPF_OP (insn -> code ) == BPF_JSGE );
1124
+ case BPF_JMP | BPF_JSLT | BPF_K : /* JMP_IMM */
1125
+ case BPF_JMP | BPF_JSLE | BPF_K : /* JMP_IMM */
1126
+ cmp_eq = (bpf_op == BPF_JSGE );
1126
1127
dst = ebpf_to_mips_reg (ctx , insn , dst_reg_fp_ok );
1127
1128
if (dst < 0 )
1128
1129
return dst ;
@@ -1132,73 +1133,100 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1132
1133
b_off = b_imm (exit_idx , ctx );
1133
1134
if (is_bad_offset (b_off ))
1134
1135
return - E2BIG ;
1135
- if (cmp_eq )
1136
- emit_instr (ctx , bltz , dst , b_off );
1137
- else
1136
+ switch (bpf_op ) {
1137
+ case BPF_JSGT :
1138
1138
emit_instr (ctx , blez , dst , b_off );
1139
+ break ;
1140
+ case BPF_JSGE :
1141
+ emit_instr (ctx , bltz , dst , b_off );
1142
+ break ;
1143
+ case BPF_JSLT :
1144
+ emit_instr (ctx , bgez , dst , b_off );
1145
+ break ;
1146
+ case BPF_JSLE :
1147
+ emit_instr (ctx , bgtz , dst , b_off );
1148
+ break ;
1149
+ }
1139
1150
emit_instr (ctx , nop );
1140
1151
return 2 ; /* We consumed the exit. */
1141
1152
}
1142
1153
b_off = b_imm (this_idx + insn -> off + 1 , ctx );
1143
1154
if (is_bad_offset (b_off ))
1144
1155
return - E2BIG ;
1145
- if (cmp_eq )
1146
- emit_instr (ctx , bgez , dst , b_off );
1147
- else
1156
+ switch (bpf_op ) {
1157
+ case BPF_JSGT :
1148
1158
emit_instr (ctx , bgtz , dst , b_off );
1159
+ break ;
1160
+ case BPF_JSGE :
1161
+ emit_instr (ctx , bgez , dst , b_off );
1162
+ break ;
1163
+ case BPF_JSLT :
1164
+ emit_instr (ctx , bltz , dst , b_off );
1165
+ break ;
1166
+ case BPF_JSLE :
1167
+ emit_instr (ctx , blez , dst , b_off );
1168
+ break ;
1169
+ }
1149
1170
emit_instr (ctx , nop );
1150
1171
break ;
1151
1172
}
1152
1173
/*
1153
1174
* only "LT" compare available, so we must use imm + 1
1154
- * to generate "GT"
1175
+ * to generate "GT" and imm -1 to generate LE
1155
1176
*/
1156
- t64s = insn -> imm + (cmp_eq ? 0 : 1 );
1177
+ if (bpf_op == BPF_JSGT )
1178
+ t64s = insn -> imm + 1 ;
1179
+ else if (bpf_op == BPF_JSLE )
1180
+ t64s = insn -> imm + 1 ;
1181
+ else
1182
+ t64s = insn -> imm ;
1183
+
1184
+ cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE ;
1157
1185
if (t64s >= S16_MIN && t64s <= S16_MAX ) {
1158
1186
emit_instr (ctx , slti , MIPS_R_AT , dst , (int )t64s );
1159
1187
src = MIPS_R_AT ;
1160
1188
dst = MIPS_R_ZERO ;
1161
- cmp_eq = true;
1162
1189
goto jeq_common ;
1163
1190
}
1164
1191
emit_const_to_reg (ctx , MIPS_R_AT , (u64 )t64s );
1165
1192
emit_instr (ctx , slt , MIPS_R_AT , dst , MIPS_R_AT );
1166
1193
src = MIPS_R_AT ;
1167
1194
dst = MIPS_R_ZERO ;
1168
- cmp_eq = true;
1169
1195
goto jeq_common ;
1170
1196
1171
1197
case BPF_JMP | BPF_JGT | BPF_K :
1172
1198
case BPF_JMP | BPF_JGE | BPF_K :
1173
- cmp_eq = (BPF_OP (insn -> code ) == BPF_JGE );
1199
+ case BPF_JMP | BPF_JLT | BPF_K :
1200
+ case BPF_JMP | BPF_JLE | BPF_K :
1201
+ cmp_eq = (bpf_op == BPF_JGE );
1174
1202
dst = ebpf_to_mips_reg (ctx , insn , dst_reg_fp_ok );
1175
1203
if (dst < 0 )
1176
1204
return dst ;
1177
1205
/*
1178
1206
* only "LT" compare available, so we must use imm + 1
1179
- * to generate "GT"
1207
+ * to generate "GT" and imm -1 to generate LE
1180
1208
*/
1181
- t64s = (u64 )(u32 )(insn -> imm ) + (cmp_eq ? 0 : 1 );
1182
- if (t64s >= 0 && t64s <= S16_MAX ) {
1183
- emit_instr (ctx , sltiu , MIPS_R_AT , dst , (int )t64s );
1184
- src = MIPS_R_AT ;
1185
- dst = MIPS_R_ZERO ;
1186
- cmp_eq = true;
1187
- goto jeq_common ;
1188
- }
1209
+ if (bpf_op == BPF_JGT )
1210
+ t64s = (u64 )(u32 )(insn -> imm ) + 1 ;
1211
+ else if (bpf_op == BPF_JLE )
1212
+ t64s = (u64 )(u32 )(insn -> imm ) + 1 ;
1213
+ else
1214
+ t64s = (u64 )(u32 )(insn -> imm );
1215
+
1216
+ cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE ;
1217
+
1189
1218
emit_const_to_reg (ctx , MIPS_R_AT , (u64 )t64s );
1190
1219
emit_instr (ctx , sltu , MIPS_R_AT , dst , MIPS_R_AT );
1191
1220
src = MIPS_R_AT ;
1192
1221
dst = MIPS_R_ZERO ;
1193
- cmp_eq = true;
1194
1222
goto jeq_common ;
1195
1223
1196
1224
case BPF_JMP | BPF_JSET | BPF_K : /* JMP_IMM */
1197
1225
dst = ebpf_to_mips_reg (ctx , insn , dst_reg_fp_ok );
1198
1226
if (dst < 0 )
1199
1227
return dst ;
1200
1228
1201
- if (use_bbit_insns () && hweight32 ((u32 )insn -> imm ) == 1 ) {
1229
+ if (ctx -> use_bbit_insns && hweight32 ((u32 )insn -> imm ) == 1 ) {
1202
1230
if ((insn + 1 )-> code == (BPF_JMP | BPF_EXIT ) && insn -> off == 1 ) {
1203
1231
b_off = b_imm (exit_idx , ctx );
1204
1232
if (is_bad_offset (b_off ))
@@ -1724,10 +1752,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
1724
1752
case BPF_JEQ :
1725
1753
case BPF_JGT :
1726
1754
case BPF_JGE :
1755
+ case BPF_JLT :
1756
+ case BPF_JLE :
1727
1757
case BPF_JSET :
1728
1758
case BPF_JNE :
1729
1759
case BPF_JSGT :
1730
1760
case BPF_JSGE :
1761
+ case BPF_JSLT :
1762
+ case BPF_JSLE :
1731
1763
if (follow_taken ) {
1732
1764
rvt [idx ] |= RVT_BRANCH_TAKEN ;
1733
1765
idx += insn -> off ;
@@ -1853,6 +1885,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1853
1885
1854
1886
memset (& ctx , 0 , sizeof (ctx ));
1855
1887
1888
+ preempt_disable ();
1889
+ switch (current_cpu_type ()) {
1890
+ case CPU_CAVIUM_OCTEON :
1891
+ case CPU_CAVIUM_OCTEON_PLUS :
1892
+ case CPU_CAVIUM_OCTEON2 :
1893
+ case CPU_CAVIUM_OCTEON3 :
1894
+ ctx .use_bbit_insns = 1 ;
1895
+ default :
1896
+ ctx .use_bbit_insns = 0 ;
1897
+ }
1898
+ preempt_enable ();
1899
+
1856
1900
ctx .offsets = kcalloc (prog -> len + 1 , sizeof (* ctx .offsets ), GFP_KERNEL );
1857
1901
if (ctx .offsets == NULL )
1858
1902
goto out_err ;
0 commit comments