@@ -1042,21 +1042,31 @@ impl<'a> MethodDef<'a> {
1042
1042
/// variants where all of the variants match, and one catch-all for
1043
1043
/// when one does not match.
1044
1044
1045
+ /// As an optimization, we generate code which checks whether all variants
1046
+ /// match first, which makes LLVM see that C-like enums can be compiled into
1047
+ /// a simple equality check (for PartialEq).
1048
+ ///
1045
1049
/// The catch-all handler is provided access to the variant index values
1046
- /// for each of the self-args, carried in precomputed variables. (Nota
1047
- /// bene: the variant index values are not necessarily the
1048
- /// discriminant values. See issue #15523.)
1050
+ /// for each of the self-args, carried in precomputed variables.
1049
1051
1050
1052
/// ```{.text}
1051
- /// match (this, that, ...) {
1052
- /// (Variant1, Variant1, Variant1) => ... // delegate Matching on Variant1
1053
- /// (Variant2, Variant2, Variant2) => ... // delegate Matching on Variant2
1054
- /// ...
1055
- /// _ => {
1056
- /// let __this_vi = match this { Variant1 => 0, Variant2 => 1, ... };
1057
- /// let __that_vi = match that { Variant1 => 0, Variant2 => 1, ... };
1053
+ /// let __self0_vi = unsafe {
1054
+ /// std::intrinsics::discriminant_value(&self) } as i32;
1055
+ /// let __self1_vi = unsafe {
1056
+ /// std::intrinsics::discriminant_value(&__arg1) } as i32;
1057
+ /// let __self2_vi = unsafe {
1058
+ /// std::intrinsics::discriminant_value(&__arg2) } as i32;
1059
+ ///
1060
+ /// if __self0_vi == __self1_vi && __self0_vi == __self2_vi && ... {
1061
+ /// match (...) {
1062
+ /// (Variant1, Variant1, ...) => Body1
1063
+ /// (Variant2, Variant2, ...) => Body2,
1064
+ /// ...
1065
+ /// _ => ::core::intrinsics::unreachable()
1066
+ /// }
1067
+ /// }
1068
+ /// else {
1058
1069
/// ... // catch-all remainder can inspect above variant index values.
1059
- /// }
1060
1070
/// }
1061
1071
/// ```
1062
1072
fn build_enum_match_tuple < ' b > (
@@ -1187,7 +1197,6 @@ impl<'a> MethodDef<'a> {
1187
1197
1188
1198
cx. arm ( sp, vec ! [ single_pat] , arm_expr)
1189
1199
} ) . collect ( ) ;
1190
-
1191
1200
// We will usually need the catch-all after matching the
1192
1201
// tuples `(VariantK, VariantK, ...)` for each VariantK of the
1193
1202
// enum. But:
@@ -1223,9 +1232,14 @@ impl<'a> MethodDef<'a> {
1223
1232
// ```
1224
1233
let mut index_let_stmts: Vec < P < ast:: Stmt > > = Vec :: new ( ) ;
1225
1234
1235
+ // We also build an expression which checks whether all discriminants are equal
1236
+ // discriminant_test = __self0_vi == __self1_vi && __self0_vi == __self2_vi && ...
1237
+ let mut discriminant_test = cx. expr_bool ( sp, true ) ;
1238
+
1226
1239
let target_type_name =
1227
1240
find_repr_type_name ( & cx. parse_sess . span_diagnostic , type_attrs) ;
1228
1241
1242
+ let mut first_ident = None ;
1229
1243
for ( & ident, self_arg) in vi_idents. iter ( ) . zip ( & self_args) {
1230
1244
let path = vec ! [ cx. ident_of_std( "core" ) ,
1231
1245
cx. ident_of( "intrinsics" ) ,
@@ -1243,32 +1257,64 @@ impl<'a> MethodDef<'a> {
1243
1257
let variant_disr = cx. expr_cast ( sp, variant_value, target_ty) ;
1244
1258
let let_stmt = cx. stmt_let ( sp, false , ident, variant_disr) ;
1245
1259
index_let_stmts. push ( let_stmt) ;
1260
+
1261
+ match first_ident {
1262
+ Some ( first) => {
1263
+ let first_expr = cx. expr_ident ( sp, first) ;
1264
+ let id = cx. expr_ident ( sp, ident) ;
1265
+ let test = cx. expr_binary ( sp, ast:: BiEq , first_expr, id) ;
1266
+ discriminant_test = cx. expr_binary ( sp, ast:: BiAnd , discriminant_test, test)
1267
+ }
1268
+ None => {
1269
+ first_ident = Some ( ident) ;
1270
+ }
1271
+ }
1246
1272
}
1247
1273
1248
1274
let arm_expr = self . call_substructure_method (
1249
1275
cx, trait_, type_ident, & self_args[ ..] , nonself_args,
1250
1276
& catch_all_substructure) ;
1251
1277
1252
- // Builds the expression:
1253
- // {
1254
- // let __self0_vi = ...;
1255
- // let __self1_vi = ...;
1256
- // ...
1257
- // <delegated expression referring to __self0_vi, et al.>
1258
- // }
1259
- let arm_expr = cx. expr_block (
1260
- cx. block_all ( sp, index_let_stmts, Some ( arm_expr) ) ) ;
1261
-
1262
- // Builds arm:
1263
- // _ => { let __self0_vi = ...;
1264
- // let __self1_vi = ...;
1265
- // ...
1266
- // <delegated expression as above> }
1267
- let catch_all_match_arm =
1268
- cx. arm ( sp, vec ! [ cx. pat_wild( sp) ] , arm_expr) ;
1269
-
1270
- match_arms. push ( catch_all_match_arm) ;
1271
-
1278
+ // Since we know that all the arguments will match if we reach the match expression, we
1279
+ // add the unreachable intrinsic as the result of the catch-all, which should help LLVM
1280
+ // in optimizing it
1281
+ let path = vec ! [ cx. ident_of_std( "core" ) ,
1282
+ cx. ident_of( "intrinsics" ) ,
1283
+ cx. ident_of( "unreachable" ) ] ;
1284
+ let call = cx. expr_call_global (
1285
+ sp, path, vec ! [ ] ) ;
1286
+ let unreachable = cx. expr_block ( P ( ast:: Block {
1287
+ stmts : vec ! [ ] ,
1288
+ expr : Some ( call) ,
1289
+ id : ast:: DUMMY_NODE_ID ,
1290
+ rules : ast:: UnsafeBlock ( ast:: CompilerGenerated ) ,
1291
+ span : sp } ) ) ;
1292
+ match_arms. push ( cx. arm ( sp, vec ! [ cx. pat_wild( sp) ] , unreachable) ) ;
1293
+
1294
+ // Final wrinkle: the self_args are expressions that deref
1295
+ // down to desired l-values, but we cannot actually deref
1296
+ // them when they are fed as r-values into a tuple
1297
+ // expression; here add a layer of borrowing, turning
1298
+ // `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`.
1299
+ let borrowed_self_args = self_args. move_map ( |self_arg| cx. expr_addr_of ( sp, self_arg) ) ;
1300
+ let match_arg = cx. expr ( sp, ast:: ExprTup ( borrowed_self_args) ) ;
1301
+
1302
+ // Lastly, we create an expression which branches on all discriminants being equal
1303
+ // if discriminant_test {
1304
+ // match (...) {
1305
+ // (Variant1, Variant1, ...) => Body1
1306
+ // (Variant2, Variant2, ...) => Body2,
1307
+ // ...
1308
+ // _ => ::core::intrinsics::unreachable()
1309
+ // }
1310
+ // }
1311
+ // else {
1312
+ // <delegated expression referring to __self0_vi, et al.>
1313
+ // }
1314
+ let all_match = cx. expr_match ( sp, match_arg, match_arms) ;
1315
+ let arm_expr = cx. expr_if ( sp, discriminant_test, all_match, Some ( arm_expr) ) ;
1316
+ cx. expr_block (
1317
+ cx. block_all ( sp, index_let_stmts, Some ( arm_expr) ) )
1272
1318
} else if variants. is_empty ( ) {
1273
1319
// As an additional wrinkle, For a zero-variant enum A,
1274
1320
// currently the compiler
@@ -1319,17 +1365,19 @@ impl<'a> MethodDef<'a> {
1319
1365
// derive Debug on such a type could here generate code
1320
1366
// that needs the feature gate enabled.)
1321
1367
1322
- return cx. expr_unreachable ( sp) ;
1368
+ cx. expr_unreachable ( sp)
1369
+ }
1370
+ else {
1371
+
1372
+ // Final wrinkle: the self_args are expressions that deref
1373
+ // down to desired l-values, but we cannot actually deref
1374
+ // them when they are fed as r-values into a tuple
1375
+ // expression; here add a layer of borrowing, turning
1376
+ // `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`.
1377
+ let borrowed_self_args = self_args. move_map ( |self_arg| cx. expr_addr_of ( sp, self_arg) ) ;
1378
+ let match_arg = cx. expr ( sp, ast:: ExprTup ( borrowed_self_args) ) ;
1379
+ cx. expr_match ( sp, match_arg, match_arms)
1323
1380
}
1324
-
1325
- // Final wrinkle: the self_args are expressions that deref
1326
- // down to desired l-values, but we cannot actually deref
1327
- // them when they are fed as r-values into a tuple
1328
- // expression; here add a layer of borrowing, turning
1329
- // `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`.
1330
- let borrowed_self_args = self_args. move_map ( |self_arg| cx. expr_addr_of ( sp, self_arg) ) ;
1331
- let match_arg = cx. expr ( sp, ast:: ExprTup ( borrowed_self_args) ) ;
1332
- cx. expr_match ( sp, match_arg, match_arms)
1333
1381
}
1334
1382
1335
1383
fn expand_static_enum_method_body ( & self ,
0 commit comments