@@ -481,6 +481,7 @@ mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offs
 	/* LLVM can handle unaligned loads and stores, so there's no reason to
 	 * manually decompose an unaligned load here into a memcpy if we're
 	 * using LLVM. */
+#ifdef NO_UNALIGNED_ACCESS
 	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
 		MonoInst *addr, *tmp_var;
 		int align;
@@ -498,9 +499,10 @@ mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offs
 
 		mini_emit_memcpy_const_size (cfg, addr, src, size, 1);
 		EMIT_NEW_TEMPLOAD (cfg, ins, tmp_var->inst_c0);
-	} else {
+	} else
+#endif
 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
-	}
+
 	ins->flags |= ins_flag;
 
 	if (ins_flag & MONO_INST_VOLATILE) {
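The load-side change hinges on a preprocessor trick: the `} else` that closes the slow path is itself guarded by `#ifdef NO_UNALIGNED_ACCESS`, so on targets that tolerate unaligned access the preprocessor drops the whole memcpy path and leaves only the plain EMIT_NEW_LOAD_MEMBASE_TYPE statement as the function body. A minimal standalone sketch of the same pattern (not Mono code; emit_load and INST_UNALIGNED are hypothetical stand-ins for the JIT's instruction flag and emit macros):

/* Compile with and without -DNO_UNALIGNED_ACCESS to see both shapes. */
#include <stdio.h>
#include <string.h>

#define INST_UNALIGNED 1

static int
emit_load (int ins_flag, const unsigned char *src)
{
	int value;
#ifdef NO_UNALIGNED_ACCESS
	if (ins_flag & INST_UNALIGNED) {
		/* Slow path: byte-wise copy, safe on strict-alignment targets. */
		memcpy (&value, src, sizeof (value));
	} else
#endif
		/* Fast path: a direct load. This is the sole statement left when
		 * the macro is undefined, and the dangling-else body when it is. */
		value = *(const int *) src;
	return value;
}

int
main (void)
{
	int raw = 1;
	printf ("%d\n", emit_load (INST_UNALIGNED, (const unsigned char *) &raw));
	return 0;
}

Because the fast path is a single statement, it serves as the body of a dangling else, which is why the diff can also drop the closing `}` that used to follow EMIT_NEW_LOAD_MEMBASE_TYPE.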
@@ -524,14 +526,17 @@ mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoIn
 	if (!(ins_flag & MONO_INST_NONULLCHECK))
 		MONO_EMIT_NULL_CHECK (cfg, dest->dreg, FALSE);
 
+#ifdef NO_UNALIGNED_ACCESS
 	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
 		MonoInst *addr, *mov, *tmp_var;
 
 		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
 		EMIT_NEW_TEMPSTORE (cfg, mov, tmp_var->inst_c0, value);
 		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
 		mini_emit_memory_copy_internal (cfg, dest, addr, mono_class_from_mono_type_internal (type), 1, FALSE, (ins_flag & MONO_INST_STACK_STORE) != 0);
-	} else {
+	} else
+#endif
+	{
 		MonoInst *ins;
 
 		/* FIXME: should check item at sp [1] is compatible with the type of the store. */
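The store side uses the same guarded `} else`, with one twist: the fallback keeps an explicit braced block (the `+	{` after `#endif`) because it declares a local (`MonoInst *ins;`), so the block must survive preprocessing whether or not the macro is defined. A sketch under the same assumptions as above (emit_store is a hypothetical stand-in, not Mono's emitter):

#include <string.h>

#define INST_UNALIGNED 1

static void
emit_store (int ins_flag, unsigned char *dest, int value)
{
#ifdef NO_UNALIGNED_ACCESS
	if (ins_flag & INST_UNALIGNED) {
		/* Slow path: byte-wise copy avoids a faulting unaligned store. */
		memcpy (dest, &value, sizeof (value));
	} else
#endif
	{
		/* Fast path: the braced block compiles unconditionally and can
		 * hold its own declarations, mirroring `MonoInst *ins;` above. */
		int *p = (int *) dest;
		*p = value;
	}
}

int
main (void)
{
	int out = 0;
	emit_store (INST_UNALIGNED, (unsigned char *) &out, 42);
	return out == 42 ? 0 : 1;
}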