diff --git a/run_inference_cpu_multi_instance.sh b/run_inference_cpu_multi_instance.sh
index 34d108e..2912872 100755
--- a/run_inference_cpu_multi_instance.sh
+++ b/run_inference_cpu_multi_instance.sh
@@ -27,7 +27,7 @@ for i in $(seq 1 $LAST_INSTANCE); do
     LOG_i=inference_cpu_bs${BATCH_SIZE}_ins${i}.txt
 
     echo "### running on instance $i, numa node $numa_node_i, core list {$start_core_i, $end_core_i}..."
-    numactl --physcpubind=$start_core_i-$end_core_i --membind=$numa_node_i python main.py -e UCF101 \
+    numactl --physcpubind=$start_core_i-$end_core_i --membind=$numa_node_i python -u main.py -e UCF101 \
         --batch-size-eval $BATCH_SIZE \
         --no-cuda \
         2>&1 | tee $LOG_i &
@@ -40,7 +40,7 @@ end_core_0=`expr $CORES_PER_INSTANCE - 1`
 LOG_0=inference_cpu_bs${BATCH_SIZE}_ins0.txt
 
 echo "### running on instance 0, numa node $numa_node_0, core list {$start_core_0, $end_core_0}...\n\n"
-numactl --physcpubind=$start_core_0-$end_core_0 --membind=$numa_node_0 python main.py -e UCF101 \
+numactl --physcpubind=$start_core_0-$end_core_0 --membind=$numa_node_0 python -u main.py -e UCF101 \
     --batch-size-eval $BATCH_SIZE \
     --no-cuda \
     2>&1 | tee $LOG_0
diff --git a/run_inference_cpu_multi_instance_latency.sh b/run_inference_cpu_multi_instance_latency.sh
index 05dcb76..af30d3c 100755
--- a/run_inference_cpu_multi_instance_latency.sh
+++ b/run_inference_cpu_multi_instance_latency.sh
@@ -27,7 +27,7 @@ for i in $(seq 1 $LAST_INSTANCE); do
     LOG_i=inference_cpu_bs${BATCH_SIZE}_ins${i}.txt
 
     echo "### running on instance $i, numa node $numa_node_i, core list {$start_core_i, $end_core_i}..."
-    numactl --physcpubind=$start_core_i-$end_core_i --membind=$numa_node_i python main.py -e UCF101 \
+    numactl --physcpubind=$start_core_i-$end_core_i --membind=$numa_node_i python -u main.py -e UCF101 \
         --batch-size-eval $BATCH_SIZE \
         --no-cuda \
         2>&1 | tee $LOG_i &