@@ -93,18 +93,17 @@ def run_individual_python_test(target_dir, test_name, pyspark_python):
         "pyspark-shell"
     ]
     env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
-    str_test_name = " ".join(test_name)
-    LOGGER.info("Starting test(%s): %s", pyspark_python, str_test_name)
+    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
     start_time = time.time()
     try:
         per_test_output = tempfile.TemporaryFile()
         retcode = subprocess.Popen(
-            (os.path.join(SPARK_HOME, "bin/pyspark"),) + test_name,
+            [os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
             stderr=per_test_output, stdout=per_test_output, env=env).wait()
         shutil.rmtree(tmp_dir, ignore_errors=True)
     except:
         LOGGER.exception(
-            "Got exception while running %s with %s", str_test_name, pyspark_python)
+            "Got exception while running %s with %s", test_name, pyspark_python)
         # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
         # this code is invoked from a thread other than the main thread.
         os._exit(1)
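Note on the hunk above: each test goal now travels as a single space-separated string and is split back into an argv list only at the `subprocess.Popen` call site. A minimal sketch of the resulting command construction, using a hypothetical goal string (the module and class names below are illustrative, not taken from this diff):

    import os

    SPARK_HOME = os.environ.get("SPARK_HOME", ".")  # assumed Spark checkout root
    test_name = "pyspark.sql.tests.test_arrow ArrowTests"  # hypothetical goal string
    # str.split() recovers the per-argument list that Popen expects,
    # replacing the tuple concatenation used before this change.
    cmd = [os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split()
    # e.g. ['./bin/pyspark', 'pyspark.sql.tests.test_arrow', 'ArrowTests'] when SPARK_HOME is '.'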
@@ -126,7 +125,7 @@ def run_individual_python_test(target_dir, test_name, pyspark_python):
             LOGGER.exception("Got an exception while trying to print failed test output")
         finally:
             print_red("\nHad test failures in %s with %s; see logs." % (
-                str_test_name, pyspark_python))
+                test_name, pyspark_python))
             # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
             # this code is invoked from a thread other than the main thread.
             os._exit(-1)
@@ -142,7 +141,7 @@ def run_individual_python_test(target_dir, test_name, pyspark_python):
                 decoded_lines))
             skipped_counts = len(skipped_tests)
             if skipped_counts > 0:
-                key = (pyspark_python, str_test_name)
+                key = (pyspark_python, test_name)
                 SKIPPED_TESTS[key] = skipped_tests
             per_test_output.close()
         except:
@@ -155,10 +154,10 @@ def run_individual_python_test(target_dir, test_name, pyspark_python):
         if skipped_counts != 0:
             LOGGER.info(
                 "Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python,
-                str_test_name, duration, skipped_counts)
+                test_name, duration, skipped_counts)
         else:
             LOGGER.info(
-                "Finished test(%s): %s (%is)", pyspark_python, str_test_name, duration)
+                "Finished test(%s): %s (%is)", pyspark_python, test_name, duration)


 def get_default_python_executables():
@@ -278,10 +277,10 @@ def main():
                             priority = 0
                         else:
                             priority = 100
-                        task_queue.put((priority, (python_exec, (test_goal,))))
+                        task_queue.put((priority, (python_exec, test_goal)))
         else:
             for test_goal in testnames_to_test:
-                task_queue.put((0, (python_exec, tuple(test_goal.split()))))
+                task_queue.put((0, (python_exec, test_goal)))

     # Create the target directory before starting tasks to avoid races.
     target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
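Note on the queueing change above: entries are (priority, payload) tuples, so the priority queue dequeues heavy suites (priority 0) before light ones (priority 100), and since the payload's test_goal is now a plain string, ties between equal priorities compare cleanly. A small self-contained sketch of that ordering, assuming Python 3's queue module (the goal names are illustrative):

    from queue import PriorityQueue

    task_queue = PriorityQueue()
    # Lower priority values are retrieved first, so heavy suites start early.
    task_queue.put((100, ("python3", "pyspark.accumulators")))  # light test
    task_queue.put((0, ("python3", "pyspark.sql.tests")))       # heavy test
    task_queue.put((0, ("python3", "pyspark.mllib.tests")))     # heavy test

    while not task_queue.empty():
        priority, (python_exec, test_goal) = task_queue.get()
        print(priority, python_exec, test_goal)
    # 0 python3 pyspark.mllib.tests
    # 0 python3 pyspark.sql.tests
    # 100 python3 pyspark.accumulators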