Commit 2888457

Publish failed and succeeded test reports in GitHub Actions
1 parent 08d86eb commit 2888457

File tree

5 files changed: 51 additions, 19 deletions


.github/workflows/master.yml

Lines changed: 18 additions & 11 deletions
@@ -1,4 +1,4 @@
-name: master
+name: Build and test
 
 on:
   push:

@@ -9,7 +9,6 @@ on:
     - master
 
 jobs:
-  # TODO(SPARK-32248): Recover JDK 11 builds
   # Build: build Spark and run the tests for specified modules.
   build:
     name: "Build modules: ${{ matrix.modules }} ${{ matrix.comment }} (JDK ${{ matrix.java }}, ${{ matrix.hadoop }}, ${{ matrix.hive }})"
@@ -27,21 +26,21 @@ jobs:
       # Kinesis tests depends on external Amazon kinesis service.
       # Note that the modules below are from sparktestsupport/modules.py.
       modules:
-        - |-
+        - >-
           core, unsafe, kvstore, avro,
           network-common, network-shuffle, repl, launcher,
           examples, sketch, graphx
-        - |-
+        - >-
           catalyst, hive-thriftserver
-        - |-
+        - >-
           streaming, sql-kafka-0-10, streaming-kafka-0-10,
           mllib-local, mllib,
           yarn, mesos, kubernetes, hadoop-cloud, spark-ganglia-lgpl
-        - |-
+        - >-
           pyspark-sql, pyspark-mllib, pyspark-resource
-        - |-
+        - >-
           pyspark-core, pyspark-streaming, pyspark-ml
-        - |-
+        - >-
           sparkr
       # Here, we split Hive and SQL tests into some of slow ones and the rest of them.
       included-tags: [""]
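A note on the YAML change above: '|-' is a literal block scalar, which preserves the embedded newlines, while '>-' is a folded block scalar, which joins the wrapped lines with spaces, so each matrix entry becomes a single-line, comma-separated module list. A minimal sketch of the difference, assuming PyYAML is available:

import yaml  # PyYAML, used here only to illustrate block-scalar semantics

literal = yaml.safe_load("modules: |-\n  core, unsafe,\n  graphx")
folded = yaml.safe_load("modules: >-\n  core, unsafe,\n  graphx")

print(repr(literal["modules"]))  # 'core, unsafe,\ngraphx' (newline kept)
print(repr(folded["modules"]))   # 'core, unsafe, graphx'  (newline folded to a space)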
@@ -144,14 +143,15 @@ jobs:
         # PyArrow is not supported in PyPy yet, see ARROW-2651.
         # TODO(SPARK-32247): scipy installation with PyPy fails for an unknown reason.
         run: |
-          python3.6 -m pip install numpy pyarrow pandas scipy
+          python3.6 -m pip install numpy pyarrow pandas scipy xmlrunner
           python3.6 -m pip list
+          # PyPy does not have xmlrunner
           pypy3 -m pip install numpy pandas
           pypy3 -m pip list
       - name: Install Python packages (Python 3.8)
         if: contains(matrix.modules, 'pyspark') || (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-'))
         run: |
-          python3.8 -m pip install numpy pyarrow pandas scipy
+          python3.8 -m pip install numpy pyarrow pandas scipy xmlrunner
           python3.8 -m pip list
       # SparkR
       - name: Install R 4.0
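The new xmlrunner dependency (the unittest-xml-reporting package) is what lets the Python test runs emit JUnit-style XML files for the report workflow to pick up; as the comment above notes, it is skipped for PyPy. A minimal sketch of how a unittest suite produces such a report; the output directory is an assumption chosen to match the upload glob added below:

import unittest

import xmlrunner  # from the xmlrunner / unittest-xml-reporting package

class ExampleSuite(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    # Writes a JUnit-style XML report under target/test-reports/, matching
    # the "**/target/test-reports/*.xml" pattern used by the workflow.
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output="target/test-reports"))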
@@ -170,13 +170,19 @@ jobs:
           # Show installed packages in R.
           sudo Rscript -e 'pkg_list <- as.data.frame(installed.packages()[, c(1,3:4)]); pkg_list[is.na(pkg_list$Priority), 1:2, drop = FALSE]'
       # Run the tests.
-      - name: "Run tests: ${{ matrix.modules }}"
+      - name: Run tests
         run: |
           # Hive tests become flaky when running in parallel as it's too intensive.
           if [[ "$MODULES_TO_TEST" == "hive" ]]; then export SERIAL_SBT_TESTS=1; fi
           mkdir -p ~/.m2
           ./dev/run-tests --parallelism 2 --modules "$MODULES_TO_TEST" --included-tags "$INCLUDED_TAGS" --excluded-tags "$EXCLUDED_TAGS"
           rm -rf ~/.m2/repository/org/apache/spark
+      - name: Upload test results to report
+        if: always()
+        uses: actions/upload-artifact@v2
+        with:
+          name: test-results-${{ matrix.modules }}-${{ matrix.comment }}-${{ matrix.java }}-${{ matrix.hadoop }}-${{ matrix.hive }}
+          path: "**/target/test-reports/*.xml"
 
   # Static analysis, and documentation build
   lint:
@@ -271,3 +277,4 @@ jobs:
         mkdir -p ~/.m2
         ./build/mvn $MAVEN_CLI_OPTS -DskipTests -Pyarn -Pmesos -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Djava.version=11 install
         rm -rf ~/.m2/repository/org/apache/spark
+
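Two details in the new upload step matter: 'if: always()' runs the upload even when the preceding test step fails (failed runs are precisely the ones whose reports we want), and the 'path' glob gathers JUnit XML from every module's target/test-reports directory. A quick, illustrative way to preview locally what that pattern matches:

from pathlib import Path

# Same pattern as the workflow's `path:` input; matches report files in any
# module, e.g. core/target/test-reports/TEST-SomeSuite.xml (hypothetical name).
for report in sorted(Path(".").glob("**/target/test-reports/*.xml")):
    print(report)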

.github/workflows/test_report.yml

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+name: Report test results
+on:
+  workflow_run:
+    workflows: ["master"]
+    types:
+      - completed
+
+jobs:
+  test_report:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Download test results to report
+      uses: HyukjinKwon/action-download-artifact@master
+      with:
+        github_token: ${{ secrets.GITHUB_TOKEN }}
+        workflow: ${{ github.event.workflow_run.workflow_id }}
+        commit: ${{ github.event.workflow_run.head_commit.id }}
+    - name: Publish test report
+      uses: HyukjinKwon/action-surefire-report@master
+      with:
+        check_name: Test report
+        github_token: ${{ secrets.GITHUB_TOKEN }}
+        report_paths: "**/target/test-reports/*.xml"
+        commit: ${{ github.event.workflow_run.head_commit.id }}
+
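This companion workflow relies on the workflow_run trigger: it runs in the context of the base repository once the build workflow completes, so it has a usable GITHUB_TOKEN even for runs originating from forks. It downloads the artifacts uploaded by the triggering run and publishes a "Test report" check against the original commit. For orientation, the REST endpoint the download action builds on looks roughly like this (owner/repo, run id, and token below are placeholders, not values from the commit):

import requests

OWNER_REPO = "apache/spark"   # placeholder; the action reads this from the event
RUN_ID = 123456789            # placeholder; from github.event.workflow_run
TOKEN = "<github-token>"      # placeholder; needs permission to read Actions data

resp = requests.get(
    f"https://api.github.com/repos/{OWNER_REPO}/actions/runs/{RUN_ID}/artifacts",
    headers={"Authorization": f"token {TOKEN}"},
)
resp.raise_for_status()
for artifact in resp.json()["artifacts"]:
    # Each entry carries an archive_download_url serving a zip of the files.
    print(artifact["name"], artifact["archive_download_url"])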

common/unsafe/src/test/scala/org/apache/spark/unsafe/types/UTF8StringPropertyCheckSuite.scala

Lines changed: 4 additions & 4 deletions
(Note: the flipped assertions in this and the following test files appear deliberate; they seed failing and trivially passing tests so that the new report covers both failed and succeeded results, as the commit title suggests.)

@@ -34,13 +34,13 @@ class UTF8StringPropertyCheckSuite extends AnyFunSuite with ScalaCheckDrivenProp
 
   test("toString") {
     forAll { (s: String) =>
-      assert(toUTF8(s).toString() === s)
+      assert(toUTF8(s).toString() !== s)
     }
   }
 
   test("numChars") {
     forAll { (s: String) =>
-      assert(toUTF8(s).numChars() === s.length)
+      assert(toUTF8(s).numChars() !== s.length)
     }
   }
 
@@ -73,14 +73,14 @@ class UTF8StringPropertyCheckSuite extends AnyFunSuite with ScalaCheckDrivenProp
 
   test("toLowerCase") {
     forAll { (s: String) =>
-      assert(toUTF8(s).toLowerCase === toUTF8(s.toLowerCase))
+      assert(toUTF8(s) === toUTF8(s.toLowerCase))
     }
   }
   // scalastyle:on caselocale
 
   test("compare") {
     forAll { (s1: String, s2: String) =>
-      assert(Math.signum(toUTF8(s1).compareTo(toUTF8(s2))) === Math.signum(s1.compareTo(s2)))
+      assert(Math.signum(toUTF8(s1).compareTo(toUTF8(s2))) !== Math.signum(s1.compareTo(s2)))
     }
   }

python/pyspark/sql/tests/test_arrow.py

Lines changed: 1 addition & 1 deletion
@@ -201,7 +201,7 @@ def test_no_partition_frame(self):
         pdf = df.toPandas()
         self.assertEqual(len(pdf.columns), 1)
         self.assertEqual(pdf.columns[0], "field1")
-        self.assertTrue(pdf.empty)
+        self.assertTrue("A")
 
     def test_propagates_spark_exception(self):
         df = self.spark.range(3).toDF("i")
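One behavioral note on this change: assertTrue applies Python truthiness, and a non-empty string is always truthy, so assertTrue("A") can never fail, unlike the original check on pdf.empty. A self-contained illustration:

import unittest

class TruthinessDemo(unittest.TestCase):
    def test_nonempty_string_is_truthy(self):
        # bool("A") is True, so this assertion always passes, regardless
        # of any DataFrame state the original pdf.empty check inspected.
        self.assertTrue("A")

if __name__ == "__main__":
    unittest.main()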

sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala

Lines changed: 3 additions & 3 deletions
@@ -83,8 +83,8 @@ class DataFrameSuite extends QueryTest
   }
 
   test("access complex data") {
-    assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
-    assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
+    assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 2)
+    assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 2)
     assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
   }
 
@@ -96,7 +96,7 @@ class DataFrameSuite extends QueryTest
 
   test("empty data frame") {
     assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String])
-    assert(spark.emptyDataFrame.count() === 0)
+    assert(spark.emptyDataFrame.count() === 1)
   }
 
   test("head, take and tail") {
