Skip to content

Commit 3da633e

Browse files
committed
Add test and logic for skipping empty summary table
Signed-off-by: Zack Koppert <zkoppert@github.com>
1 parent 38e1563 commit 3da633e

File tree

3 files changed

+59
-47
lines changed

3 files changed

+59
-47
lines changed

.vscode/settings.json

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -3,5 +3,9 @@
33
"."
44
],
55
"python.testing.unittestEnabled": false,
6-
"python.testing.pytestEnabled": true
6+
"python.testing.pytestEnabled": true,
7+
"[python]": {
8+
"editor.defaultFormatter": "ms-python.black-formatter"
9+
},
10+
"python.formatting.provider": "none"
711
}

markdown_writer.py

Lines changed: 53 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -78,6 +78,7 @@ def write_to_markdown(
7878
num_issues_closed: Union[int, None],
7979
labels=None,
8080
search_query=None,
81+
hide_label_metrics=False,
8182
) -> None:
8283
"""Write the issues with metrics to a markdown file.
8384
@@ -94,6 +95,7 @@ def write_to_markdown(
9495
num_issues_closed (int): The number of issues that were closed.
9596
labels (List[str]): A list of the labels that are used in the issues.
9697
search_query (str): The search query used to find the issues.
98+
hide_label_metrics (bool): Represents whether the user has chosen to hide label metrics in the output
9799
98100
Returns:
99101
None.
@@ -123,6 +125,7 @@ def write_to_markdown(
123125
labels,
124126
columns,
125127
file,
128+
hide_label_metrics,
126129
)
127130

128131
# Write second table with individual issue/pr/discussion metrics
@@ -179,54 +182,61 @@ def write_overall_metrics_tables(
179182
labels,
180183
columns,
181184
file,
185+
hide_label_metrics,
182186
):
183187
"""Write the overall metrics tables to the markdown file."""
184-
file.write("| Metric | Average | Median | 90th percentile |\n")
185-
file.write("| --- | --- | --- | ---: |\n")
186-
if "Time to first response" in columns:
187-
if stats_time_to_first_response is not None:
188-
file.write(
189-
f"| Time to first response "
190-
f"| {stats_time_to_first_response['avg']} "
191-
f"| {stats_time_to_first_response['med']} "
192-
f"| {stats_time_to_first_response['90p']} |\n"
193-
)
194-
else:
195-
file.write("| Time to first response | None | None | None |\n")
196-
if "Time to close" in columns:
197-
if stats_time_to_close is not None:
198-
file.write(
199-
f"| Time to close "
200-
f"| {stats_time_to_close['avg']} "
201-
f"| {stats_time_to_close['med']} "
202-
f"| {stats_time_to_close['90p']} |\n"
203-
)
204-
else:
205-
file.write("| Time to close | None | None | None |\n")
206-
if "Time to answer" in columns:
207-
if stats_time_to_answer is not None:
208-
file.write(
209-
f"| Time to answer "
210-
f"| {stats_time_to_answer['avg']} "
211-
f"| {stats_time_to_answer['med']} "
212-
f"| {stats_time_to_answer['90p']} |\n"
213-
)
214-
else:
215-
file.write("| Time to answer | None | None | None |\n")
216-
if labels and stats_time_in_labels:
217-
for label in labels:
218-
if (
219-
f"Time spent in {label}" in columns
220-
and label in stats_time_in_labels["avg"]
221-
):
188+
if (
189+
"Time to first response" in columns
190+
or "Time to close" in columns
191+
or "Time to answer" in columns
192+
or (hide_label_metrics is False and len(labels) > 0)
193+
):
194+
file.write("| Metric | Average | Median | 90th percentile |\n")
195+
file.write("| --- | --- | --- | ---: |\n")
196+
if "Time to first response" in columns:
197+
if stats_time_to_first_response is not None:
198+
file.write(
199+
f"| Time to first response "
200+
f"| {stats_time_to_first_response['avg']} "
201+
f"| {stats_time_to_first_response['med']} "
202+
f"| {stats_time_to_first_response['90p']} |\n"
203+
)
204+
else:
205+
file.write("| Time to first response | None | None | None |\n")
206+
if "Time to close" in columns:
207+
if stats_time_to_close is not None:
222208
file.write(
223-
f"| Time spent in {label} "
224-
f"| {stats_time_in_labels['avg'][label]} "
225-
f"| {stats_time_in_labels['med'][label]} "
226-
f"| {stats_time_in_labels['90p'][label]} |\n"
209+
f"| Time to close "
210+
f"| {stats_time_to_close['avg']} "
211+
f"| {stats_time_to_close['med']} "
212+
f"| {stats_time_to_close['90p']} |\n"
227213
)
214+
else:
215+
file.write("| Time to close | None | None | None |\n")
216+
if "Time to answer" in columns:
217+
if stats_time_to_answer is not None:
218+
file.write(
219+
f"| Time to answer "
220+
f"| {stats_time_to_answer['avg']} "
221+
f"| {stats_time_to_answer['med']} "
222+
f"| {stats_time_to_answer['90p']} |\n"
223+
)
224+
else:
225+
file.write("| Time to answer | None | None | None |\n")
226+
if labels and stats_time_in_labels:
227+
for label in labels:
228+
if (
229+
f"Time spent in {label}" in columns
230+
and label in stats_time_in_labels["avg"]
231+
):
232+
file.write(
233+
f"| Time spent in {label} "
234+
f"| {stats_time_in_labels['avg'][label]} "
235+
f"| {stats_time_in_labels['med'][label]} "
236+
f"| {stats_time_in_labels['90p'][label]} |\n"
237+
)
238+
file.write("\n")
228239
# Write count stats to a separate table
229-
file.write("\n")
230240
file.write("| Metric | Count |\n")
231241
file.write("| --- | ---: |\n")
232242
file.write(f"| Number of items that remain open | {num_issues_opened} |\n")

test_markdown_writer.py

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -294,16 +294,14 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
294294
num_issues_closed=num_issues_closed,
295295
labels=["label1"],
296296
search_query="repo:user/repo is:issue",
297+
hide_label_metrics=True,
297298
)
298299

299300
# Check that the function writes the correct markdown file
300301
with open("issue_metrics.md", "r", encoding="utf-8") as file:
301302
content = file.read()
302303
expected_content = (
303304
"# Issue Metrics\n\n"
304-
"| Metric | Average | Median | 90th percentile |\n"
305-
"| --- | --- | --- | ---: |\n"
306-
"\n"
307305
"| Metric | Count |\n"
308306
"| --- | ---: |\n"
309307
"| Number of items that remain open | 2 |\n"

0 commit comments

Comments (0)