# test_project.py
import hashlib
import os
import random
import string

import pandas as pd
import pytest
from tabulate import tabulate

from project import QuizzardApp
from quiz import QuestionDashboard, Results


# Shared QuizzardApp instance for tests that request the `app` fixture.
@pytest.fixture
def app():
    return QuizzardApp('Journel')
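
# Usage note (assumption: a standard pytest invocation): run `pytest
# test_project.py` from the project root so the relative paths
# ./data/scores.json and certs/ used below resolve correctly.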

def test_quizzard_app_initialization():
    # The constructor should store the user name and build a single
    # menu definition containing the three expected choices.
    user_name = "Journel Cabrillos"
    quizzard_app = QuizzardApp(user_name)
    assert quizzard_app.user_name == user_name
    assert len(quizzard_app.menu_options) == 1
    assert quizzard_app.menu_options[0]['name'] == 'choice'
    assert quizzard_app.menu_options[0]['message'] == f'Welcome, {user_name} to CS50 - QuizzardApp'
    assert len(quizzard_app.menu_options[0]['choices']) == 3
    assert quizzard_app.menu_options[0]['choices'][0] == 'Start Quiz'
    assert quizzard_app.menu_options[0]['choices'][1] == 'View High Scores'
    assert quizzard_app.menu_options[0]['choices'][2] == 'Exit'

def test_question_dashboard(monkeypatch):
    question_list = [{
        "id": 0,
        "category": "Science",
        "question": "What is the chemical symbol for gold?",
        "choices": ["Au", "Ag", "Cu", "Fe"],
        "answer": "Au"
    }]
    mock_input = 'Au'

    def mock_prompt(questions: list):
        # The dashboard should pass the question definition through unchanged.
        assert questions[0]['question'] == "What is the chemical symbol for gold?"
        return {'choice': mock_input}

    monkeypatch.setattr('PyInquirer.prompt', mock_prompt)
    qb = QuestionDashboard("Journel Cabrillos", question_list)
    userdata = qb.show_question_and_gen_userdata()
    assert userdata['user_answers'] == [mock_input]
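
# Note: monkeypatch.setattr accepts a dotted-string target, so the call above
# replaces PyInquirer.prompt for the duration of this test and the dashboard
# receives the canned answer instead of blocking on real terminal input
# (assumption: QuestionDashboard calls PyInquirer.prompt internally).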

def test_results_output_perfect_answers(capfd):
    res = Results(
        user_data={
            "user_name": "Journel Cabrillos",
            "user_answers": ["Python", "SQL", "Pandas", "DataFrame"]
        },
        correct_answer=["Python", "SQL", "Pandas", "DataFrame"]
    )
    # Display results
    res.show()
    # Create the expected dataframe: every answer is correct
    test_df = pd.DataFrame(
        {
            'Your answers': ["Python", "SQL", "Pandas", "DataFrame"],
            'Validation': ["✅", "✅", "✅", "✅"]
        }
    )
    table = tabulate(test_df, headers='keys', tablefmt='grid')
    assert capfd.readouterr().out == f"\n{'-' * 14} Results {'-' * 14}\n{table}\n{'-' * 12} Score : {res.score}% {'-' * 12}\n"
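
# capfd captures output at the file-descriptor level, so the assertion above
# compares the complete printed report, including tabulate's grid borders and
# the validation marks, against what Results.show() wrote to stdout.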

def test_results_output_wrong_answers(capfd):
    res = Results(
        user_data={
            "user_name": "Journel Cabrillos",
            "user_answers": ["Python", "MySQL", "Pandas", "DataFrame"]
        },
        correct_answer=["Python", "SQL", "Pandas", "DataFrame"]
    )
    # Display results
    res.show()
    # Create the expected dataframe: "MySQL" should be flagged as wrong
    test_df = pd.DataFrame(
        {
            'Your answers': ["Python", "MySQL", "Pandas", "DataFrame"],
            'Validation': ["✅", "❌", "✅", "✅"]
        }
    )
    table = tabulate(test_df, headers='keys', tablefmt='grid')
    assert capfd.readouterr().out == f"\n{'-' * 14} Results {'-' * 14}\n{table}\n{'-' * 12} Score : {res.score}% {'-' * 12}\n"

def test_results_score():
    test_user_data = {
        "user_name": "Journel Cabrillos",
        "user_answers": ["Python", "MySQL", "Pandas", "DataFrame"]
    }
    test_correct_ans = ["Python", "SQL", "Pandas", "DataFrame"]

    def _cmprelst(l1, l2):
        '''Compare lists l1 and l2 and return a percentage score based on how many positions match.'''
        if len(l1) != len(l2):
            raise ValueError("l1 & l2 should have the same length.")
        total = len(l1)
        matches = sum(i1 == i2 for i1, i2 in zip(l1, l2))
        return int(round(matches / total, 2) * 100)

    res = Results(test_user_data, test_correct_ans)
    test_score = _cmprelst(test_user_data['user_answers'], test_correct_ans)
    assert test_score == res.score
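
# Worked example: three of the four answers match, so matches / total = 0.75,
# round(0.75, 2) = 0.75, and the reference score is int(0.75 * 100) == 75.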

def test_results_dump_score():
    def gen_random_alphanum(length: int) -> str:
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))

    def _calc_checksum(fp: str) -> str:
        '''Return the MD5 checksum of the file at fp.'''
        md5h = hashlib.md5()
        with open(fp, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b''):
                md5h.update(chunk)
        return md5h.hexdigest()

    prev_cksum = _calc_checksum("./data/scores.json")
    # Perform test actions that force a score update
    test_results = Results(
        user_data={
            "user_name": gen_random_alphanum(5),
            "user_answers": ['1', '1', '1', '1']
        },
        correct_answer=['1', '1', '1', '1']
    )
    test_results.dump_scores()
    new_cksum = _calc_checksum("./data/scores.json")
    assert new_cksum != prev_cksum
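
# Note: comparing MD5 checksums only proves the file's bytes changed; the
# random 5-character user name makes an identical rewrite of scores.json
# effectively impossible, so a changed checksum implies the score was saved.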

def test_show_certificate():
    from datetime import datetime

    def gen_random_alphanum(length: int) -> str:
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))

    test_user_name = gen_random_alphanum(5)
    test_cert_path = f"certs/{test_user_name.lower().strip().replace(' ', '')}_{datetime.now().strftime('%m%d%Y')}.pdf"
    test_results = Results({"user_name": test_user_name, "user_answers": ['1', '1', '1', '1']}, ['1', '1', '1', '1'])
    test_results.show_certificate_dialog()
    assert os.path.isfile(test_cert_path)
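
# A minimal cleanup sketch (assumption: generated certificate PDFs should not
# accumulate between runs). This fixture is illustrative and is not requested
# by the tests above as written: a test would append each path it creates to
# `created`, and the fixture removes those files once the test finishes.
@pytest.fixture
def cert_cleanup():
    created = []
    yield created
    for path in created:
        if os.path.isfile(path):
            os.remove(path)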