Add trans and migration #604
Annotations
31 errors and 2 warnings
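Note: the errors below are auto-formatter diffs (Black-style): lines prefixed with - show the code as committed, lines prefixed with + show the expected layout. The recurring change is that a conditional (ternary) expression used as an argument or dictionary value gets wrapped in its own parentheses when it has to break across lines. A minimal sketch of that pattern, using hypothetical names rather than code from this PR:

# Hypothetical illustration of the parenthesized-conditional style the linter expects.
# When a ternary inside a call spans several lines, it is wrapped in its own
# parentheses so the value and its if/else branches stay visually grouped.
def percentage(achieved, total):
    return round(
        (
            achieved / total * 100
            if total
            else 0
        ),
        1,
    )

print(percentage(3, 4))  # 75.0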
/home/runner/work/online-judge/online-judge/judge/admin/profile.py#L13
class ProfileForm(ModelForm):
    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        if "current_contest" in self.base_fields:
            # form.fields['current_contest'] does not exist when the user has only view permission on the model.
-            self.fields[
-                "current_contest"
-            ].queryset = self.instance.contest_history.select_related("contest").only(
-                "contest__name", "user_id", "virtual"
-            )
-            self.fields["current_contest"].label_from_instance = (
-                lambda obj: "%s v%d" % (obj.contest.name, obj.virtual)
+            self.fields["current_contest"].queryset = (
+                self.instance.contest_history.select_related("contest").only(
+                    "contest__name", "user_id", "virtual"
+                )
+            )
+            self.fields["current_contest"].label_from_instance = lambda obj: (
+                "%s v%d" % (obj.contest.name, obj.virtual)
                if obj.virtual
                else obj.contest.name
            )
    class Meta:
|
/home/runner/work/online-judge/online-judge/judge/admin/submission.py#L267
                "problem__points",
            )
        )
        for submission in submissions:
            submission.points = round(
-                submission.case_points
-                / submission.case_total
-                * submission.problem.points
-                if submission.case_total
-                else 0,
+                (
+                    submission.case_points
+                    / submission.case_total
+                    * submission.problem.points
+                    if submission.case_total
+                    else 0
+                ),
                1,
            )
            if (
                not submission.problem.partial
                and submission.points < submission.problem.points
|
/home/runner/work/online-judge/online-judge/judge/contest_format/ecoo.py#L164
    def display_participation_result(self, participation, show_final=False):
        return format_html(
            '<td class="user-points">{points}<div class="solving-time">{cumtime}</div></td>',
            points=floatformat(participation.score),
-            cumtime=nice_repr(timedelta(seconds=participation.cumtime), "noday")
-            if self.config["cumtime"]
-            else "",
+            cumtime=(
+                nice_repr(timedelta(seconds=participation.cumtime), "noday")
+                if self.config["cumtime"]
+                else ""
+            ),
        )
|
/home/runner/work/online-judge/online-judge/judge/contest_format/ioi.py#L117
                    ],
                ),
                points=floatformat(
                    format_data["points"], -self.contest.points_precision
                ),
-                time=nice_repr(timedelta(seconds=format_data["time"]), "noday")
-                if self.config["cumtime"]
-                else "",
+                time=(
+                    nice_repr(timedelta(seconds=format_data["time"]), "noday")
+                    if self.config["cumtime"]
+                    else ""
+                ),
            )
        else:
            return mark_safe('<td class="problem-score-col"></td>')
    def display_participation_result(self, participation, show_final=False):
|
/home/runner/work/online-judge/online-judge/judge/contest_format/ioi.py#L134
        score = participation.score
        cumtime = participation.cumtime
        return format_html(
            '<td class="user-points">{points}<div class="solving-time">{cumtime}</div></td>',
            points=floatformat(score, -self.contest.points_precision),
-            cumtime=nice_repr(timedelta(seconds=cumtime), "noday")
-            if self.config["cumtime"]
-            else "",
+            cumtime=(
+                nice_repr(timedelta(seconds=cumtime), "noday")
+                if self.config["cumtime"]
+                else ""
+            ),
        )
|
/home/runner/work/online-judge/online-judge/judge/judgeapi.py#L110
                "submission-id": submission.id,
                "problem-id": submission.problem.code,
                "language": submission.language.key,
                "source": submission.source.source,
                "judge-id": judge_id,
-                "priority": BATCH_REJUDGE_PRIORITY
-                if batch_rejudge
-                else REJUDGE_PRIORITY
-                if rejudge
-                else priority,
+                "priority": (
+                    BATCH_REJUDGE_PRIORITY
+                    if batch_rejudge
+                    else REJUDGE_PRIORITY if rejudge else priority
+                ),
            }
        )
    except BaseException:
        logger.exception("Failed to send request to judge")
        Submission.objects.filter(id=submission.id).update(status="IE", result="IE")
|
/home/runner/work/online-judge/online-judge/judge/management/commands/render_pdf.py#L83
                get_template("problem/raw.html")
                .render(
                    {
                        "problem": problem,
                        "problem_name": problem_name,
-                        "description": problem.description
-                        if trans is None
-                        else trans.description,
+                        "description": (
+                            problem.description if trans is None else trans.description
+                        ),
                        "url": "",
                    }
                )
                .replace('"//', '"https://')
                .replace("'//", "'https://")
|
/home/runner/work/online-judge/online-judge/judge/models/submission.py#L165
        except AttributeError:
            return
        contest_problem = contest.problem
        contest.points = round(
-            self.case_points / self.case_total * contest_problem.points
-            if self.case_total > 0
-            else 0,
+            (
+                self.case_points / self.case_total * contest_problem.points
+                if self.case_total > 0
+                else 0
+            ),
            3,
        )
        if not contest_problem.partial and contest.points != contest_problem.points:
            contest.points = 0
        contest.save()
|
/home/runner/work/online-judge/online-judge/judge/tasks/contest.py#L22
    for participation in participations.iterator():
        for contest_submission in participation.submissions.iterator():
            submission = contest_submission.submission
            contest_problem = contest_submission.problem
            contest_submission.points = round(
-                submission.case_points
-                / submission.case_total
-                * contest_problem.points
-                if submission.case_total > 0
-                else 0,
+                (
+                    submission.case_points
+                    / submission.case_total
+                    * contest_problem.points
+                    if submission.case_total > 0
+                    else 0
+                ),
                3,
            )
            if (
                not contest_problem.partial
                and contest_submission.points != contest_problem.points
|
/home/runner/work/online-judge/online-judge/judge/ratings.py#L12
MEAN_INIT = 1400.0
VAR_INIT = 250**2 * (BETA2 / 212**2)
SD_INIT = sqrt(VAR_INIT)
VALID_RANGE = MEAN_INIT - 20 * SD_INIT, MEAN_INIT + 20 * SD_INIT
VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
-VAR_LIM = (
-    sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST
-) / 2
+VAR_LIM = (sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST) / 2
SD_LIM = sqrt(VAR_LIM)
TANH_C = sqrt(3) / pi
def tie_ranker(iterable, key=attrgetter("points")):
|
/home/runner/work/online-judge/online-judge/judge/models/profile.py#L606
        "email": user.email,
        "username": user.username,
        "mute": profile.mute,
        "first_name": user.first_name or None,
        "last_name": user.last_name or None,
-        "profile_image_url": profile.profile_image.url
-        if profile.profile_image
-        else None,
+        "profile_image_url": (
+            profile.profile_image.url if profile.profile_image else None
+        ),
        "display_rank": profile.display_rank,
        "rating": profile.rating,
    }
    res = {k: v for k, v in res.items() if v is not None}
    return res
|
/home/runner/work/online-judge/online-judge/judge/tasks/submission.py#L46
    with Progress(self, submissions.count(), stage=_("Modifying submissions")) as p:
        rescored = 0
        for submission in submissions.iterator():
            submission.points = round(
-                submission.case_points / submission.case_total * problem.points
-                if submission.case_total
-                else 0,
+                (
+                    submission.case_points / submission.case_total * problem.points
+                    if submission.case_total
+                    else 0
+                ),
                1,
            )
            if not problem.partial and submission.points < problem.points:
                submission.points = 0
            submission.save(update_fields=["points"])
|
/home/runner/work/online-judge/online-judge/judge/utils/pwned.py#L29
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
-
import hashlib
import logging
import requests
|
/home/runner/work/online-judge/online-judge/judge/utils/mathoid.py#L67
                self.mathoid_url,
                data={
                    "q": reescape.sub(lambda m: "\\" + m.group(0), formula).encode(
                        "utf-8"
                    ),
-                    "type": "tex"
-                    if formula.startswith(r"\displaystyle")
-                    else "inline-tex",
+                    "type": (
+                        "tex" if formula.startswith(r"\displaystyle") else "inline-tex"
+                    ),
                },
            )
            response.raise_for_status()
            data = response.json()
        except requests.ConnectionError:
|
/home/runner/work/online-judge/online-judge/judge/views/api/api_v1.py#L77
            "rating_ceiling": contest.rating_ceiling,
            "format": {
                "name": contest.format_name,
                "config": contest.format_config,
            },
-            "problems": [
-                {
-                    "points": int(problem.points),
-                    "partial": problem.partial,
-                    "name": problem.problem.name,
-                    "code": problem.problem.code,
-                }
-                for problem in problems
-            ]
-            if can_see_problems
-            else [],
+            "problems": (
+                [
+                    {
+                        "points": int(problem.points),
+                        "partial": problem.partial,
+                        "name": problem.problem.name,
+                        "code": problem.problem.code,
+                    }
+                    for problem in problems
+                ]
+                if can_see_problems
+                else []
+            ),
            "rankings": [
                {
                    "user": participation.username,
                    "points": participation.score,
                    "cumtime": participation.cumtime,
|
/home/runner/work/online-judge/online-judge/judge/views/blog.py#L60
        )
        context["current_contests"] = visible_contests.filter(
            start_time__lte=now, end_time__gt=now
        )
        context["future_contests"] = visible_contests.filter(start_time__gt=now)
-        context[
-            "recent_organizations"
-        ] = OrganizationProfile.get_most_recent_organizations(self.request.profile)
+        context["recent_organizations"] = (
+            OrganizationProfile.get_most_recent_organizations(self.request.profile)
+        )
        profile_queryset = Profile.objects
        if self.request.organization:
            profile_queryset = self.request.organization.members
        context["top_rated"] = (
|
/home/runner/work/online-judge/online-judge/judge/views/course.py#L76
        total_lesson_points += lesson.points
    res["total"] = {
        "achieved_points": total_achieved_points,
        "total_points": total_lesson_points,
-        "percentage": total_achieved_points / total_lesson_points * 100
-        if total_lesson_points
-        else 0,
+        "percentage": (
+            total_achieved_points / total_lesson_points * 100
+            if total_lesson_points
+            else 0
+        ),
    }
    return res
class CourseList(ListView):
|
/home/runner/work/online-judge/online-judge/judge/views/course.py#L363
                )
                total_points += lesson_problem.score
            grades[s]["total"] = {
                "achieved_points": achieved_points,
                "total_points": total_points,
-                "percentage": achieved_points / total_points * 100
-                if total_points
-                else 0,
+                "percentage": (
+                    achieved_points / total_points * 100 if total_points else 0
+                ),
            }
        return grades
    def get_context_data(self, **kwargs):
        context = super(CourseStudentResultsLesson, self).get_context_data(**kwargs)
|
/home/runner/work/online-judge/online-judge/judge/views/ranked_submission.py#L49
                WHERE sub.problem_id = %s {constraint}
                GROUP BY sub.user_id
            """.format(
                points=points, contest_join=contest_join, constraint=constraint
            ),
-            params=[self.problem.id, self.contest.id] * 3
-            if self.in_contest
-            else [self.problem.id] * 3,
+            params=(
+                [self.problem.id, self.contest.id] * 3
+                if self.in_contest
+                else [self.problem.id] * 3
+            ),
            alias="best_subs",
            join_fields=[("id", "id")],
            related_model=Submission,
        )
|
/home/runner/work/online-judge/online-judge/judge/views/test_formatter/test_formatter.py#L199
    def post(self, request):
        file_path = request.POST.get("file_path")
        with open(file_path, "rb") as zip_file:
            response = HttpResponse(zip_file.read(), content_type="application/zip")
-            response[
-                "Content-Disposition"
-            ] = f"attachment; filename={os.path.basename(file_path)}"
+            response["Content-Disposition"] = (
+                f"attachment; filename={os.path.basename(file_path)}"
+            )
            return response
|
/home/runner/work/online-judge/online-judge/judge/views/problem.py#L395
                    get_template("problem/raw.html")
                    .render(
                        {
                            "problem": problem,
                            "problem_name": problem_name,
-                            "description": problem.description
-                            if trans is None
-                            else trans.description,
+                            "description": (
+                                problem.description
+                                if trans is None
+                                else trans.description
+                            ),
                            "url": request.build_absolute_uri(),
                        }
                    )
                    .replace('"//', '"https://')
                    .replace("'//", "'https://")
|
/home/runner/work/online-judge/online-judge/judge/views/problem.py#L519
            )
        elif sort_key == "type":
            if self.show_types:
                queryset = list(queryset)
                queryset.sort(
-                    key=lambda problem: problem.types_list[0]
-                    if problem.types_list
-                    else "",
+                    key=lambda problem: (
+                        problem.types_list[0] if problem.types_list else ""
+                    ),
                    reverse=self.order.startswith("-"),
                )
        return queryset
    @cached_property
|
/home/runner/work/online-judge/online-judge/judge/views/problem.py#L1201
            "submission_limit": submission_limit,
            "submissions_left": submissions_left,
            "ACE_URL": settings.ACE_URL,
            "default_lang": default_lang,
            "problem_id": problem.id,
-            "output_only": problem.data_files.output_only
-            if hasattr(problem, "data_files")
-            else False,
+            "output_only": (
+                problem.data_files.output_only
+                if hasattr(problem, "data_files")
+                else False
+            ),
            "next_valid_submit_time": next_valid_submit_time,
        },
    )
|
/home/runner/work/online-judge/online-judge/judge/widgets/checkbox.py#L19
            template.render(
                {
                    "original_widget": original,
                    "select_all_id": select_all_id,
                    "select_all_name": select_all_name,
-                    "all_selected": all(choice[0] in value for choice in self.choices)
-                    if value
-                    else False,
+                    "all_selected": (
+                        all(choice[0] in value for choice in self.choices)
+                        if value
+                        else False
+                    ),
                    "empty": not self.choices,
                }
            )
        )
|
/home/runner/work/online-judge/online-judge/judge/views/widgets.py#L96
            lat, long = float(request.GET["lat"]), float(request.GET["long"])
        except (ValueError, KeyError):
            return HttpResponse(
                _("Bad latitude or longitude"), content_type="text/plain", status=404
            )
-        return {"askgeo": self.askgeo, "geonames": self.geonames,}.get(
+        return {
+            "askgeo": self.askgeo,
+            "geonames": self.geonames,
+        }.get(
            backend, self.default
        )(lat, long)
|
/home/runner/work/online-judge/online-judge/judge/widgets/mixins.py#L45
        media = super().media
        template = self.__templates[self.compress_css, self.compress_js]
        result = html.fromstring(template.render(Context({"media": media})))
        return forms.Media(
-            css={"all": [result.find(".//link").get("href")]}
-            if self.compress_css
-            else media._css,
-            js=[result.find(".//script").get("src")]
-            if self.compress_js
-            else media._js,
+            css=(
+                {"all": [result.find(".//link").get("href")]}
+                if self.compress_css
+                else media._css
+            ),
+            js=(
+                [result.find(".//script").get("src")]
+                if self.compress_js
+                else media._js
+            ),
        )
|
/home/runner/work/online-judge/online-judge/judge/views/contests.py#L379
    def get_context_data(self, **kwargs):
        context = super(ContestMixin, self).get_context_data(**kwargs)
        if self.request.user.is_authenticated:
            try:
-                context[
-                    "live_participation"
-                ] = self.request.profile.contest_history.get(
-                    contest=self.object,
-                    virtual=ContestParticipation.LIVE,
+                context["live_participation"] = (
+                    self.request.profile.contest_history.get(
+                        contest=self.object,
+                        virtual=ContestParticipation.LIVE,
+                    )
                )
            except ContestParticipation.DoesNotExist:
                context["live_participation"] = None
                context["has_joined"] = False
            else:
|
/home/runner/work/online-judge/online-judge/judge/views/contests.py#L409
        )
        context["meta_description"] = self.object.summary or metadata[0]
        context["og_image"] = self.object.og_image or metadata[1]
        context["has_moss_api_key"] = settings.MOSS_API_KEY is not None
        context["contest_has_hidden_subtasks"] = self.object.format.has_hidden_subtasks
-        context[
-            "show_final_ranking"
-        ] = self.object.format.has_hidden_subtasks and self.object.is_editable_by(
-            self.request.user
+        context["show_final_ranking"] = (
+            self.object.format.has_hidden_subtasks
+            and self.object.is_editable_by(self.request.user)
        )
        context["logo_override_image"] = self.object.logo_override_image
        if (
            not context["logo_override_image"]
|
/home/runner/work/online-judge/online-judge/judge/views/contests.py#L1053
        user=user,
        username=user.username,
        points=points,
        cumtime=cumtime,
        tiebreaker=participation.tiebreaker,
-        participation_rating=participation.rating.rating
-        if hasattr(participation, "rating")
-        else None,
+        participation_rating=(
+            participation.rating.rating if hasattr(participation, "rating") else None
+        ),
        problem_cells=[
            contest.format.display_user_problem(
                participation, contest_problem, show_final
            )
            for contest_problem in contest_problems
|
/home/runner/work/online-judge/online-judge/judge/views/submission.py#L467
        context["in_hidden_subtasks_contest"] = self.in_hidden_subtasks_contest()
        if context["in_hidden_subtasks_contest"]:
            for submission in context["submissions"]:
                self.modify_attrs(submission)
-        context[
-            "is_in_editable_contest"
-        ] = self.in_contest and self.contest.is_editable_by(self.request.user)
+        context["is_in_editable_contest"] = (
+            self.in_contest and self.contest.is_editable_by(self.request.user)
+        )
        return context
    def get(self, request, *args, **kwargs):
        check = self.access_check(request)
|
/home/runner/work/online-judge/online-judge/judge/views/submission.py#L501
        if "results" in request.GET:
            response = {}
            if not self.in_hidden_subtasks_contest():
                response["results_json"] = self.get_result_data()
-                response[
-                    "results_colors_json"
-                ] = settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS
+                response["results_colors_json"] = (
+                    settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS
+                )
            else:
                response["results_json"] = None
            return JsonResponse(response)
        return super(SubmissionsListBase, self).get(request, *args, **kwargs)
|
Run linters
The following actions use node12, which is deprecated, and will be forced to run on node16: actions/checkout@v2, actions/setup-python@v1. For more info: https://github.blog/changelog/2023-06-13-github-actions-all-actions-will-run-on-node16-instead-of-node12-by-default/
|
Run linters
The following actions use a deprecated Node.js version and will be forced to run on node20: actions/checkout@v2, actions/setup-python@v1, wearerequired/lint-action@v2. For more info: https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
|