submission.py
"""
Template script for the submission. You can use this as a starting point for your code: you can
copy this script as is into your repository, and then modify the associated Model class to include
your logic, instead of the random baseline. Most of this script should be left unchanged for your submission
as we should be able to run your code to confirm your scores.
Please make sure you read and understand the competition rules and guidelines before you start.
"""
import os
from datetime import datetime
from dotenv import load_dotenv
# load env variables from file
load_dotenv('upload.env', verbose=True)
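# upload.env is expected to define the variables read below; a placeholder example
# (values are illustrative, not real credentials):
#
#   EMAIL=you@example.com
#   BUCKET_NAME=<from your registration e-mail>
#   PARTICIPANT_ID=<from your registration e-mail>
#   AWS_ACCESS_KEY=<from your registration e-mail>
#   AWS_SECRET_KEY=<from your registration e-mail>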
# variables for the submission
EMAIL = os.getenv('EMAIL') # the e-mail you used to sign up
assert EMAIL != '' and EMAIL is not None
BUCKET_NAME = os.getenv('BUCKET_NAME') # you received it in your e-mail
PARTICIPANT_ID = os.getenv('PARTICIPANT_ID') # you received it in your e-mail
AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY') # you received it in your e-mail
AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY') # you received it in your e-mail
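# sanity checks on the remaining credentials, mirroring the EMAIL assert above (an optional
# addition: failing fast here beats a cryptic error later inside the runner)
assert BUCKET_NAME and PARTICIPANT_ID and AWS_ACCESS_KEY and AWS_SECRET_KEY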
# run the evaluation loop when the script is called directly
if __name__ == '__main__':
    # import the basic classes
    from evaluation.EvalRSRunner import EvalRSRunner, ChallengeDataset
    from evaluation.EvalRSRecList import MyEvalRSRecList
    from submission.ModelPretrained import MyModel
    print('\n\n==== Starting evaluation script at: {} ====\n'.format(datetime.utcnow()))
    # load the dataset with the default values for the challenge
    print('\n\n==== Loading dataset at: {} ====\n'.format(datetime.utcnow()))
    dataset = ChallengeDataset()
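    # ChallengeDataset exposes (among other things) df_tracks and df_users, the two
    # dataframes the model below is built from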
    print('\n\n==== Init runner at: {} ====\n'.format(datetime.utcnow()))
    # instantiate the runner that drives the evaluation loop
    runner = EvalRSRunner(
        dataset=dataset,
        aws_access_key_id=AWS_ACCESS_KEY,
        aws_secret_access_key=AWS_SECRET_KEY,
        participant_id=PARTICIPANT_ID,
        bucket_name=BUCKET_NAME,
        email=EMAIL
    )
    print('==== Runner loaded, starting loop at: {} ====\n'.format(datetime.utcnow()))
    # NOTE: this evaluation will run with the default values for the parameters and the upload flag.
    # For local testing and iteration, check the tutorial in the notebooks folder and the
    # Kaggle notebook: https://www.kaggle.com/code/vinidd/cikm-data-challenge
    my_model = MyModel(
        dataset.df_tracks,
        dataset.df_users,
    )
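    # a minimal sketch of the interface MyModel is assumed to implement, based on this
    # template (check evaluation/EvalRSRunner.py for the exact contract the runner expects):
    #
    #   class MyModel:
    #       def __init__(self, df_tracks, df_users): ...
    #       def train(self, train_df): ...    # fit on the fold's training split
    #       def predict(self, user_ids): ...  # return recommendations per user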
    # run the evaluation with your model: the evaluation loop will magically perform the
    # fold splitting and the training / testing, and, when the upload flag is set,
    # submit the results to the leaderboard
    runner.evaluate(
        model=my_model,
        upload=False,
        custom_RecList=MyEvalRSRecList
    )
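    # once you are happy with local results, flip upload=True above to send your scores to
    # the leaderboard (inferred from the flag and the NOTE above; see the competition
    # guidelines for the official submission flow)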
    print('\n\n==== Evaluation ended at: {} ===='.format(datetime.utcnow()))