From ca44daa8c810e9c2ac15d6fc9c21134621b0effd Mon Sep 17 00:00:00 2001 From: Greg Werner Date: Mon, 9 Sep 2024 17:43:13 -0400 Subject: [PATCH] create script file to fetch submissions Signed-off-by: Greg Werner --- scripts/README.md | 34 +++++++++++++++++++++ scripts/fetch_submissions.py | 59 ++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 scripts/README.md create mode 100644 scripts/fetch_submissions.py diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..15298ea --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,34 @@ +# Utility Scripts + +## Setup + +1. Install requirements. + +> It is recommended to install packages in a virtual environment, such as with `virtualenv` or `pipenv`. + +```bash +pip install pandas +pip install requests +``` + +2. Obtain an API Key. + +Navigate to Campus -> Settings -> API Key. Create a new API Key, copy the value, and save it, as you will need it for step 3. + +3. Update the script with the API Key and Course Slug values. To view the course slug, navigate to your course and copy the course slug after the `.../courses/` part from the value in the address bar. For example, for: + +```bash +https://uofmichigan.illumidesk.com/app/courses/rob101-fa-2024/lessons/activity/8c804205-e5d7-4640-8f88-8cdbfdaf5b23 +``` + +The course slug is `rob101-fa-2024`. + +## Fetch Submissions + +1. Run the script to fetch student submissions: + +```bash +python fetch_submissions.py +``` + +2. A CSV file with the results will be saved to the same folder where you ran your script.
"""Fetch all submissions for an IllumiDesk course and save them to a CSV file.

Paginates through the course submissions API endpoint, collects every
result, and writes them to a timestamped CSV in the current directory.
"""

import os
from datetime import datetime

import pandas as pd

COURSE_SLUG = "rob101-fa-2023"

# SECURITY NOTE: avoid committing a real API key to source control.
# The environment variable takes precedence; the literal default keeps
# the original in-place-edit workflow described in the README working.
API_KEY = os.environ.get(
    "ILLUMIDESK_API_KEY", "38c47c2ad80d3a584b79601b60d9b3b36affd209"
)

BASE_URL = f"https://api.illumidesk.com/api/v1/courses/{COURSE_SLUG}/submissions/"

PAGE_SIZE = 100        # results requested per API call
REQUEST_TIMEOUT = 30   # seconds; prevents a hung request from blocking forever


def build_paginated_url(base_url: str, limit: int, offset: int) -> str:
    """Return *base_url* with ``limit``/``offset`` query parameters appended."""
    return f"{base_url}?limit={limit}&offset={offset}"


def fetch_all_submissions(
    base_url: str = BASE_URL,
    api_key: str = API_KEY,
    page_size: int = PAGE_SIZE,
) -> "list[dict]":
    """Fetch every page of submissions from the API.

    Returns a list of submission records (one dict per submission).
    Stops early, with a message, on the first non-200 response.
    """
    # Local import so the module can be imported (e.g. for the URL helper)
    # without requiring the third-party ``requests`` package at import time.
    import requests

    headers = {
        "accept": "application/json",
        "Authorization": f"Token {api_key}",
    }

    all_data: "list[dict]" = []
    offset = 0
    while True:
        response = requests.get(
            build_paginated_url(base_url, page_size, offset),
            headers=headers,
            timeout=REQUEST_TIMEOUT,
        )
        if response.status_code != 200:
            print(f"Failed to retrieve data. Status code: {response.status_code}")
            break

        results = response.json().get("results", [])
        if not results:  # an empty page means every submission has been fetched
            break

        all_data.extend(results)
        offset += page_size

    return all_data


def save_submissions_csv(records: "list[dict]") -> "str | None":
    """Write *records* to a timestamped CSV file in the current directory.

    Returns the CSV filename, or ``None`` when there is nothing to save.
    """
    if not records:
        print("No data was retrieved.")
        return None

    df = pd.json_normalize(records)

    # Unique filename so repeated runs never overwrite earlier exports.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    csv_file_name = f"submissions_data_{timestamp}.csv"

    df.to_csv(csv_file_name, index=False)
    print(f"All submissions have been saved to CSV file '{csv_file_name}'.")
    return csv_file_name


if __name__ == "__main__":
    save_submissions_csv(fetch_all_submissions())