Skip to content

Commit

Permalink
fixup! crunchyroll: respond to crunchyroll site upgrade
Browse files Browse the repository at this point in the history
  • Loading branch information
TAAPArthur committed May 19, 2024
1 parent 444b166 commit 4a47c40
Showing 1 changed file with 19 additions and 18 deletions.
37 changes: 19 additions & 18 deletions amt/servers/crunchyroll.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from ..util.media_type import MediaType
from threading import RLock

from urllib.parse import urlencode


class GenericCrunchyrollServer(Server):
Expand Down Expand Up @@ -55,22 +54,25 @@ def session_get_json(self, url, mem_cache=False, skip_cache=True, **kwargs):

return self.session_get_cache_json(url, mem_cache=mem_cache, skip_cache=skip_cache, **kwargs)

def get_auth_headers_str(self):
return ",".join((f"{k}:{v}" for k, v in self.get_auth_headers().items()))

def get_auth_headers(self):
if not GenericCrunchyrollServer._auth_headers:
self.update_auth()
self.update_auth()
return GenericCrunchyrollServer._auth_headers

def get_auth_headers_str(self):
    """Render the auth headers as a single comma-separated "key:value" string."""
    pairs = [f"{key}:{value}" for key, value in self.get_auth_headers().items()]
    return ",".join(pairs)

def set_auth_info(self, data):
GenericCrunchyrollServer._auth_headers = {"Authorization": data["token_type"] + " " + data["access_token"], "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.20 Safari/537.36"}
GenericCrunchyrollServer._auth_headers = {"Authorization": data["token_type"] + " " + data["access_token"]}
GenericCrunchyrollServer._auth_refresh = time.time() + data.get("expires_in", 300) - 10
self.logger.info("Setting auth %d", GenericCrunchyrollServer._auth_refresh)

def update_auth(self):
self.logger.info("Update auth");
with GenericCrunchyrollServer.crunchyroll_lock:
if GenericCrunchyrollServer._auth_headers and GenericCrunchyrollServer._auth_refresh > time.time():
if GenericCrunchyrollServer._auth_refresh > time.time():
self.logger.info("Skipping updating auth; may attempt in %d s %d", GenericCrunchyrollServer._auth_refresh, time.time());
return
self.logger.info("Updating auth %d vs %d", GenericCrunchyrollServer._auth_refresh, time.time());

auth_headers = {"Authorization": GenericCrunchyrollServer._BASIC_AUTH}
if self.refresh_token:
Expand Down Expand Up @@ -158,15 +160,14 @@ def get_all_media_data_from_url(self, url):
if match:
return self.get_media_data_for_series(match.group(1))
media_id = self.get_chapter_id_for_url(url)
bucket, params = self.get_params()
query = urlencode(params)
url = f"{self.get_api_domain()}/cms/v2{bucket}/episodes/{media_id}"
data = self.session_get_cache_json(f"{self.get_api_domain()}/cms/v2{bucket}/episodes/{media_id}?{query}", key=url)
url = f"{self.get_api_domain()}/content/v2/cms/objects/{media_id}?rating=true&locale=en-US"
try :
data = self.session_get_cache_json(url, key=url, need_auth_headers=True)

return [self.create_media_data(id=data["series_id"],
name=data["series_title"],
season_id=data["season_id"], season_title=data["season_title"],
lang=data["audio_locale"])]
media_id = data["data"][0]["episode_metadata"]["series_id"]
return self.get_media_data_for_series(media_id)
except:
return []

def get_chapter_id_for_url(self, url):
    """Extract the chapter/episode id (first capture group of stream_url_regex) from *url*.

    NOTE(review): raises AttributeError if *url* does not match
    stream_url_regex — presumably callers only pass matching urls; confirm.
    """
    match = self.stream_url_regex.search(url)
    return match.group(1)
Expand All @@ -181,7 +182,7 @@ def update_media_data(self, media_data, **kwargs):

def get_stream_urls(self, media_data=None, chapter_data=None):
url = f"https://cr-play-service.prd.crunchyrollsvc.com/v1/{chapter_data['id']}/console/switch/play"
data = self.session_get_json(url, key=url, mem_cache=True, headers=self.get_auth_headers())
data = self.session_get_json(url, need_auth_headers=True)

url_list = []
for hardSubs in data["hardSubs"].values():
Expand All @@ -193,7 +194,7 @@ def get_stream_urls(self, media_data=None, chapter_data=None):

def get_subtitle_info(self, media_data, chapter_data):
url = f"https://cr-play-service.prd.crunchyrollsvc.com/v1/{chapter_data['id']}/console/switch/play"
data = self.session_get_json(url, key=url, mem_cache=True, headers=self.get_auth_headers())
data = self.session_get_json(url, need_auth_headers=True)

for subInfo in data["subtitles"].values():
yield subInfo["language"], subInfo["url"], subInfo["format"], False
Expand Down

0 comments on commit 4a47c40

Please sign in to comment.