Skip to content

Commit

Permalink
added 5 site support + dynamically getting tracker
Browse files Browse the repository at this point in the history
zippyshare.com, osdn.net, mediafire.com, yadi.sk, racaty.net supports.

see lines 48–52 in tobrot > helper_funcs > download_aria_p_n.py

tracker_urlsss = [
    "https://raw.githubusercontent.com/XIU2/TrackersListCollection/master/all.txt",
    "https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all.txt",
    "https://raw.githubusercontent.com/DeSireFire/animeTrackerList/master/AT_all.txt"
    ]

you can add more tracker lists here.
mail.ru and github were not working.
(
or i can't execute... when i execute from python its saying:
200 expected, got 400.
bla bla bla.
so i commented that lines for no-error.
)
  • Loading branch information
cihanvol authored May 11, 2021
1 parent d64dcea commit 69af322
Show file tree
Hide file tree
Showing 4 changed files with 235 additions and 3 deletions.
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ bs4
lxml
requests
messages
js2py
175 changes: 175 additions & 0 deletions tobrot/helper_funcs/direct_link_generator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Helper Module containing various sites direct links generators. This module is copied and modified as per need
from https://github.com/AvinashReddy3108/PaperplaneExtended . I hereby take no credit of the following code other
than the modifications. See https://github.com/AvinashReddy3108/PaperplaneExtended/commits/master/userbot/modules/direct_links.py
for original authorship. """

import json
import re
import urllib.parse
from os import popen
from random import choice
from js2py import EvalJs
import requests
from bs4 import BeautifulSoup

from tobrot.helper_funcs.exceptions import DirectDownloadLinkException


def direct_link_generator(text_url: str):
    """Dispatch *text_url* to the matching site-specific direct-link generator.

    Raises DirectDownloadLinkException when the URL is empty or no
    supported site matches.
    """
    if not text_url:
        raise DirectDownloadLinkException("`No links found!`")
    # Domain substring -> handler function table.
    handlers = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        # ('cloud.mail.ru', cm_ru),  # disabled: not working
        ('mediafire.com', mediafire),
        ('osdn.net', osdn),
        # ('github.com', github),  # disabled: not working
        ('racaty.net', racaty),
    )
    for domain, handler in handlers:
        if domain in text_url:
            return handler(text_url)
    raise DirectDownloadLinkException(f'No Direct link function found for {text_url}')


def zippy_share(url: str) -> str:
    """ ZippyShare direct link generator.

    Extracts the obfuscated download path from the page's inline
    JavaScript and evaluates it with js2py to build the final URL.
    """
    link = re.findall("https:/.(.*?).zippyshare", url)[0]
    response_content = (requests.get(url)).content
    bs_obj = BeautifulSoup(response_content, "lxml")

    # The script holding the path lives in a different container
    # depending on which page layout ZippyShare serves; catch only the
    # lookup failures instead of a bare except that hides real errors.
    try:
        js_script = bs_obj.find("div", {"class": "center"}).find_all(
            "script"
        )[1]
    except (AttributeError, IndexError):
        js_script = bs_obj.find("div", {"class": "right"}).find_all(
            "script"
        )[0]

    js_content = re.findall(r'\.href.=."/(.*?)";', str(js_script))
    js_content = 'var x = "/' + js_content[0] + '"'

    # Evaluate the snippet so any arithmetic embedded in the path is resolved.
    evaljs = EvalJs()
    setattr(evaljs, "x", None)
    evaljs.execute(js_content)
    js_content = getattr(evaljs, "x")

    return f"https://{link}.zippyshare.com{js_content}"


def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct

    Raises DirectDownloadLinkException when no link is found or the
    file is unavailable (consistent with the other generators — the
    original returned an error *string* here, which callers would have
    treated as a download URL).
    """
    try:
        text_url = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Yandex.Disk links found`\n")
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        dl_url = requests.get(api.format(text_url)).json()['href']
        return dl_url
    except KeyError:
        raise DirectDownloadLinkException("`Error: File not found / Download limit reached`\n")

'''
def cm_ru(url: str) -> str:
""" cloud.mail.ru direct links generator
Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
reply = ''
try:
text_url = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No cloud.mail.ru links found`\n")
command = f'vendor/cmrudl.py/cmrudl -s {text_url}'
result = popen(command).read()
result = result.splitlines()[-1]
try:
data = json.loads(result)
except json.decoder.JSONDecodeError:
raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
dl_url = data['download']
return dl_url
'''

def mediafire(url: str) -> str:
    """ MediaFire direct links generator

    Raises DirectDownloadLinkException when no MediaFire URL is present
    or the download anchor cannot be located on the page.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No MediaFire links found`\n")
    page = BeautifulSoup(requests.get(text_url).content, 'lxml')
    info = page.find('a', {'aria-label': 'Download file'})
    # find() returns None when the anchor is missing (dead/removed file);
    # fail with the project exception instead of an opaque AttributeError.
    if info is None:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    dl_url = info.get('href')
    return dl_url


def osdn(url: str) -> str:
    """ OSDN direct links generator

    Resolves the mirror-selection form and returns the download URL for
    the first listed mirror.
    """
    osdn_link = 'https://osdn.net'
    try:
        text_url = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    page = BeautifulSoup(
        requests.get(text_url, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    text_url = urllib.parse.unquote(osdn_link + info['href'])
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Only the first mirror is ever used, so there is no need to build
    # URLs for every mirror as the original did (rows[0] is the header).
    mirror = mirrors[1].find('input')['value']
    return re.sub(r'm=(.*)&f', f'm={mirror}&f', text_url)

'''
def github(url: str) -> str:
""" GitHub direct links generator """
try:
text_url = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No GitHub Releases links found`\n")
download = requests.get(text_url, stream=True, allow_redirects=False)
try:
dl_url = download.headers["location"]
return dl_url
except KeyError:
raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
'''

def useragent():
    """Return a random Android user-agent string scraped from
    developers.whatismybrowser.com."""
    source_url = (
        'https://developers.whatismybrowser.com/'
        'useragents/explore/operating_system_name/android/'
    )
    page = BeautifulSoup(requests.get(source_url).content, 'lxml')
    candidates = page.findAll('td', {'class': 'useragent'})
    return choice(candidates).text

def racaty(url: str) -> str:
    """ Racaty direct links generator.

    Replays the site's own download form (op/id hidden fields) via POST
    and returns the href of the expiring download anchor.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    page = BeautifulSoup(requests.get(text_url).text, 'html.parser')
    op = page.find('input', {'name': 'op'})['value']
    # 'file_id' instead of 'id' — don't shadow the builtin.
    file_id = page.find('input', {'name': 'id'})['value']
    response = requests.post(text_url, data={'op': op, 'id': file_id})
    result_page = BeautifulSoup(response.text, 'html.parser')
    dl_url = result_page.find('a', {'id': 'uniqueExpirylink'})['href']
    return dl_url
54 changes: 51 additions & 3 deletions tobrot/helper_funcs/download_aria_p_n.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import os
import sys
import time
import requests

import aria2p
from pyrogram.errors import FloodWait, MessageNotModified
Expand All @@ -27,11 +28,41 @@
)
from tobrot.helper_funcs.extract_link_from_message import extract_link
from tobrot.helper_funcs.upload_to_tg import upload_to_gdrive, upload_to_tg

from tobrot.helper_funcs.direct_link_generator import direct_link_generator
from tobrot.helper_funcs.exceptions import DirectDownloadLinkException
sys.setrecursionlimit(10 ** 4)

def KopyasizListe(string):
    """Split a comma-separated string into a list with duplicates removed,
    preserving first-seen order (dict keys keep insertion order)."""
    return list(dict.fromkeys(string.split(",")))

def Virgullustring(string):
    """Normalize a newline-separated tracker list into a single
    comma-separated string.

    Newlines become commas, runs of consecutive commas are collapsed to
    one, and leading/trailing commas are stripped.  The original did a
    single-pass replace(",,", ",") which left runs of three or more
    commas (e.g. from several consecutive blank lines) uncollapsed;
    looping to a fixpoint fixes that.
    """
    string = string.replace("\n", ",")
    while ",," in string:
        string = string.replace(",,", ",")
    return string.strip(',')

tracker_urlsss = [
    "https://raw.githubusercontent.com/XIU2/TrackersListCollection/master/all.txt",
    "https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all.txt",
    "https://raw.githubusercontent.com/DeSireFire/animeTrackerList/master/AT_all.txt"
    ]
tumtorrenttrackerstringi = ""
sonstringtrckr = ""
# Fetch the tracker lists dynamically at import time.  Each fetch is
# guarded: a single unreachable host must not crash the whole bot on
# startup (the original had no error handling and no timeout here).
for tracker_url in tracker_urlsss:
    try:
        response = requests.get(tracker_url, timeout=10)
        response.raise_for_status()
    except requests.RequestException:
        continue
    response.encoding = "utf-8"
    tumtorrenttrackerstringi += "\n"
    tumtorrenttrackerstringi += response.text
# Deduplicate and flatten into the comma-separated form aria2c expects.
trackerlistemiz = KopyasizListe(Virgullustring(tumtorrenttrackerstringi))
sonstringtrckr = ','.join(trackerlistemiz)
# LOGGER.info(sonstringtrckr)
async def aria_start():
global sonstringtrckr
aria2_daemon_start_cmd = []
# start the daemon, aria2c command
aria2_daemon_start_cmd.append("aria2c")
Expand All @@ -47,7 +78,9 @@ async def aria_start():
aria2_daemon_start_cmd.append("--rpc-listen-all=false")
aria2_daemon_start_cmd.append(f"--rpc-listen-port={ARIA_TWO_STARTED_PORT}")
aria2_daemon_start_cmd.append("--rpc-max-request-size=1024M")
aria2_daemon_start_cmd.append("--seed-time=0")
aria2_daemon_start_cmd.append(f"--bt-tracker={sonstringtrckr}")
aria2_daemon_start_cmd.append("--bt-max-peers=0")
aria2_daemon_start_cmd.append("--seed-time=0.01")
aria2_daemon_start_cmd.append("--max-overall-upload-limit=1K")
aria2_daemon_start_cmd.append("--split=10")
aria2_daemon_start_cmd.append(
Expand Down Expand Up @@ -119,7 +152,22 @@ def add_url(aria_instance, text_url, c_file_name):
# options = {
# "dir": c_file_name
# }
uris = [text_url]
#
# or "cloud.mail.ru" in text_url \ doesnt work.
# or "github.com" in text_url \ doesnt work.
#
if "zippyshare.com" in text_url \
or "osdn.net" in text_url \
or "mediafire.com" in text_url \
or "yadi.sk" in text_url \
or "racaty.net" in text_url:
try:
urisitring = direct_link_generator(text_url)
uris = [urisitring]
except DirectDownloadLinkException as e:
LOGGER.info(f'{text_url}: {e}')
else:
uris = [text_url]
# Add URL Into Queue
try:
download = aria_instance.add_uris(uris, options=options)
Expand Down
8 changes: 8 additions & 0 deletions tobrot/helper_funcs/exceptions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
class DirectDownloadLinkException(Exception):
    """Raised when no direct download link can be extracted from an HTTP link."""


class NotSupportedExtractionArchive(Exception):
    """Raised when the archive format the user is trying to extract is not supported."""

0 comments on commit 69af322

Please sign in to comment.