Skip to content

Commit

Permalink
Luscious Support, Code Cleanup, Small fixes and debug print removed
Browse files Browse the repository at this point in the history
Luscious is now supported except for videos. (I couldn't test it since I am not able to create an account because their site is broken on all of my browsers)
Cleaned up code in a few files and removed unnecessary imports
Fixed Debug text being printed in the Yiffer Module
Added Placeholder files for nhentai and pixiv
Fixed possible issue in the Multporn Module
Updated ReadMe
  • Loading branch information
Official-Husko committed Jul 21, 2022
1 parent 5689eed commit 385d10e
Show file tree
Hide file tree
Showing 11 changed files with 110 additions and 40 deletions.
2 changes: 1 addition & 1 deletion Clean Folder.bat
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
rmdir /s /q .\dist
rmdir /s /q .\build
rmdir /s /q .\__pycache__
del ".\GetFileList.spec"
del ".\NN-Downloader.spec"
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,16 @@ This is not the complete version and it only works on a [few][13] sites currentl
- [Furbooru][6] (API)
- [Multporn][7]
- [Yiffer][8]
- [Luscious][16]

#### Planned:
- [YiffGallery][9]
- ~~[FurryBooru][10]~~ Currently not possible due to cloudflare issues.
- [BooruPlus][11]
- [nHentai][15]
- [Pixiv][17]
- [HentaiRead][18]


[1]:https://github.com/Official-Husko/multporn-image-downloader-v2
[2]:https://github.com/Official-Husko/multporn-image-downloader
Expand All @@ -37,6 +41,10 @@ This is not the complete version and it only works on a [few][13] sites currentl
[13]:https://github.com/Official-Husko/NN-Downloader#currently-supported=
[14]:https://github.com/Official-Husko/NN-Downloader/releases/latest
[15]:https://nhentai.net/
[16]:https://luscious.net/
[17]:https://www.pixiv.net/
[18]:https://hentairead.com/


Further sites can be added. Just open a [support ticket][11] with the url to the site.

Expand Down
31 changes: 22 additions & 9 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from modules import E621, RULE34, ProxyScraper, FURBOORU, E926, Multporn, Yiffer
from modules import E621, RULE34, ProxyScraper, FURBOORU, E926, Multporn, Yiffer, Luscious
import json
import os
from termcolor import colored
Expand All @@ -7,7 +7,7 @@
from sys import exit


version = "1.2.0"
version = "1.3.0"
windll.kernel32.SetConsoleTitleW(f"NN-Downloader | v{version}")
proxy_list = []
header = {"User-Agent":f"nn-downloader/{version} (by Official Husko on GitHub)"}
Expand All @@ -28,15 +28,20 @@ def main_startup():
if not os.path.exists("media"):
os.mkdir("media")

# Check if config exists else create it
# Check if config exists and read it
if os.path.exists("config.json"):
with open("config.json") as cf:
config = json.load(cf)
user_blacklist = config["blacklisted_tags"]
user_proxies = config["proxies"]
user_OTD = config["oneTimeDownload"]
user_blacklist = config["blacklisted_tags"]
user_blocked_formats = config["blacklisted_formats"]

# Create a new config with default values
else:
default_config = {
"proxies": "true",
"oneTimeDownload": "true",
"user_credentials": {
"e621": {
"apiUser": "",
Expand Down Expand Up @@ -68,13 +73,17 @@ def main_startup():
"blacklisted_tags": [
"example1",
"example2"
],
"blacklisted_formats": [
"example1",
"example2"
]
}
with open("config.json", "w") as cc:
json.dump(default_config, cc, indent=6)
cc.close()
print(colored("New Config file generated. Please enter the Api Keys and the blacklisted tags in there after that restart the tool.", "green"))
sleep(5)
print(colored("New Config file generated. Please configure it for your use case and add API keys for needed services.", "green"))
sleep(7)
exit(0)

if user_proxies == True:
Expand All @@ -91,7 +100,7 @@ def main_startup():
Main.main_startup()
print("")

if site in ["multporn", "yiffer"]:
if site in ["multporn", "yiffer", "luscious"]:
pass
else:
print(colored("Please enter the tags you want to use", "green"))
Expand Down Expand Up @@ -139,8 +148,12 @@ def main_startup():
print(colored("Please enter the link. (e.g. https://yiffer.xyz/Howl & Jasper)", "green"))
URL = input(">> ")
Yiffer.Fetcher(proxy_list=proxy_list, user_proxies=user_proxies, header=header, URL=URL)


elif site == "luscious":
print(colored("Please enter the link. (e.g. https://www.luscious.net/albums/bifurcation-ongoing_437722)", "green"))
URL = input(">> ")
Luscious.Fetcher(proxy_list=proxy_list, user_proxies=user_proxies, header=header, URL=URL)


else:
print(colored("Site not supported. Open a ticket to request support for that site!", "red"))

Expand Down
3 changes: 2 additions & 1 deletion modules/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@
from .furbooru import FURBOORU
from .e926 import E926
from .multporn import Multporn
from .yiffer import Yiffer
from .yiffer import Yiffer
from .luscious import Luscious
17 changes: 0 additions & 17 deletions modules/furbooru.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,43 +29,26 @@ def Fetcher(user_tags, user_blacklist, proxy_list, max_sites, user_proxies, apiK
else:
for item in req["images"]:
image_hidden = item["hidden_from_users"]
#print(image_hidden)
if image_hidden != False:
#print("hidden")
pass
else:
#print(item)
#sleep(3)
post_tags = item["tags"]
image_address = item["representations"]["full"]
image_format = item["format"]
image_id = item["id"]
#print(post_tags)
#print(image_address)
#print(image_format)
#print(image_id)
#print("")
#sleep(3)
user_blacklist_lenght = len(user_blacklist)
#print("Bad: ", user_blacklist_lenght)
passed = 0

for blacklisted_tag in user_blacklist:
if blacklisted_tag in post_tags:
#print(blacklisted_tag)
break
else:
passed += 1
#print("Good: ", passed)
if passed == user_blacklist_lenght:
image_data = {"image_address": image_address, "image_format": image_format, "image_id": image_id}
approved_list.append(image_data)
#print(colored(f"{image_id} passed the test!", "green"))
#print("")
else:
pass
#print(colored(f"{image_id} did not pass the test!", "red"))
#print("")
with alive_bar(len(approved_list), calibrate=1, dual_line=True, title='Downloading') as bar:
for data in approved_list:
image_address = data["image_address"]
Expand Down
74 changes: 74 additions & 0 deletions modules/luscious.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import requests
import random
from termcolor import colored
from time import sleep
from alive_progress import alive_bar
import os

class Luscious():
    """Downloader for luscious.net albums.

    Pages through an album via the public GraphQL endpoint
    (``api.luscious.net/graphql/nobatch/``) and saves every picture to
    ``media/<album title>/``.  Videos are not handled (``url_to_video``
    is requested but never downloaded).
    """

    def _parse_album(URL):
        """Split an album/picture URL into ``(title, album_id)``.

        The interesting path segment looks like ``<title>_<numeric id>``:
        the title is everything before the first underscore, the id is
        everything after the last one.  Returns ``None`` when the URL
        does not match a known layout (wrong category or too few path
        segments) instead of raising IndexError.

        Example: ``https://www.luscious.net/albums/foo-bar_437722``
        yields ``("foo-bar", "437722")``.
        """
        parts = URL.split("/")
        try:
            if parts[3] == "pictures":
                slug = parts[5]
            elif parts[3] in ["album", "albums"]:
                slug = parts[4]
            else:
                return None
        except IndexError:
            # URL too short to contain the expected path segments.
            return None
        return slug.partition("_")[0], slug.rpartition("_")[2]

    def Fetcher(proxy_list, user_proxies, header, URL):
        """Download every picture of a luscious.net album.

        proxy_list   -- list of proxy dicts (used when user_proxies is True)
        user_proxies -- route requests through a random proxy when True
        header       -- HTTP headers sent with every API request
        URL          -- album or picture URL supplied by the user
        """
        parsed = Luscious._parse_album(URL)
        if parsed is None:
            print("An error occured! Please report this with the link you used.")
            sleep(5)
            return
        title, album_id = parsed

        # Create the target directory once, up front; exist_ok avoids a
        # race between an existence check and the mkdir.
        os.makedirs(f"media/{title}", exist_ok=True)

        api_url = "https://api.luscious.net/graphql/nobatch/"
        page = 1
        while True:
            querystring = {"operationName":"AlbumListOwnPictures","query":"query AlbumListOwnPictures($input: PictureListInput!) { picture { list(input: $input) { info { ...FacetCollectionInfo } items { id title url_to_original url_to_video tags { text } } } } } fragment FacetCollectionInfo on FacetCollectionInfo { page has_next_page total_items total_pages items_per_page url_complete }","variables":"{\"input\":{\"filters\":[{\"name\":\"album_id\",\"value\":\"" + str(album_id) + "\"}],\"display\":\"rating_all_time\",\"page\":" + str(page) + "}}"}
            if user_proxies == True:
                proxy = random.choice(proxy_list)
                req = requests.get(api_url, headers=header, proxies=proxy, params=querystring).json()
            else:
                req = requests.get(api_url, headers=header, params=querystring).json()

            avail_sites = req["data"]["picture"]["list"]["info"]["total_pages"]

            # Past the last page: report completion and stop.
            if page > avail_sites:
                print("")
                print(colored(f"No Further Sites Found.", "green"))
                sleep(3)
                break

            items = req["data"]["picture"]["list"]["items"]
            # NOTE(review): this guard only fires on page 2 (original
            # behavior kept) — presumably an empty page that early means
            # the API rejected the album id; confirm intent.
            if items == [] and page == 2:
                print("An error occured! Please report this with the link you used.")
                sleep(5)
                break

            # Size the bar to THIS page's item count so it can actually
            # reach 100% (the API caps items per page, so the album-wide
            # total would leave every page's bar unfinished).
            with alive_bar(len(items), calibrate=1, dual_line=True, title='Downloading') as bar:
                for item in items:
                    image_title = item["title"]
                    image_address = item["url_to_original"]
                    # File extension = text after the final dot.
                    extension = image_address.rpartition(".")[2]
                    bar.text = f'-> Downloading: {image_title}, please wait...'

                    if user_proxies == True:
                        proxy = random.choice(proxy_list)
                        img_data = requests.get(image_address, proxies=proxy).content
                    else:
                        # Be polite to the CDN when not spreading load
                        # over proxies.
                        sleep(1)
                        img_data = requests.get(image_address).content
                    with open(f"media/{title}/{str(image_title)}.{extension}", 'wb') as handler:
                        handler.write(img_data)
                    bar()

            print(colored(f"Page {page} Completed", "green"))
            page += 1
            sleep(5)
3 changes: 1 addition & 2 deletions modules/multporn.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import requests
import random
import re
import json
import xmltodict
from termcolor import colored
from time import sleep
Expand Down Expand Up @@ -85,7 +84,7 @@ def Fetcher(proxy_list, user_proxies, header, URL):
if req.status_code == 404:
print(colored("An error occurred! please report this to the dev"))
sleep(3)
pass
return

# convert the xml to json for the sake of my mental health
juicebox_data = xmltodict.parse(req.content)
Expand Down
1 change: 1 addition & 0 deletions modules/nhentai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Phew a bit empty in here isn't it?
1 change: 1 addition & 0 deletions modules/pixiv.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Phew a bit empty in here isn't it?
8 changes: 0 additions & 8 deletions modules/rule34.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,31 +26,23 @@ def Fetcher(user_tags, user_blacklist, proxy_list, max_sites, user_proxies, head
break
else:
for item in req:
#print(item)
post_tags = str.split(item["tags"])
image_address = item["file_url"]
image_name = item["image"]
image_id = item["id"]
user_blacklist_lenght = len(user_blacklist)
#print("Bad: ", user_blacklist_lenght)
passed = 0

for blacklisted_tag in user_blacklist:
if blacklisted_tag in post_tags:
#print(blacklisted_tag)
break
else:
passed += 1
#print("Good: ", passed)
if passed == user_blacklist_lenght:
image_data = {"image_address": image_address, "image_name": image_name, "image_id": image_id}
approved_list.append(image_data)
#print(colored(f"{image_id} passed the test!", "green"))
#print("")
else:
pass
#print(colored(f"{image_id} did not pass the test!", "red"))
#print("")
with alive_bar(len(approved_list), calibrate=1, dual_line=True, title='Downloading') as bar:
for data in approved_list:
image_address = data["image_address"]
Expand Down
2 changes: 0 additions & 2 deletions modules/yiffer.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,7 @@ def Fetcher(proxy_list, user_proxies, header, URL):
# link operations
URL = requests.utils.unquote(URL, encoding='utf-8', errors='replace')
parts = URL.split("/")
print(parts)
title = parts[3]
print(title)

# Get item info
URL = f"https://yiffer.xyz/api/comics/{title}"
Expand Down

0 comments on commit 385d10e

Please sign in to comment.