diff --git a/README.md b/README.md
index 6f20d35..6007571 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,9 @@ Quasarr includes a solution to quickly and easily decrypt protected links.
 Just follow the link from the console output (or discord notification) and solve the CAPTCHA.
 Quasarr will confidently handle the rest.
 
+**Warning: this project is still in the proof-of-concept stage.
+It is only tested with Radarr and the three currently supported hostnames.**
+
 # Instructions
 
 * Follow instructions to set up at least one hostname for Quasarr
@@ -24,9 +27,6 @@ Quasarr will confidently handle the rest.
   * Use this API key: `quasarr`
   * As with other download clients, you must ensure the download path used by JDownloader is accessible to *arr.
 
-**Warning: this project is still in the proof-of-concept stage.
-It is only tested with Radarr and only two hostname are currently supported.**
-
 # Setup
 
 `pip install quasarr`
diff --git a/quasarr/__init__.py b/quasarr/__init__.py
index 0572eca..88360e7 100644
--- a/quasarr/__init__.py
+++ b/quasarr/__init__.py
@@ -12,10 +12,10 @@ import time
 
 from quasarr.arr import api
-from quasarr.persistence.config import Config, get_clean_hostnames
-from quasarr.persistence.sqlite_database import DataBase
+from quasarr.storage.config import Config, get_clean_hostnames
+from quasarr.storage.sqlite_database import DataBase
 from quasarr.providers import shared_state, version
-from quasarr.providers.setup import path_config, hostnames_config, nx_credentials_config, jdownloader_config
+from quasarr.storage.setup import path_config, hostnames_config, nx_credentials_config, jdownloader_config
 
 
 def run():
@@ -45,7 +45,7 @@ def run():
         config_path = "/config"
     if not arguments.internal_address:
         print(
-            "You must set the INTERNAL_ADDRESS variable to a locally reachable URL, e.g. http://localhost:8080")
+            "You must set the INTERNAL_ADDRESS variable to a locally reachable URL, e.g. http://192.168.1.1:8080")
         print("The local URL will be used by Radarr/Sonarr to connect to Quasarr")
         print("Stopping Quasarr...")
         sys.exit(1)
diff --git a/quasarr/arr/__init__.py b/quasarr/arr/__init__.py
index 14f094d..d7e0f9c 100644
--- a/quasarr/arr/__init__.py
+++ b/quasarr/arr/__init__.py
@@ -2,6 +2,7 @@
 # Quasarr
 # Project by https://github.com/rix1337
 
+import json
 import re
 import traceback
 from base64 import urlsafe_b64decode
@@ -25,30 +26,72 @@ def api(shared_state_dict, shared_state_lock):
 
     app = Bottle()
 
-    @app.route('/captcha')
+    @app.get('/captcha')
     def serve_captcha():
-        protected = shared_state.get_db("protected").retrieve_all_titles()
-        if not protected:
-            return render_centered_html('<h1>Quasarr</h1><p>No protected packages found! CAPTCHA not needed.</p>')
         try:
             device = shared_state.values["device"]
         except KeyError:
             device = None
         if not device:
-            return render_centered_html('<h1>Quasarr</h1><p>JDownloader connection not established.</p>')
+            return render_centered_html(f'''<h1>Quasarr</h1>
+            <p>JDownloader connection not established.</p>
+            {render_button("Back", "primary", {"onclick": "location.href='/'"})}''')
+
+        protected = shared_state.get_db("protected").retrieve_all_titles()
+        if not protected:
+            return render_centered_html(f'''<h1>Quasarr</h1>
+            <p>No protected packages found! CAPTCHA not needed.</p>
+            {render_button("Back", "primary", {"onclick": "location.href='/'"})}''')
+        else:
+            package = protected[0]
+            package_id = package[0]
+            data = json.loads(package[1])
+            title = data["title"]
+            links = data["links"]
+            password = data["password"]
+
+            link_options = ""
+            if len(links) > 1:
+                for link in links:
+                    if "filecrypt." in link[0]:
+                        link_options += f'<option value="{link[0]}">{link[1]}</option>'
+                link_select = f'''<select id="mirror" name="mirror">
+                    {link_options}
+                </select>'''
+            else:
+                link_select = f'<p>Mirror: {links[0][1]}</p>'
+            content = render_centered_html(r'''<h1>Quasarr</h1>
+            {link_select}
+            <!-- CAPTCHA widget container and loader script -->
+            Your adblocker prevents the captcha from loading. Disable it!
-            <!-- token submit markup (previous revision) -->
+            <!-- token submit markup (revised: also posts package_id, title, link and password) -->
             {render_button("Back", "primary", {"onclick": "location.href='/'"})}
             ''')
@@ -85,34 +131,31 @@ def submit_token():
         protected = shared_state.get_db("protected").retrieve_all_titles()
         if not protected:
             return {"success": False, "title": "No protected packages found! CAPTCHA not needed."}
-        else:
-            first_protected = protected[0]
-            package_id = first_protected[0]
-            details = first_protected[1].split("|")
-            title = details[0]
-            link = details[1]
-            password = details[3]
 
-        links = []
+        download_links = []
         try:
             data = request.json
             token = data.get('token')
+            package_id = data.get('package_id')
+            title = data.get('title')
+            link = data.get('link')
+            password = data.get('password')
             if token:
                 print(f"Received token: {token}")
                 print(f"Decrypting links for {title}")
-                links = get_filecrypt_links(shared_state, token, title, link, password)
+                download_links = get_filecrypt_links(shared_state, token, title, link, password)
 
-                print(f"Decrypted {len(links)} download links for {title}")
+                print(f"Decrypted {len(download_links)} download links for {title}")
 
-                shared_state.download_package(links, title, password, package_id)
+                shared_state.download_package(download_links, title, password, package_id)
                 shared_state.get_db("protected").delete(package_id)
         except Exception as e:
             print(f"Error decrypting: {e}")
 
-        return {"success": bool(links), "title": title}
+        return {"success": bool(download_links), "title": title}
 
     @app.post('/captcha/<captcha_id>.html')
     def proxy(captcha_id):
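For reference, submit_token() now expects the CAPTCHA page to post the package metadata back together with the solved token, and it answers with a success flag plus the package title. A rough sketch of such a call; the route decorator for submit_token() sits outside this hunk, so the `/captcha` path is an assumption, and all values are made up:

```python
import requests

# Assumed endpoint path and made-up values; submit_token() reads exactly these JSON keys.
response = requests.post(
    "http://192.168.1.1:8080/captcha",
    json={
        "token": "solved-captcha-token",
        "package_id": "Quasarr_movies_1234567890",
        "title": "Some.Movie.2024.German.1080p.WEB.x264-GROUP",
        "link": "https://filecrypt.cc/Container/ABCDEF.html",
        "password": "dw.example",
    },
)
print(response.json())  # e.g. {"success": True, "title": "Some.Movie.2024.German.1080p.WEB.x264-GROUP"}
```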
Skipping password entry!") + if password and password_field: print("Using Password: " + password) - output = session.post(url, data=password_id + "=" + password, + output = session.post(url, data=password_field + "=" + password, headers={'User-Agent': shared_state.values["user_agent"], 'Content-Type': 'application/x-www-form-urlencoded'}) else: diff --git a/quasarr/downloads/__init__.py b/quasarr/downloads/__init__.py index ca8bdc2..d114ca3 100644 --- a/quasarr/downloads/__init__.py +++ b/quasarr/downloads/__init__.py @@ -2,6 +2,9 @@ # Quasarr # Project by https://github.com/rix1337 +import json + +from quasarr.downloads.sources.dw import get_dw_download_links from quasarr.downloads.sources.nx import get_nx_download_links from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException from quasarr.providers.notifications import send_discord_captcha_alert @@ -40,13 +43,13 @@ def get_packages(shared_state): if protected_packages: for package in protected_packages: package_id = package[0] - package_details = package[1].split("|") + data = json.loads(package[1]) details = { - "title": package_details[0], - "url": package_details[1], - "size_mb": package_details[2], - "password": package_details[3] + "title": data["title"], + "urls": data["links"], + "size_mb": data["size_mb"], + "password": data["password"] } packages.append({ @@ -195,36 +198,6 @@ def get_packages(shared_state): return downloads -def download_package(shared_state, request_from, title, url, size_mb, password): - if "radarr".lower() in request_from.lower(): - category = "movies" - else: - category = "tv" - - package_id = "" - - nx = shared_state.values["config"]("Hostnames").get("nx") - if nx.lower() in url.lower(): - links = get_nx_download_links(shared_state, url, title) - print(f"Decrypted {len(links)} download links for {title}") - package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}" - - added = shared_state.download_package(links, title, password, package_id) - - if not added: - print(f"Failed to add {title} to linkgrabber") - package_id = None - - elif "filecrypt".lower() in url.lower(): - print(f"CAPTCHA-Solution required for {title}{shared_state.values['external_address']}/captcha") - send_discord_captcha_alert(shared_state, title) - package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}" - blob = f"{title}|{url}|{size_mb}|{password}" - shared_state.values["database"]("protected").update_store(package_id, blob) - - return package_id - - def delete_package(shared_state, package_id): deleted = "" @@ -255,3 +228,43 @@ def delete_package(shared_state, package_id): else: print(f"Failed to delete package {package_id}") return deleted + + +def download_package(shared_state, request_from, title, url, size_mb, password): + if "radarr".lower() in request_from.lower(): + category = "movies" + else: + category = "tv" + + package_id = "" + + dw = shared_state.values["config"]("Hostnames").get("dw") + nx = shared_state.values["config"]("Hostnames").get("nx") + + if nx.lower() in url.lower(): + links = get_nx_download_links(shared_state, url, title) + print(f"Decrypted {len(links)} download links for {title}") + package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}" + + added = shared_state.download_package(links, title, password, package_id) + + if not added: + print(f"Failed to add {title} to linkgrabber") + package_id = None + + elif dw.lower() in url.lower(): + links = get_dw_download_links(shared_state, url, title) + 
print(f"CAPTCHA-Solution required for {title} - {shared_state.values['external_address']}/captcha") + send_discord_captcha_alert(shared_state, title) + package_id = f"Quasarr_{category}_{str(hash(title + str(links))).replace('-', '')}" + blob = json.dumps({"title": title, "links": links, "size_mb": size_mb, "password": password}) + shared_state.values["database"]("protected").update_store(package_id, blob) + + elif "filecrypt".lower() in url.lower(): + print(f"CAPTCHA-Solution required for {title} - {shared_state.values['external_address']}/captcha") + send_discord_captcha_alert(shared_state, title) + package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}" + blob = json.dumps({"title": title, "links": [[url, "filecrypt"]], "size_mb": size_mb, "password": password}) + shared_state.values["database"]("protected").update_store(package_id, blob) + + return package_id diff --git a/quasarr/downloads/sources/dw.py b/quasarr/downloads/sources/dw.py new file mode 100644 index 0000000..85b5ef0 --- /dev/null +++ b/quasarr/downloads/sources/dw.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Quasarr +# Project by https://github.com/rix1337 + +import re + +import requests +from bs4 import BeautifulSoup + + +def get_dw_download_links(shared_state, url, title): + dw = shared_state.values["config"]("Hostnames").get("dw") + ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php" + + headers = { + 'User-Agent': shared_state.values["user_agent"], + } + + session = requests.Session() + + try: + request = session.get(url, headers=headers) + content = BeautifulSoup(request.text, "html.parser") + download_buttons = content.findAll("button", {"class": "show_link"}) + except: + print(f"DW site has been updated. Grabbing download links for {title} not possible!") + return False + + download_links = [] + try: + for button in download_buttons: + payload = f"action=show_link&link_id={button['value']}" + headers = { + 'User-Agent': shared_state.values["user_agent"], + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' + } + + response = session.post(ajax_url, payload, headers=headers) + if response.status_code != 200: + print(f"DW site has been updated. Grabbing download links for {title} not possible!") + continue + else: + response = response.json() + link = response["data"].split(",")[0] + + if dw in link: + match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link) + if match: + link = (f'https://filecrypt.cc/Container/{match.group(1)}' + f'.html{match.group(2) if match.group(2) else ""}') + + hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "") + download_links.append([link, hoster]) + except: + print(f"DW site has been updated. Parsing download links for {title} not possible!") + pass + + return download_links diff --git a/quasarr/providers/notifications.py b/quasarr/providers/notifications.py index 2beff88..b37dc3f 100644 --- a/quasarr/providers/notifications.py +++ b/quasarr/providers/notifications.py @@ -13,7 +13,7 @@ def send_discord_captcha_alert(shared_state, title): data = { 'username': 'Quasarr', - # 'avatar_url': 'https://imgur.com/fooBar.png', + 'avatar_url': 'https://i.imgur.com/UXBdr1h.png', 'embeds': [{ 'title': title, 'description': 'Links are protected. 
diff --git a/quasarr/providers/notifications.py b/quasarr/providers/notifications.py
index 2beff88..b37dc3f 100644
--- a/quasarr/providers/notifications.py
+++ b/quasarr/providers/notifications.py
@@ -13,7 +13,7 @@ def send_discord_captcha_alert(shared_state, title):
 
     data = {
         'username': 'Quasarr',
-        # 'avatar_url': 'https://imgur.com/fooBar.png',
+        'avatar_url': 'https://i.imgur.com/UXBdr1h.png',
         'embeds': [{
             'title': title,
             'description': 'Links are protected. Please solve the CAPTCHA to continue downloading.',
diff --git a/quasarr/providers/shared_state.py b/quasarr/providers/shared_state.py
index eb3af70..4aaf02a 100644
--- a/quasarr/providers/shared_state.py
+++ b/quasarr/providers/shared_state.py
@@ -5,8 +5,8 @@
 import os
 import time
 
-from quasarr.persistence.config import Config
-from quasarr.persistence.sqlite_database import DataBase
+from quasarr.storage.config import Config
+from quasarr.storage.sqlite_database import DataBase
 from quasarr.providers.myjd_api import Myjdapi, TokenExpiredException, RequestTimeoutException, MYJDException, Jddevice
 
 values = {}
@@ -31,7 +31,7 @@ def update(key, value):
 
 
 def set_sites():
-    update("sites", ["FX", "NX"])
+    update("sites", ["DW", "FX", "NX"])
 
 
 def set_connection_info(internal_address, external_address, port):
diff --git a/quasarr/providers/version.py b/quasarr/providers/version.py
index 01b7d58..42de728 100644
--- a/quasarr/providers/version.py
+++ b/quasarr/providers/version.py
@@ -6,7 +6,7 @@
 
 
 def get_version():
-    return "0.1.4"
+    return "0.1.5"
 
 
 def create_version_file():
diff --git a/quasarr/search/__init__.py b/quasarr/search/__init__.py
index 11488a6..e59ec09 100644
--- a/quasarr/search/__init__.py
+++ b/quasarr/search/__init__.py
@@ -2,18 +2,32 @@
 # Quasarr
 # Project by https://github.com/rix1337
 
+from quasarr.search.sources.dw import dw_feed, dw_search
 from quasarr.search.sources.fx import fx_feed, fx_search
 from quasarr.search.sources.nx import nx_feed, nx_search
 
 
 def get_search_results(shared_state, request_from, imdb_id=None):
     results = []
+
+    dw = shared_state.values["config"]("Hostnames").get("dw")
+    fx = shared_state.values["config"]("Hostnames").get("fx")
+    nx = shared_state.values["config"]("Hostnames").get("nx")
+
     if imdb_id:
-        results.extend(nx_search(shared_state, request_from, imdb_id))
-        results.extend(fx_search(shared_state, imdb_id))
+        if dw:
+            results.extend(dw_search(shared_state, request_from, imdb_id))
+        if fx:
+            results.extend(fx_search(shared_state, imdb_id))
+        if nx:
+            results.extend(nx_search(shared_state, request_from, imdb_id))
     else:
-        results.extend(nx_feed(shared_state, request_from))
-        results.extend(fx_feed(shared_state))
+        if dw:
+            results.extend(dw_feed(shared_state, request_from))
+        if fx:
+            results.extend(fx_feed(shared_state))
+        if nx:
+            results.extend(nx_feed(shared_state, request_from))
 
     print(f"Providing {len(results)} releases to {request_from}")
     return results
diff --git a/quasarr/search/sources/dw.py b/quasarr/search/sources/dw.py
new file mode 100644
index 0000000..e5e5539
--- /dev/null
+++ b/quasarr/search/sources/dw.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import datetime
+import re
+from base64 import urlsafe_b64encode
+
+import requests
+from bs4 import BeautifulSoup
+
+
+def convert_to_rss_date(date_str):
+    german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
+                     "Juli", "August", "September", "Oktober", "November", "Dezember"]
+    english_months = ["January", "February", "March", "April", "May", "June",
+                      "July", "August", "September", "October", "November", "December"]
+
+    for german, english in zip(german_months, english_months):
+        if german in date_str:
+            date_str = date_str.replace(german, english)
+            break
+
+    parsed_date = datetime.datetime.strptime(date_str, '%d. %B %Y / %H:%M')
+    rss_date = parsed_date.strftime('%a, %d %b %Y %H:%M:%S %z')
+
+    return rss_date
+
+
+def extract_size(text):
+    match = re.match(r"(\d+) ([A-Za-z]+)", text)
+    if match:
+        size = match.group(1)
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+    else:
+        raise ValueError(f"Invalid size format: {text}")
+
+
+def dw_get_download_links(shared_state, content, title):
+    try:
+        try:
+            content = BeautifulSoup(content, "html.parser")
+        except:
+            content = BeautifulSoup(str(content), "html.parser")
+        download_buttons = content.findAll("button", {"class": "show_link"})
+    except:
+        print("DW site has been updated. Parsing download links for " + title + " not possible!")
+        return False
+
+    dw = shared_state.values["config"]("Hostnames").get("dw")
+    ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"
+
+    download_links = []
+    try:
+        for button in download_buttons:
+            payload = "action=show_link&link_id=" + button["value"]
+
+            headers = {
+                'User-Agent': shared_state.values["user_agent"],
+            }
+
+            response = requests.post(ajax_url, payload, headers=headers).json()
+            if response["success"]:
+                link = response["data"].split(",")[0]
+
+                if dw in link:
+                    match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
+                    if match:
+                        link = (f'https://filecrypt.cc/Container/{match.group(1)}'
+                                f'.html{match.group(2) if match.group(2) else ""}')
+
+                hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
+                download_links.append([link, hoster])
+    except:
+        print("DW site has been updated. Parsing download links not possible!")
+        pass
+
+    return download_links
+
+
+def dw_feed(shared_state, request_from):
+    releases = []
+    dw = shared_state.values["config"]("Hostnames").get("dw")
+    password = dw
+
+    if "Radarr" in request_from:
+        feed_type = "videos/filme/"
+    else:
+        feed_type = "videos/serien/"
+
+    url = f'https://{dw}/{feed_type}'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers).content
+        feed = BeautifulSoup(request, "html.parser")
+        articles = feed.findAll('h4')
+
+        for article in articles:
+            try:
+                source = article.a["href"]
+                title = article.a.text.strip()
+                size_info = article.find("span").text.strip()
+                size_item = extract_size(size_info)
+                mb = shared_state.convert_to_mb(size_item)
+                date = article.parent.parent.find("span", {"class": "date updated"}).text.strip()
+                published = convert_to_rss_date(date)
+                payload = urlsafe_b64encode(f"{title}|{source}|{mb}|{password}".encode("utf-8")).decode(
+                    "utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            except Exception as e:
+                print(f"Error parsing DW feed: {e}")
+                continue
+
+            releases.append({
+                "details": {
+                    "title": f"[DW] {title}",
+                    "link": link,
+                    "size": mb,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+
+    except Exception as e:
+        print(f"Error loading DW feed: {e}")
+
+    return releases
+
+
+def dw_search(shared_state, request_from, imdb_id):
+    releases = []
+    dw = shared_state.values["config"]("Hostnames").get("dw")
+    password = dw
+
+    if "Radarr" in request_from:
+        search_type = "videocategory=filme"
+    else:
+        search_type = "videocategory=serien"
+
+    url = f'https://{dw}/?s={imdb_id}&{search_type}'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers).content
+        search = BeautifulSoup(request, "html.parser")
+        results = search.findAll('h4')
+
+    except Exception as e:
as e: + print(f"Error loading DW search feed: {e}") + return releases + + if results: + for result in results: + try: + source = result.a["href"] + title = result.a.text.strip() + size_info = result.find("span").text.strip() + size_item = extract_size(size_info) + mb = shared_state.convert_to_mb(size_item) + date = result.parent.parent.find("span", {"class": "date updated"}).text.strip() + published = convert_to_rss_date(date) + payload = urlsafe_b64encode(f"{title}|{source}|{mb}|{password}".encode("utf-8")).decode( + "utf-8") + link = f"{shared_state.values['internal_address']}/download/?payload={payload}" + except Exception as e: + print(f"Error parsing DW search: {e}") + continue + + releases.append({ + "details": { + "title": f"[DW] {title}", + "link": link, + "size": mb, + "date": published, + "source": source + }, + "type": "protected" + }) + + return releases diff --git a/quasarr/search/sources/nx.py b/quasarr/search/sources/nx.py index 6431d51..d9c953a 100644 --- a/quasarr/search/sources/nx.py +++ b/quasarr/search/sources/nx.py @@ -12,9 +12,7 @@ def nx_feed(shared_state, request_from): releases = [] - nx = shared_state.values["config"]("Hostnames").get("nx") - password = nx if "Radarr" in request_from: @@ -76,16 +74,14 @@ def nx_feed(shared_state, request_from): def nx_search(shared_state, request_from, imdb_id): releases = [] - - password = "" + nx = shared_state.values["config"]("Hostnames").get("nx") + password = nx if "Radarr" in request_from: valid_type = "movie" else: valid_type = "episode" - nx = shared_state.values["config"]("Hostnames").get("nx") - german_title = get_localized_title(shared_state, imdb_id, 'de') if not german_title: print(f"German title from IMDb required for NX search") diff --git a/quasarr/persistence/__init__.py b/quasarr/storage/__init__.py similarity index 100% rename from quasarr/persistence/__init__.py rename to quasarr/storage/__init__.py diff --git a/quasarr/persistence/config.py b/quasarr/storage/config.py similarity index 98% rename from quasarr/persistence/config.py rename to quasarr/storage/config.py index d2fe9c3..8e8476d 100644 --- a/quasarr/persistence/config.py +++ b/quasarr/storage/config.py @@ -11,7 +11,7 @@ from Cryptodome.Random import get_random_bytes from Cryptodome.Util.Padding import pad -from quasarr.persistence.sqlite_database import DataBase +from quasarr.storage.sqlite_database import DataBase from quasarr.providers import shared_state diff --git a/quasarr/providers/setup.py b/quasarr/storage/setup.py similarity index 98% rename from quasarr/providers/setup.py rename to quasarr/storage/setup.py index a395c67..81a1472 100644 --- a/quasarr/providers/setup.py +++ b/quasarr/storage/setup.py @@ -10,7 +10,7 @@ import quasarr from quasarr.downloads.sources import nx -from quasarr.persistence.config import Config +from quasarr.storage.config import Config from quasarr.providers.html_templates import render_button, render_form, render_success, render_fail from quasarr.providers.web_server import Server @@ -69,7 +69,7 @@ def hostnames_config(shared_state): def hostname_form(): hostname_fields = '''
diff --git a/quasarr/search/sources/nx.py b/quasarr/search/sources/nx.py
index 6431d51..d9c953a 100644
--- a/quasarr/search/sources/nx.py
+++ b/quasarr/search/sources/nx.py
@@ -12,9 +12,7 @@ def nx_feed(shared_state, request_from):
     releases = []
-
     nx = shared_state.values["config"]("Hostnames").get("nx")
-
     password = nx
 
     if "Radarr" in request_from:
@@ -76,16 +74,14 @@ def nx_feed(shared_state, request_from):
 
 def nx_search(shared_state, request_from, imdb_id):
     releases = []
-
-    password = ""
+    nx = shared_state.values["config"]("Hostnames").get("nx")
+    password = nx
 
     if "Radarr" in request_from:
         valid_type = "movie"
     else:
         valid_type = "episode"
 
-    nx = shared_state.values["config"]("Hostnames").get("nx")
-
     german_title = get_localized_title(shared_state, imdb_id, 'de')
     if not german_title:
         print(f"German title from IMDb required for NX search")
diff --git a/quasarr/persistence/__init__.py b/quasarr/storage/__init__.py
similarity index 100%
rename from quasarr/persistence/__init__.py
rename to quasarr/storage/__init__.py
diff --git a/quasarr/persistence/config.py b/quasarr/storage/config.py
similarity index 98%
rename from quasarr/persistence/config.py
rename to quasarr/storage/config.py
index d2fe9c3..8e8476d 100644
--- a/quasarr/persistence/config.py
+++ b/quasarr/storage/config.py
@@ -11,7 +11,7 @@
 from Cryptodome.Random import get_random_bytes
 from Cryptodome.Util.Padding import pad
 
-from quasarr.persistence.sqlite_database import DataBase
+from quasarr.storage.sqlite_database import DataBase
 from quasarr.providers import shared_state
diff --git a/quasarr/providers/setup.py b/quasarr/storage/setup.py
similarity index 98%
rename from quasarr/providers/setup.py
rename to quasarr/storage/setup.py
index a395c67..81a1472 100644
--- a/quasarr/providers/setup.py
+++ b/quasarr/storage/setup.py
@@ -10,7 +10,7 @@
 import quasarr
 from quasarr.downloads.sources import nx
-from quasarr.persistence.config import Config
+from quasarr.storage.config import Config
 from quasarr.providers.html_templates import render_button, render_form, render_success, render_fail
 from quasarr.providers.web_server import Server
@@ -69,7 +69,7 @@ def hostnames_config(shared_state):
     def hostname_form():
         hostname_fields = '''
-            <!-- hostname input markup (previous revision) -->
+            <!-- hostname input markup (revised) -->
             '''
         hostname_form_content = "".join(
@@ -143,7 +143,7 @@ def nx_credentials_config(shared_state):
     def nx_credentials_form():
         form_content = '''
-            <!-- credentials form markup (previous revision) -->
+            <!-- credentials form markup (revised) -->
 
 
@@ -191,7 +191,7 @@ def hostname_form():
         verify_form_html = f'''
 
-            <!-- credentials form markup (previous revision) -->
+            <!-- credentials form markup (revised) -->
 
 
             {render_button("Verify Credentials",
diff --git a/quasarr/persistence/sqlite_database.py b/quasarr/storage/sqlite_database.py
similarity index 100%
rename from quasarr/persistence/sqlite_database.py
rename to quasarr/storage/sqlite_database.py