-Filtering and Sorting
-
-Set up your filtering parameters
-and sorting preferences here.
+
+Filtering and Sorting
+
+Set up your filtering parameters
+and sorting preferences here.
+
Sorting Options
diff --git a/stream_fusion/static/logo_anim_stream-fusion-3.gif b/stream_fusion/static/logo_anim_stream-fusion-3.gif
new file mode 100644
index 0000000..477fb66
Binary files /dev/null and b/stream_fusion/static/logo_anim_stream-fusion-3.gif differ
diff --git a/stream_fusion/static/logo_anim_stream-fusion-3.mp4 b/stream_fusion/static/logo_anim_stream-fusion-3.mp4
new file mode 100644
index 0000000..43a990a
Binary files /dev/null and b/stream_fusion/static/logo_anim_stream-fusion-3.mp4 differ
diff --git a/stream_fusion/static/videos/en_download_video.mp4 b/stream_fusion/static/videos/en_download_video.mp4
new file mode 100644
index 0000000..7d8cda6
Binary files /dev/null and b/stream_fusion/static/videos/en_download_video.mp4 differ
diff --git a/stream_fusion/static/videos/fr_download_video.mp4 b/stream_fusion/static/videos/fr_download_video.mp4
new file mode 100644
index 0000000..7cfbf73
Binary files /dev/null and b/stream_fusion/static/videos/fr_download_video.mp4 differ
diff --git a/stream_fusion/utils/cache/cache.py b/stream_fusion/utils/cache/cache.py
index a8d7435..a9ae758 100644
--- a/stream_fusion/utils/cache/cache.py
+++ b/stream_fusion/utils/cache/cache.py
@@ -9,6 +9,7 @@
from stream_fusion.constants import EXCLUDED_TRACKERS
from stream_fusion.settings import settings
+
def search_public(media):
logger.info("Searching for public cached " + media.type + " results")
url = settings.public_cache_url + "getResult/" + media.type + "/"
@@ -36,25 +37,33 @@ def cache_public(torrents: List[TorrentItem], media):
cache_item = dict()
cache_item["title"] = torrent.raw_title
- cache_item["trackers"] = "tracker:".join(torrent.trackers)
- cache_item["magnet"] = torrent.magnet
+ cache_item["trackers"] = (
+ "tracker:".join(torrent.trackers) if torrent.trackers else "Unknown"
+ )
+ cache_item["magnet"] = torrent.magnet if torrent.magnet else "Unknown"
cache_item["files"] = [] # I guess keep it empty?
cache_item["hash"] = torrent.info_hash
- cache_item["indexer"] = torrent.indexer
+ cache_item["indexer"] = torrent.indexer if torrent.indexer else "Unknown"
cache_item["quality"] = (
torrent.parsed_data.resolution
if torrent.parsed_data.resolution
else "Unknown"
)
- cache_item["qualitySpec"] = ";".join(torrent.parsed_data.quality)
- cache_item["seeders"] = torrent.seeders
+ cache_item["qualitySpec"] = (
+ ";".join(torrent.parsed_data.quality)
+ if torrent.parsed_data.quality
+ else "Unknown"
+ )
+ cache_item["seeders"] = torrent.seeders if torrent.seeders else 0
cache_item["size"] = torrent.size
- cache_item["language"] = ";".join(torrent.languages)
+ cache_item["language"] = (
+ ";".join(torrent.languages) if torrent.languages else "Unknown"
+ )
cache_item["type"] = media.type
cache_item["availability"] = False
if media.type == "movie":
- cache_item["year"] = media.year
+ cache_item["year"] = media.year if media.year else "1990"
elif media.type == "series":
cache_item["season"] = media.season
cache_item["episode"] = media.episode
@@ -74,9 +83,13 @@ def cache_public(torrents: List[TorrentItem], media):
response.raise_for_status()
if response.status_code == 200:
- logger.info(f"Cached {str(len(cache_items))} {media.type} results on Public Cache")
+ logger.info(
+ f"Cached {str(len(cache_items))} {media.type} results on Public Cache"
+ )
else:
- logger.error(f"Failed to public cache {media.type} results: {str(response)}")
+ logger.error(
+ f"Failed to public cache {media.type} results: {str(response)}"
+ )
except:
logger.error("Failed to public cache results")
pass
diff --git a/stream_fusion/utils/cache/local_redis.py b/stream_fusion/utils/cache/local_redis.py
index f4b6151..379d2dd 100644
--- a/stream_fusion/utils/cache/local_redis.py
+++ b/stream_fusion/utils/cache/local_redis.py
@@ -28,12 +28,11 @@ async def get_redis_client(self):
max_connections=10
)
except Exception as e:
- self.logger.error(f"Failed to create Redis client: {e}")
+ self.logger.error(f"RedisCache: Failed to create Redis client: {e}")
self._redis_client = None
return self._redis_client
async def reconnect(self):
- self.logger.info("Attempting to reconnect to Redis")
self._redis_client = None
return await self.get_redis_client()
@@ -44,10 +43,10 @@ async def execute_with_retry(self, operation, *args, **kwargs):
return await operation(*args, **kwargs)
except ConnectionError:
if attempt < max_retries - 1:
- self.logger.warning(f"Redis connection lost. Attempting reconnection (attempt {attempt + 1}/{max_retries})")
+ self.logger.warning(f"RedisCache: Connection lost. Attempting reconnection (attempt {attempt + 1}/{max_retries})")
await self.reconnect()
else:
- self.logger.error("Max retries reached. Unable to reconnect to Redis.")
+ self.logger.error("RedisCache: Max retries reached. Unable to reconnect to Redis.")
raise
async def get_list(self, key: str) -> List[Any]:
@@ -80,46 +79,45 @@ async def clear(self) -> None:
try:
client = await self.get_redis_client()
await client.flushdb()
- self.logger.info("Cache cleared successfully")
except Exception as e:
- self.logger.error(f"Error clearing cache: {e}")
+ self.logger.error(f"RedisCache: Error clearing cache: {e}")
async def get_or_set(self, func: callable, *args, **kwargs) -> Any:
- self.logger.debug(f"Entering get_or_set for function: {func.__name__}")
+ self.logger.debug(f"RedisCache: Entering get_or_set for function: {func.__name__}")
start_time = time.time()
if not await self.can_cache():
- self.logger.debug("Cache is not available, executing function directly")
+ self.logger.debug("RedisCache: Cache is not available, executing function directly")
return await self._execute_func(func, *args, **kwargs)
key = self.generate_key(func.__name__, *args, **kwargs)
- self.logger.debug(f"Generated cache key: {key}")
+ self.logger.debug(f"RedisCache: Generated cache key: {key}")
cached_result = await self.get(key)
- self.logger.debug(f"Attempted to get result from cache. Found: {cached_result is not None}")
+ self.logger.debug(f"RedisCache: Attempted to get result from cache. Found: {cached_result is not None}")
if cached_result is not None:
- self.logger.debug(f"Returning cached result for key: {key}")
+ self.logger.debug(f"RedisCache: Returning cached result for key: {key}")
return cached_result
- self.logger.debug(f"Cache miss for key: {key}. Executing function.")
+ self.logger.debug(f"RedisCache: Cache miss for key: {key}. Executing function.")
result = await self._execute_func(func, *args, **kwargs)
- self.logger.debug(f"Function execution completed. Setting result in cache.")
+ self.logger.debug(f"RedisCache: Function execution completed. Setting result in cache.")
await self.set(key, result)
end_time = time.time()
- self.logger.debug(f"get_or_set completed in {end_time - start_time:.2f} seconds")
+ self.logger.debug(f"RedisCache: get_or_set completed in {end_time - start_time:.2f} seconds")
return result
async def _execute_func(self, func, *args, **kwargs):
- self.logger.debug(f"Executing function: {func.__name__}")
+ self.logger.debug(f"RedisCache: Executing function: {func.__name__}")
start_time = time.time()
sig = inspect.signature(func)
params = sig.parameters
- self.logger.debug(f"Function {func.__name__} expects {len(params)} arguments")
- self.logger.debug(f"Received args: {args}, kwargs: {kwargs}")
+ self.logger.debug(f"RedisCache: Function {func.__name__} expects {len(params)} arguments")
+ self.logger.trace(f"RedisCache: Received args: {args}, kwargs: {kwargs}")
call_args = {}
for i, (param_name, param) in enumerate(params.items()):
@@ -130,41 +128,41 @@ async def _execute_func(self, func, *args, **kwargs):
elif param.default is not param.empty:
call_args[param_name] = param.default
else:
- self.logger.error(f"Missing required argument: {param_name}")
+ self.logger.error(f"RedisCache: Missing required argument: {param_name}")
raise TypeError(f"Missing required argument: {param_name}")
- self.logger.debug(f"Prepared arguments for {func.__name__}: {call_args}")
+ self.logger.trace(f"RedisCache: Prepared arguments for {func.__name__}: {call_args}")
if asyncio.iscoroutinefunction(func):
- self.logger.debug(f"Function {func.__name__} is asynchronous")
+ self.logger.debug(f"RedisCache: Function {func.__name__} is asynchronous")
try:
result = await func(**call_args)
- self.logger.debug(f"Asynchronous function {func.__name__} completed successfully")
+ self.logger.debug(f"RedisCache: Asynchronous function {func.__name__} completed successfully")
except Exception as e:
- self.logger.error(f"Error executing asynchronous function {func.__name__}: {str(e)}")
+ self.logger.error(f"RedisCache: Error executing asynchronous function {func.__name__}: {str(e)}")
raise
else:
- self.logger.debug(f"Function {func.__name__} is synchronous")
+ self.logger.debug(f"RedisCache: Function {func.__name__} is synchronous")
try:
result = func(**call_args)
- self.logger.debug(f"Synchronous function {func.__name__} completed successfully")
+ self.logger.debug(f"RedisCache: Synchronous function {func.__name__} completed successfully")
except Exception as e:
- self.logger.error(f"Error executing synchronous function {func.__name__}: {str(e)}")
+ self.logger.error(f"RedisCache: Error executing synchronous function {func.__name__}: {str(e)}")
raise
end_time = time.time()
- self.logger.debug(f"Function {func.__name__} executed in {end_time - start_time:.2f} seconds")
+ self.logger.debug(f"RedisCache: Function {func.__name__} executed in {end_time - start_time:.2f} seconds")
return result
async def can_cache(self) -> bool:
- self.logger.debug("Checking if caching is possible")
+ self.logger.debug("RedisCache: Checking if caching is possible")
try:
client = await self.get_redis_client()
result = await client.ping()
- self.logger.debug(f"Redis ping result: {result}")
+ self.logger.debug(f"RedisCache: Redis ping result: {result}")
return result
except Exception as e:
- self.logger.error(f"Unable to connect to Redis: {e}")
+ self.logger.error(f"RedisCache: Unable to connect to Redis: {e}")
return False
async def get(self, key: str) -> Any:
diff --git a/stream_fusion/utils/debrid/alldebrid.py b/stream_fusion/utils/debrid/alldebrid.py
index 55baa13..b5761f5 100644
--- a/stream_fusion/utils/debrid/alldebrid.py
+++ b/stream_fusion/utils/debrid/alldebrid.py
@@ -4,7 +4,6 @@
from fastapi import HTTPException
-from stream_fusion.constants import NO_CACHE_VIDEO_URL
from stream_fusion.utils.debrid.base_debrid import BaseDebrid
from stream_fusion.utils.general import season_episode_in_filename
from stream_fusion.logging_config import logger
@@ -14,25 +13,23 @@
class AllDebrid(BaseDebrid):
def __init__(self, config):
super().__init__(config)
- self.base_url = "https://api.alldebrid.com/v4/"
+ self.base_url = f"{settings.ad_base_url}/{settings.ad_api_version}/"
self.agent = settings.ad_user_app
def get_headers(self):
if settings.ad_unique_account:
if not settings.proxied_link:
- logger.warning("AllDebrid unique account is enabled, but proxied link is disabled. "
- "This may lead to account ban.")
- logger.warning("Please enable proxied link in the settings.")
+ logger.warning("AllDebrid: Unique account enabled, but proxied link is disabled. This may lead to account ban.")
+ logger.warning("AllDebrid: Please enable proxied link in the settings.")
raise HTTPException(status_code=500, detail="Proxied link is disabled.")
if settings.ad_token:
return {"Authorization": f"Bearer {settings.ad_token}"}
else:
- logger.warning("AllDebrid unique account is enabled, but no token is provided. "
- "Please provide a token in the env.")
+ logger.warning("AllDebrid: Unique account enabled, but no token provided. Please provide a token in the env.")
raise HTTPException(status_code=500, detail="AllDebrid token is not provided.")
else:
return {"Authorization": f"Bearer {self.config["ADToken"]}"}
-
+
def add_magnet(self, magnet, ip=None):
url = f"{self.base_url}magnet/upload?agent={self.agent}"
data = {"magnets[]": magnet}
@@ -56,27 +53,27 @@ def get_stream_link(self, query, config, ip=None):
stream_type = query['type']
torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None
- torrent_id = self.__add_magnet_or_torrent(magnet, torrent_download, ip)
- logger.info(f"Torrent ID: {torrent_id}")
+ torrent_id = self.add_magnet_or_torrent(magnet, torrent_download, ip)
+ logger.info(f"AllDebrid: Torrent ID: {torrent_id}")
if not self.wait_for_ready_status(
lambda: self.check_magnet_status(torrent_id, ip)["data"]["magnets"]["status"] == "Ready"):
- logger.error("Torrent not ready, caching in progress.")
- return NO_CACHE_VIDEO_URL
- logger.info("Torrent is ready.")
+ logger.error("AllDebrid: Torrent not ready, caching in progress.")
+ return settings.no_cache_video_url
+ logger.info("AllDebrid: Torrent is ready.")
- logger.info(f"Getting data for torrent id: {torrent_id}")
+ logger.info(f"AllDebrid: Retrieving data for torrent ID: {torrent_id}")
data = self.check_magnet_status(torrent_id, ip)["data"]
- logger.info(f"Retrieved data for torrent id")
+ logger.info(f"AllDebrid: Data retrieved for torrent ID")
- link = NO_CACHE_VIDEO_URL
+ link = settings.no_cache_video_url
if stream_type == "movie":
- logger.info("Getting link for movie")
+ logger.info("AllDebrid: Getting link for movie")
link = max(data["magnets"]['links'], key=lambda x: x['size'])['link']
elif stream_type == "series":
numeric_season = int(query['season'].replace("S", ""))
numeric_episode = int(query['episode'].replace("E", ""))
- logger.info(f"Getting link for series {numeric_season}, {numeric_episode}")
+ logger.info(f"AllDebrid: Getting link for series S{numeric_season:02d}E{numeric_episode:02d}")
matching_files = []
for file in data["magnets"]["links"]:
@@ -84,63 +81,63 @@ def get_stream_link(self, query, config, ip=None):
matching_files.append(file)
if len(matching_files) == 0:
- logger.error(f"No matching files for {numeric_season} {numeric_episode} in torrent.")
- raise HTTPException(status_code=404, detail=f"No matching files for {numeric_season} {numeric_episode} in torrent.")
+ logger.error(f"AllDebrid: No matching files for S{numeric_season:02d}E{numeric_episode:02d} in torrent.")
+ raise HTTPException(status_code=404, detail=f"No matching files for S{numeric_season:02d}E{numeric_episode:02d} in torrent.")
link = max(matching_files, key=lambda x: x["size"])["link"]
else:
- logger.error("Unsupported stream type.")
+ logger.error("AllDebrid: Unsupported stream type.")
raise HTTPException(status_code=500, detail="Unsupported stream type.")
- if link == NO_CACHE_VIDEO_URL:
- logger.info("Video are not cached in ad returning NO_CACHE_VIDEO_URL")
+ if link == settings.no_cache_video_url:
+ logger.info("AllDebrid: Video not cached, returning NO_CACHE_VIDEO_URL")
return link
- logger.info(f"Alldebrid link: {link}")
+ logger.info(f"AllDebrid: Retrieved link: {link}")
unlocked_link_data = self.unrestrict_link(link, ip)
if not unlocked_link_data:
- logger.error("Failed to unlock link in ad.")
- raise HTTPException(status_code=500, detail="Failed to unlock link in ad.")
+ logger.error("AllDebrid: Failed to unlock link.")
+ raise HTTPException(status_code=500, detail="Failed to unlock link in AllDebrid.")
- logger.info(f"Unrestricted link: {unlocked_link_data['data']['link']}")
+ logger.info(f"AllDebrid: Unrestricted link: {unlocked_link_data['data']['link']}")
return unlocked_link_data["data"]["link"]
def get_availability_bulk(self, hashes_or_magnets, ip=None):
if len(hashes_or_magnets) == 0:
- logger.info("No hashes to be sent to All-Debrid.")
+ logger.info("AllDebrid: No hashes to be sent.")
return dict()
url = f"{self.base_url}magnet/instant?agent={self.agent}"
data = {"magnets[]": hashes_or_magnets}
return self.json_response(url, method='post', headers=self.get_headers(), data=data)
- def __add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None):
+ def add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None):
torrent_id = ""
if torrent_download is None:
- logger.info(f"Adding magnet to AllDebrid")
+ logger.info(f"AllDebrid: Adding magnet")
magnet_response = self.add_magnet(magnet, ip)
- logger.info(f"AllDebrid add magnet response: {magnet_response}")
+ logger.info(f"AllDebrid: Add magnet response received")
if not magnet_response or "status" not in magnet_response or magnet_response["status"] != "success":
return "Error: Failed to add magnet."
torrent_id = magnet_response["data"]["magnets"][0]["id"]
else:
- logger.info(f"Downloading torrent file")
+ logger.info(f"AllDebrid: Downloading torrent file")
torrent_file = self.download_torrent_file(torrent_download)
- logger.info(f"Torrent file downloaded")
+ logger.info(f"AllDebrid: Torrent file downloaded")
- logger.info(f"Adding torrent file to AllDebrid")
+ logger.info(f"AllDebrid: Adding torrent file")
upload_response = self.add_torrent(torrent_file, ip)
- logger.info(f"AllDebrid add torrent file response: {upload_response}")
+ logger.info(f"AllDebrid: Add torrent file response received")
if not upload_response or "status" not in upload_response or upload_response["status"] != "success":
return "Error: Failed to add torrent file in AllDebrid."
torrent_id = upload_response["data"]["files"][0]["id"]
- logger.info(f"New torrent ID: {torrent_id}")
- return torrent_id
\ No newline at end of file
+ logger.info(f"AllDebrid: New torrent ID: {torrent_id}")
+ return torrent_id
diff --git a/stream_fusion/utils/debrid/base_debrid.py b/stream_fusion/utils/debrid/base_debrid.py
index bbf75b6..d8c3de5 100644
--- a/stream_fusion/utils/debrid/base_debrid.py
+++ b/stream_fusion/utils/debrid/base_debrid.py
@@ -1,4 +1,5 @@
from collections import deque
+import json
import time
import requests
@@ -12,37 +13,37 @@ def __init__(self, config):
self.config = config
self.logger = logger
self.__session = self._create_session()
-
- # Limiteurs de débit
+
+ # Rate limiters
self.global_limit = 250
self.global_period = 60
self.torrent_limit = 1
self.torrent_period = 1
-
+
self.global_requests = deque()
self.torrent_requests = deque()
def _create_session(self):
session = requests.Session()
if settings.proxy_url:
- self.logger.info(f"Using proxy: {settings.proxy_url}")
+ self.logger.info(f"BaseDebrid: Using proxy: {settings.proxy_url}")
session.proxies = {
- 'http': str(settings.proxy_url),
- 'https': str(settings.proxy_url)
+ "http": str(settings.proxy_url),
+ "https": str(settings.proxy_url),
}
return session
def _rate_limit(self, requests_queue, limit, period):
current_time = time.time()
-
+
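+ # Sliding-window limiter: drop timestamps older than the period, then sleep if the window is full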
while requests_queue and requests_queue[0] <= current_time - period:
requests_queue.popleft()
-
+
if len(requests_queue) >= limit:
sleep_time = requests_queue[0] - (current_time - period)
if sleep_time > 0:
time.sleep(sleep_time)
-
+
requests_queue.append(time.time())
def _global_rate_limit(self):
@@ -51,51 +52,109 @@ def _global_rate_limit(self):
def _torrent_rate_limit(self):
self._rate_limit(self.torrent_requests, self.torrent_limit, self.torrent_period)
- def json_response(self, url, method='get', data=None, headers=None, files=None):
+ def json_response(self, url, method="get", data=None, headers=None, files=None):
self._global_rate_limit()
- if 'torrents' in url:
+ if "torrents" in url:
self._torrent_rate_limit()
max_attempts = 5
for attempt in range(max_attempts):
try:
- if method == 'get':
+ if method == "get":
response = self.__session.get(url, headers=headers)
- elif method == 'post':
- response = self.__session.post(url, data=data, headers=headers, files=files)
- elif method == 'put':
+ elif method == "post":
+ response = self.__session.post(
+ url, data=data, headers=headers, files=files
+ )
+ elif method == "put":
response = self.__session.put(url, data=data, headers=headers)
- elif method == 'delete':
+ elif method == "delete":
response = self.__session.delete(url, headers=headers)
else:
- raise ValueError(f"Unsupported HTTP method: {method}")
+ raise ValueError(f"BaseDebrid: Unsupported HTTP method: {method}")
response.raise_for_status()
- return response.json()
+
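+ # The API may return an empty or non-JSON body; guard the parse and retry with backoff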
+ try:
+ return response.json()
+ except json.JSONDecodeError as json_err:
+ self.logger.error(f"BaseDebrid: Invalid JSON response: {json_err}")
+ self.logger.debug(
+ f"BaseDebrid: Response content: {response.text[:200]}..."
+ )
+ if attempt < max_attempts - 1:
+ wait_time = 2**attempt + 1
+ self.logger.info(
+ f"BaseDebrid: Retrying in {wait_time} seconds..."
+ )
+ time.sleep(wait_time)
+ else:
+ return None
+
except requests.exceptions.HTTPError as e:
- if e.response.status_code == 429:
- wait_time = 2 ** attempt + 1
- self.logger.warning(f"Rate limit exceeded. Attempt {attempt + 1}/{max_attempts}. Waiting for {wait_time} seconds.")
+ status_code = e.response.status_code
+ if status_code == 429:
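+ # Exponential backoff: waits of 2, 3, 5, 9, 17 seconds across the five attempts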
+ wait_time = 2**attempt + 1
+ self.logger.warning(
+ f"BaseDebrid: Rate limit exceeded. Attempt {attempt + 1}/{max_attempts}. Waiting for {wait_time} seconds."
+ )
+ time.sleep(wait_time)
+ elif 400 <= status_code < 500:
+ self.logger.error(
+ f"BaseDebrid: Client error occurred: {e}. Status code: {status_code}"
+ )
+ return None
+ elif 500 <= status_code < 600:
+ self.logger.error(
+ f"BaseDebrid: Server error occurred: {e}. Status code: {status_code}"
+ )
+ if attempt < max_attempts - 1:
+ wait_time = 2**attempt + 1
+ self.logger.info(
+ f"BaseDebrid: Retrying in {wait_time} seconds..."
+ )
+ time.sleep(wait_time)
+ else:
+ return None
+ else:
+ self.logger.error(
+ f"BaseDebrid: Unexpected HTTP error occurred: {e}. Status code: {status_code}"
+ )
+ return None
+ except requests.exceptions.ConnectionError as e:
+ self.logger.error(f"BaseDebrid: Connection error occurred: {e}")
+ if attempt < max_attempts - 1:
+ wait_time = 2**attempt + 1
+ self.logger.info(f"BaseDebrid: Retrying in {wait_time} seconds...")
+ time.sleep(wait_time)
+ else:
+ return None
+ except requests.exceptions.Timeout as e:
+ self.logger.error(f"BaseDebrid: Request timed out: {e}")
+ if attempt < max_attempts - 1:
+ wait_time = 2**attempt + 1
+ self.logger.info(f"BaseDebrid: Retrying in {wait_time} seconds...")
time.sleep(wait_time)
else:
- self.logger.error(f"HTTP error occurred: {e}")
return None
- except Exception as e:
- self.logger.error(f"An error occurred: {e}")
+ except requests.exceptions.RequestException as e:
+ self.logger.error(f"BaseDebrid: An unexpected error occurred: {e}")
return None
- self.logger.error("Max attempts reached. Unable to complete request.")
+ self.logger.error(
+ "BaseDebrid: Max attempts reached. Unable to complete request."
+ )
return None
-
+
def wait_for_ready_status(self, check_status_func, timeout=30, interval=5):
- self.logger.info(f"Waiting for {timeout} seconds to cache.")
+ self.logger.info(f"BaseDebrid: Waiting for {timeout} seconds for caching.")
start_time = time.time()
while time.time() - start_time < timeout:
if check_status_func():
- self.logger.info("File is ready!")
+ self.logger.info("BaseDebrid: File is ready!")
return True
time.sleep(interval)
- self.logger.info(f"Waiting timed out.")
+ self.logger.info(f"BaseDebrid: Waiting timed out.")
return False
def download_torrent_file(self, download_url):
@@ -105,6 +164,9 @@ def download_torrent_file(self, download_url):
def get_stream_link(self, query, ip=None):
raise NotImplementedError
+
+ def add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None):
+ raise NotImplementedError
def add_magnet(self, magnet, ip=None):
raise NotImplementedError
diff --git a/stream_fusion/utils/debrid/get_debrid_service.py b/stream_fusion/utils/debrid/get_debrid_service.py
index c4035a8..43cc251 100644
--- a/stream_fusion/utils/debrid/get_debrid_service.py
+++ b/stream_fusion/utils/debrid/get_debrid_service.py
@@ -2,6 +2,7 @@
from stream_fusion.utils.debrid.alldebrid import AllDebrid
from stream_fusion.utils.debrid.realdebrid import RealDebrid
+from stream_fusion.utils.debrid.torbox import Torbox
from stream_fusion.logging_config import logger
from stream_fusion.settings import settings
@@ -15,23 +16,48 @@ def get_all_debrid_services(config):
for service in services:
if service == "Real-Debrid":
debrid_service.append(RealDebrid(config))
- logger.debug("Real Debrid service added to be use")
+ logger.debug("Real-Debrid: service added to be use")
if service == "AllDebrid":
debrid_service.append(AllDebrid(config))
- logger.debug("All Debrid service added to be use")
+ logger.debug("AllDebrid: service added to be use")
+ if service == "TorBox":
+ debrid_service.append(Torbox(config))
+ logger.debug("TorBox: service added to be use")
if not debrid_service:
raise HTTPException(status_code=500, detail="Invalid service configuration.")
return debrid_service
+def get_download_service(config):
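+ # The download_service setting (env) takes precedence over the user's debridDownloader config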
+ if not settings.download_service:
+ service = config['debridDownloader']
+ else:
+ service = settings.download_service
+ if not service:
+ logger.error("No download service found in the user config.")
+ return
+ if service == "RD":
+ return RealDebrid(config)
+ elif service == "AD":
+ return AllDebrid(config)
+ elif service == "TB":
+ return Torbox(config)
+ else:
+ logger.error("Invalid service configuration return by stremio in the query.")
+ raise HTTPException(status_code=500, detail="Invalid service configuration return by stremio.")
+
def get_debrid_service(config, service):
if not service:
- service == settings.default_debrid_service
+ service = settings.download_service
if service == "RD":
return RealDebrid(config)
elif service == "AD":
return AllDebrid(config)
+ elif service == "TB":
+ return Torbox(config)
+ elif service == "DL":
+ return get_download_service(config)
else:
logger.error("Invalid service configuration return by stremio in the query.")
raise HTTPException(status_code=500, detail="Invalid service configuration return by stremio.")
diff --git a/stream_fusion/utils/debrid/premiumize.py b/stream_fusion/utils/debrid/premiumize.py
index 1d3ceb2..1dc4570 100644
--- a/stream_fusion/utils/debrid/premiumize.py
+++ b/stream_fusion/utils/debrid/premiumize.py
@@ -1,7 +1,7 @@
# Assuming the BaseDebrid class and necessary imports are already defined as shown previously
import json
-from stream_fusion.constants import NO_CACHE_VIDEO_URL
+from stream_fusion.settings import settings
from stream_fusion.utils.debrid.base_debrid import BaseDebrid
from stream_fusion.utils.general import get_info_hash_from_magnet, season_episode_in_filename
from stream_fusion.logging_config import logger
@@ -63,7 +63,7 @@ def get_stream_link(self, query, config, ip=None):
if not self.wait_for_ready_status(lambda: self.get_availability(info_hash)["transcoded"][0] is True):
logger.info("Torrent not ready, caching in progress")
- return NO_CACHE_VIDEO_URL
+ return settings.no_cache_video_url
logger.info("Torrent is ready.")
diff --git a/stream_fusion/utils/debrid/realdebrid.py b/stream_fusion/utils/debrid/realdebrid.py
index be001cd..2ee5d65 100644
--- a/stream_fusion/utils/debrid/realdebrid.py
+++ b/stream_fusion/utils/debrid/realdebrid.py
@@ -5,12 +5,13 @@
from fastapi import HTTPException
import requests
-from stream_fusion.constants import NO_CACHE_VIDEO_URL
from stream_fusion.services.rd_conn.token_manager import RDTokenManager
from stream_fusion.utils.debrid.base_debrid import BaseDebrid
-from stream_fusion.utils.general import get_info_hash_from_magnet
-from stream_fusion.utils.general import is_video_file
-from stream_fusion.utils.general import season_episode_in_filename
+from stream_fusion.utils.general import (
+ get_info_hash_from_magnet,
+ is_video_file,
+ season_episode_in_filename,
+)
from stream_fusion.settings import settings
from stream_fusion.logging_config import logger
@@ -18,288 +19,355 @@
class RealDebrid(BaseDebrid):
def __init__(self, config):
super().__init__(config)
- self.base_url = "https://api.real-debrid.com"
+ self.base_url = f"{settings.rd_base_url}/{settings.rd_api_version}/"
if not settings.rd_unique_account:
self.token_manager = RDTokenManager(config)
def get_headers(self):
if settings.rd_unique_account:
if not settings.proxied_link:
- logger.warning("Real-Debrid unique account is enabled, but proxied link is disabled. "
- "This may lead to account ban.")
- logger.warning("Please enable proxied link in the settings.")
- raise HTTPException(status_code=500, detail="Proxied link is disabled.")
+ logger.warning(
+ "Real-Debrid: Unique account is enabled, but proxied link is disabled. This may lead to account ban."
+ )
+ logger.warning(
+ "Real-Debrid: Please enable proxied link in the settings."
+ )
+ raise HTTPException(
+ status_code=500, detail="Real-Debrid: Proxied link is disabled."
+ )
if settings.rd_token:
return {"Authorization": f"Bearer {settings.rd_token}"}
else:
- logger.warning("Real-Debrid unique account is enabled, but no token is provided. "
- "Please provide a token in the env.")
- raise HTTPException(status_code=500, detail="Real-Debrid token is not provided.")
+ logger.warning(
+ "Real-Debrid: Unique account is enabled, but no token is provided. Please provide a token in the env."
+ )
+ raise HTTPException(
+ status_code=500, detail="Real-Debrid: Token is not provided."
+ )
else:
return {"Authorization": f"Bearer {self.token_manager.get_access_token()}"}
def add_magnet(self, magnet, ip=None):
- url = f"{self.base_url}/rest/1.0/torrents/addMagnet"
+ url = f"{self.base_url}torrents/addMagnet"
data = {"magnet": magnet}
- logger.info(f"Adding magnet to RD: {magnet}")
- return self.json_response(url, method='post', headers=self.get_headers(), data=data)
+ logger.info(f"Real-Debrid: Adding magnet: {magnet}")
+ return self.json_response(
+ url, method="post", headers=self.get_headers(), data=data
+ )
def add_torrent(self, torrent_file):
- url = f"{self.base_url}/rest/1.0/torrents/addTorrent"
- return self.json_response(url, method='put', headers=self.get_headers(), data=torrent_file)
+ url = f"{self.base_url}torrents/addTorrent"
+ return self.json_response(
+ url, method="put", headers=self.get_headers(), data=torrent_file
+ )
def delete_torrent(self, id):
- url = f"{self.base_url}/rest/1.0/torrents/delete/{id}"
- return self.json_response(url, method='delete', headers=self.get_headers())
+ url = f"{self.base_url}torrents/delete/{id}"
+ return self.json_response(url, method="delete", headers=self.get_headers())
def get_torrent_info(self, torrent_id):
- logger.info(f"Getting torrent info for: {torrent_id}")
- url = f"{self.base_url}/rest/1.0/torrents/info/{torrent_id}"
+ logger.info(f"Real-Debrid: Getting torrent info for ID: {torrent_id}")
+ url = f"{self.base_url}torrents/info/{torrent_id}"
torrent_info = self.json_response(url, headers=self.get_headers())
- if not torrent_info or 'files' not in torrent_info:
+ if not torrent_info or "files" not in torrent_info:
return None
return torrent_info
def select_files(self, torrent_id, file_id):
- logger.info(f"Selecting file(s): {file_id}")
+ logger.info(
+ f"Real-Debrid: Selecting file(s): {file_id} for torrent ID: {torrent_id}"
+ )
self._torrent_rate_limit()
- url = f"{self.base_url}/rest/1.0/torrents/selectFiles/{torrent_id}"
+ url = f"{self.base_url}torrents/selectFiles/{torrent_id}"
data = {"files": str(file_id)}
requests.post(url, headers=self.get_headers(), data=data)
- # Do not touch that requests.post, it is not a mistake. The requests.post is used to have the same ip than token owner.
def unrestrict_link(self, link):
- url = f"{self.base_url}/rest/1.0/unrestrict/link"
+ url = f"{self.base_url}unrestrict/link"
data = {"link": link}
- return self.json_response(url, method='post', headers=self.get_headers(), data=data)
+ max_retries = 3
+ retry_delay = 5
+
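+ # Retry transient unrestrict failures a few times before surfacing the error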
+ for attempt in range(max_retries):
+ try:
+ response = self.json_response(url, method="post", headers=self.get_headers(), data=data)
+ if response and "download" in response:
+ return response
+ else:
+ logger.warning(f"Real-Debrid: Unexpected response when unrestricting link: {response}")
+ except requests.RequestException as e:
+ if attempt < max_retries - 1:
+ logger.warning(f"Real-Debrid: Error unrestricting link (attempt {attempt + 1}/{max_retries}): {str(e)}")
+ time.sleep(retry_delay)
+ else:
+ logger.error(f"Real-Debrid: Failed to unrestrict link after {max_retries} attempts: {str(e)}")
+ raise
+
+ return None
def is_already_added(self, magnet):
hash = magnet.split("urn:btih:")[1].split("&")[0].lower()
- url = f"{self.base_url}/rest/1.0/torrents"
+ url = f"{self.base_url}torrents"
torrents = self.json_response(url, headers=self.get_headers())
for torrent in torrents:
- if torrent['hash'].lower() == hash:
- return torrent['id']
+ if torrent["hash"].lower() == hash:
+ return torrent["id"]
return False
def wait_for_link(self, torrent_id, timeout=60, interval=5):
start_time = time.time()
while time.time() - start_time < timeout:
torrent_info = self.get_torrent_info(torrent_id)
- if torrent_info and 'links' in torrent_info and len(torrent_info['links']) > 0:
- return torrent_info['links']
+ if (
+ torrent_info
+ and "links" in torrent_info
+ and len(torrent_info["links"]) > 0
+ ):
+ return torrent_info["links"]
time.sleep(interval)
-
return None
def get_availability_bulk(self, hashes_or_magnets, ip=None):
self._torrent_rate_limit()
if len(hashes_or_magnets) == 0:
- logger.info("No hashes to be sent to Real-Debrid.")
+ logger.info("Real-Debrid: No hashes to be sent.")
return dict()
-
- url = f"{self.base_url}/rest/1.0/torrents/instantAvailability/{'/'.join(hashes_or_magnets)}"
+ url = f"{self.base_url}torrents/instantAvailability/{'/'.join(hashes_or_magnets)}"
return self.json_response(url, headers=self.get_headers())
def get_stream_link(self, query, config, ip=None):
-
- magnet = query['magnet']
- stream_type = query['type']
- file_index = int(query['file_index']) if query['file_index'] is not None else None
- season = query['season']
- episode = query['episode']
- torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None
+ # Extract query parameters
+ magnet = query["magnet"]
+ stream_type = query["type"]
+ file_index = int(query["file_index"]) if query["file_index"] is not None else None
+ season = query["season"]
+ episode = query["episode"]
info_hash = get_info_hash_from_magnet(magnet)
- logger.info(f"RealDebrid get stream link for {stream_type} with hash: {info_hash}")
- cached_torrent_ids = self.__get_cached_torrent_ids(info_hash)
- logger.info(f"Found {len(cached_torrent_ids)} cached torrents with hash: {info_hash}")
-
- torrent_info = None
- if len(cached_torrent_ids) > 0:
- if stream_type == "movie":
- torrent_info = self.get_torrent_info(cached_torrent_ids[0])
- elif stream_type == "series":
- torrent_info = self.__get_cached_torrent_info(cached_torrent_ids, file_index, season, episode)
- else:
- return "Error: Unsupported stream type."
+ logger.info(f"Real-Debrid: Getting stream link for {stream_type} with hash: {info_hash}")
- # The torrent is not yet added
- if torrent_info is None:
- torrent_info = self.__add_magnet_or_torrent(magnet, torrent_download)
- if not torrent_info or 'files' not in torrent_info:
- return "Error: Failed to get torrent info."
+ # Check for cached torrents
+ cached_torrent_ids = self._get_cached_torrent_ids(info_hash)
+ logger.info(f"Real-Debrid: Found {len(cached_torrent_ids)} cached torrents with hash: {info_hash}")
- logger.info("Selecting file")
- self.__select_file(torrent_info, stream_type, file_index, season, episode)
+ torrent_id = None
+ if cached_torrent_ids:
+ torrent_info = self._get_cached_torrent_info(cached_torrent_ids, file_index, season, episode, stream_type)
+ if torrent_info:
+ torrent_id = torrent_info["id"]
+ logger.info(f"Real-Debrid: Found cached torrent with ID: {torrent_id}")
- # == operator, to avoid adding the season pack twice and setting 5 as season pack treshold
- if len(cached_torrent_ids) == 0 and stream_type == "series" and len(torrent_info["files"]) > 5:
- logger.info("Prefetching season pack")
- prefetched_torrent_info = self.__prefetch_season_pack(magnet, torrent_download)
- if len(prefetched_torrent_info["links"]) > 0:
- self.delete_torrent(torrent_info["id"])
- torrent_info = prefetched_torrent_info
+ # If the torrent is not in cache, add it
+ if torrent_id is None:
+ torrent_id = self.add_magnet_or_torrent_and_select(query, ip)
+ if not torrent_id:
+ logger.error("Real-Debrid: Failed to add or find torrent.")
+ raise HTTPException(status_code=500, detail="Real-Debrid: Failed to add or find torrent.")
- torrent_id = torrent_info["id"]
- logger.info(f"Waiting for the link(s) to be ready for torrent ID: {torrent_id}")
- # Waiting for the link(s) to be ready
- links = self.wait_for_link(torrent_id)
+ logger.info(f"Real-Debrid: Waiting for link(s) to be ready for torrent ID: {torrent_id}")
+ links = self.wait_for_link(torrent_id, timeout=20) # Reduced timeout: fall back to the no-cache placeholder sooner when links are slow to appear
if links is None:
- return NO_CACHE_VIDEO_URL
+ logger.warning("Real-Debrid: No links available after waiting. Returning NO_CACHE_VIDEO_URL.")
+ return settings.no_cache_video_url
+
+ # Refresh torrent info to ensure we have the latest data
+ torrent_info = self.get_torrent_info(torrent_id)
+ # Select the appropriate link
if len(links) > 1:
- logger.info("Finding appropiate link")
- download_link = self.__find_appropiate_link(torrent_info, links, file_index, season, episode)
+ logger.info("Real-Debrid: Finding appropriate link")
+ download_link = self._find_appropriate_link(torrent_info, links, file_index, season, episode)
else:
download_link = links[0]
- logger.info(f"Unrestricting the download link: {download_link}")
- # Unrestricting the download link
+ # Unrestrict the link
+ logger.info(f"Real-Debrid: Unrestricting the download link: {download_link}")
unrestrict_response = self.unrestrict_link(download_link)
- if not unrestrict_response or 'download' not in unrestrict_response:
- return "Error: Failed to unrestrict link."
+ if not unrestrict_response or "download" not in unrestrict_response:
+ logger.error("Real-Debrid: Failed to unrestrict link.")
+ return None
- logger.info(f"Got download link: {unrestrict_response['download']}")
- return unrestrict_response['download']
+ logger.info(f"Real-Debrid: Got download link: {unrestrict_response['download']}")
+ return unrestrict_response["download"]
- def __get_cached_torrent_ids(self, info_hash):
+ def _get_cached_torrent_ids(self, info_hash):
self._torrent_rate_limit()
- url = f"{self.base_url}/rest/1.0/torrents"
+ url = f"{self.base_url}torrents"
torrents = self.json_response(url, headers=self.get_headers())
- logger.info(f"Searching users real-debrid downloads for {info_hash}")
- torrent_ids = []
- for torrent in torrents:
- if torrent['hash'].lower() == info_hash:
- torrent_ids.append(torrent['id'])
-
+ logger.info(f"Real-Debrid: Searching user's downloads for hash: {info_hash}")
+ torrent_ids = [
+ torrent["id"]
+ for torrent in torrents
+ if torrent["hash"].lower() == info_hash
+ ]
return torrent_ids
- def __get_cached_torrent_info(self, cached_ids, file_index, season, episode):
- cached_torrents = []
+ def _get_cached_torrent_info(
+ self, cached_ids, file_index, season, episode, stream_type
+ ):
for cached_torrent_id in cached_ids:
cached_torrent_info = self.get_torrent_info(cached_torrent_id)
- if self.__torrent_contains_file(cached_torrent_info, file_index, season, episode):
- if len(cached_torrent_info["links"]) > 0: # If the links are ready
- return cached_torrent_info
-
- cached_torrents.append(cached_torrent_info)
-
- if len(cached_torrents) == 0:
- return None
-
- return max(cached_torrents, key=lambda x: x['progress'])
+ if self._torrent_contains_file(
+ cached_torrent_info, file_index, season, episode, stream_type
+ ):
+ return cached_torrent_info
+ return None
- def __torrent_contains_file(self, torrent_info, file_index, season, episode):
+ def _torrent_contains_file(
+ self, torrent_info, file_index, season, episode, stream_type
+ ):
if not torrent_info or "files" not in torrent_info:
return False
- if file_index is None:
- for file in torrent_info["files"]:
- if file["selected"] and season_episode_in_filename(file['path'], season, episode):
- return True
- else:
- for file in torrent_info["files"]:
- if file['id'] == file_index:
- return file["selected"] == 1
-
+ if stream_type == "movie":
+ return any(file["selected"] for file in torrent_info["files"])
+ elif stream_type == "series":
+ if file_index is not None:
+ return any(
+ file["id"] == file_index and file["selected"]
+ for file in torrent_info["files"]
+ )
+ else:
+ return any(
+ file["selected"]
+ and season_episode_in_filename(file["path"], season, episode)
+ for file in torrent_info["files"]
+ )
return False
- def __add_magnet_or_torrent(self, magnet, torrent_download=None, anon_magnet=False):
- torrent_id = ""
+ def add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None):
if torrent_download is None:
- logger.info(f"Adding magnet to RealDebrid")
- if anon_magnet:
- magnet = re.sub(r'&tr=[^&]*', '', magnet)
+ logger.info("Real-Debrid: Adding magnet")
magnet_response = self.add_magnet(magnet)
- logger.info(f"RealDebrid add magnet response: {magnet_response}")
-
- if not magnet_response or 'id' not in magnet_response:
- return "Error: Failed to add magnet."
-
- torrent_id = magnet_response['id']
- elif anon_magnet and torrent_download:
- logger.info(f"Adding magnet to RealDebrid")
- clean_magnet = re.sub(r'&tr=[^&]*', '', magnet)
- magnet_response = self.add_magnet(clean_magnet)
- logger.info(f"RealDebrid add magnet response: {magnet_response}")
+ logger.info(f"Real-Debrid: Add magnet response: {magnet_response}")
- if not magnet_response or 'id' not in magnet_response:
- return "Error: Failed to add magnet."
+ if not magnet_response or "id" not in magnet_response:
+ logger.error("Real-Debrid: Failed to add magnet.")
+ raise HTTPException(
+ status_code=500, detail="Real-Debrid: Failed to add magnet."
+ )
- torrent_id = magnet_response['id']
+ torrent_id = magnet_response["id"]
else:
- logger.info(f"Downloading torrent file from Jackett")
+ logger.info("Real-Debrid: Downloading and adding torrent file")
torrent_file = self.download_torrent_file(torrent_download)
- logger.info(f"Torrent file downloaded from Jackett")
-
- logger.info(f"Adding torrent file to RealDebrid")
upload_response = self.add_torrent(torrent_file)
- logger.info(f"RealDebrid add torrent file response: {upload_response}")
+ logger.info(f"Real-Debrid: Add torrent file response: {upload_response}")
- if not upload_response or 'id' not in upload_response:
- return "Error: Failed to add torrent file."
+ if not upload_response or "id" not in upload_response:
+ logger.error("Real-Debrid: Failed to add torrent file.")
+ raise HTTPException(
+ status_code=500, detail="Real-Debrid: Failed to add torrent file."
+ )
- torrent_id = upload_response['id']
+ torrent_id = upload_response["id"]
- logger.info(f"New torrent ID: {torrent_id}")
+ logger.info(f"Real-Debrid: New torrent added with ID: {torrent_id}")
return self.get_torrent_info(torrent_id)
+
+ def add_magnet_or_torrent_and_select(self, query, ip=None):
+ magnet = query['magnet']
+ torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None
+ stream_type = query['type']
+ file_index = int(query["file_index"]) if query["file_index"] is not None else None
+ season = query["season"]
+ episode = query["episode"]
- def __prefetch_season_pack(self, magnet, torrent_download):
- torrent_info = self.__add_magnet_or_torrent(magnet, torrent_download)
- video_file_indexes = []
-
- for file in torrent_info["files"]:
- if is_video_file(file["path"]):
- video_file_indexes.append(str(file["id"]))
+ torrent_info = self.add_magnet_or_torrent(magnet, torrent_download, ip)
+ if not torrent_info or "files" not in torrent_info:
+ logger.error("Real-Debrid: Failed to add or find torrent.")
+ return None
- self.select_files(torrent_info["id"], ",".join(video_file_indexes))
- time.sleep(10)
- return self.get_torrent_info(torrent_info["id"])
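+ # Heuristic kept from the old prefetch logic: a series torrent with more than 5 files is a season pack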
+ is_season_pack = stream_type == "series" and len(torrent_info["files"]) > 5
- def __select_file(self, torrent_info, stream_type, file_index, season, episode):
+ if is_season_pack:
+ logger.info("Real-Debrid: Processing season pack")
+ self._process_season_pack(torrent_info)
+ else:
+ logger.info("Real-Debrid: Selecting specific file")
+ self._select_file(
+ torrent_info, stream_type, file_index, season, episode
+ )
+
+ logger.info(f"Real-Debrid: Added magnet or torrent to download service: {magnet[:50]}")
+ return torrent_info['id']
+
+ def _process_season_pack(self, torrent_info):
+ logger.info("Real-Debrid: Processing season pack files")
+ video_file_indexes = [
+ str(file["id"])
+ for file in torrent_info["files"]
+ if is_video_file(file["path"])
+ ]
+
+ if video_file_indexes:
+ self.select_files(torrent_info["id"], ",".join(video_file_indexes))
+ logger.info(
+ f"Real-Debrid: Selected {len(video_file_indexes)} video files from season pack"
+ )
+ time.sleep(10)
+ else:
+ logger.warning("Real-Debrid: No video files found in the season pack")
+
+ def _select_file(self, torrent_info, stream_type, file_index, season, episode):
torrent_id = torrent_info["id"]
if file_index is not None:
- logger.info(f"Selecting file_index: {file_index}")
+ logger.info(f"Real-Debrid: Selecting file_index: {file_index}")
self.select_files(torrent_id, file_index)
return
files = torrent_info["files"]
if stream_type == "movie":
- largest_file_id = max(files, key=lambda x: x['bytes'])['id']
- logger.info(f"Selecting file_index: {largest_file_id}")
+ largest_file_id = max(files, key=lambda x: x["bytes"])["id"]
+ logger.info(f"Real-Debrid: Selecting largest file_index: {largest_file_id}")
self.select_files(torrent_id, largest_file_id)
elif stream_type == "series":
- matching_files = []
- for file in files:
- if season_episode_in_filename(file["path"], season, episode):
- matching_files.append(file)
-
- largest_file_id = max(matching_files, key=lambda x: x['bytes'])['id']
- logger.info(f"Selecting file_index: {largest_file_id}")
- self.select_files(torrent_id, largest_file_id)
-
- def __find_appropiate_link(self, torrent_info, links, file_index, season, episode):
- selected_files = list(filter(lambda file: file["selected"] == 1, torrent_info["files"]))
+ matching_files = [
+ file
+ for file in files
+ if season_episode_in_filename(file["path"], season, episode)
+ ]
+ if matching_files:
+ largest_file_id = max(matching_files, key=lambda x: x["bytes"])["id"]
+ logger.info(
+ f"Real-Debrid: Selecting largest matching file_index: {largest_file_id}"
+ )
+ self.select_files(torrent_id, largest_file_id)
+ else:
+ logger.warning(
+ "Real-Debrid: No matching files found for the specified episode"
+ )
+
+ def _find_appropriate_link(self, torrent_info, links, file_index, season, episode):
+ # Refresh torrent info to get the latest selected files
+ torrent_info = self.get_torrent_info(torrent_info["id"])
+ selected_files = [file for file in torrent_info["files"] if file["selected"] == 1]
+
+ logger.info(f"Real-Debrid: Finding appropriate link. Selected files: {len(selected_files)}, Available links: {len(links)}")
+
+ if not selected_files:
+ logger.warning("Real-Debrid: No files were selected. Selecting the largest file.")
+ largest_file = max(torrent_info["files"], key=lambda x: x['bytes'])
+ selected_files = [largest_file]
- index = 0
if file_index is not None:
- for file in selected_files:
- if file["id"] == file_index:
- break
- index += 1
+ index = next((i for i, file in enumerate(selected_files) if file["id"] == file_index), None)
else:
- matching_indexes = []
- for file in selected_files:
- if season_episode_in_filename(file["path"], season, episode):
- matching_indexes.append({"index": index, "file": file})
- index += 1
-
- index = max(matching_indexes, lambda x: x["file"]["bytes"])["index"]
+ matching_indexes = [
+ {"index": i, "file": file}
+ for i, file in enumerate(selected_files)
+ if season_episode_in_filename(file["path"], season, episode)
+ ]
+ if matching_indexes:
+ index = max(matching_indexes, key=lambda x: x["file"]["bytes"])["index"]
+ else:
+ logger.warning("Real-Debrid: No matching episode found. Selecting the largest file.")
+ index = max(range(len(selected_files)), key=lambda i: selected_files[i]['bytes'])
- if len(links) - 1 < index:
- logger.debug(f"From selected files {selected_files}, index: {index} is out of range for {links}.")
- return NO_CACHE_VIDEO_URL
+ if index is None or index >= len(links):
+ logger.warning(f"Real-Debrid: Appropriate link not found. Falling back to the first available link.")
+ return links[0] if links else settings.no_cache_video_url
+ logger.info(f"Real-Debrid: Selected link index: {index}")
return links[index]
diff --git a/stream_fusion/utils/debrid/torbox.py b/stream_fusion/utils/debrid/torbox.py
new file mode 100644
index 0000000..5dbabfa
--- /dev/null
+++ b/stream_fusion/utils/debrid/torbox.py
@@ -0,0 +1,245 @@
+from itertools import islice
+import uuid
+import tenacity
+from urllib.parse import unquote
+
+from fastapi import HTTPException
+from stream_fusion.utils.debrid.base_debrid import BaseDebrid
+from stream_fusion.utils.general import get_info_hash_from_magnet, season_episode_in_filename, is_video_file
+from stream_fusion.logging_config import logger
+from stream_fusion.settings import settings
+
+class Torbox(BaseDebrid):
+ def __init__(self, config):
+ super().__init__(config)
+ self.base_url = f"{settings.tb_base_url}/{settings.tb_api_version}/api"
+ self.token = settings.tb_token if settings.tb_unique_account else self.config["TBToken"]
+ logger.info(f"Torbox: Initialized with base URL: {self.base_url}")
+
+ def get_headers(self):
+ if settings.tb_unique_account:
+ if not settings.proxied_link:
+ logger.warning("TorBox: Unique account enabled, but proxied link is disabled. This may lead to account ban.")
+ logger.warning("TorBox: Please enable proxied link in the settings.")
+ raise HTTPException(status_code=500, detail="Proxied link is disabled.")
+ if settings.tb_token:
+ return {"Authorization": f"Bearer {settings.tb_token}"}
+ else:
+ logger.warning("TorBox: Unique account enabled, but no token provided. Please provide a token in the env.")
+ raise HTTPException(status_code=500, detail="AllDebrid token is not provided.")
+ else:
+ return {"Authorization": f"Bearer {self.config["TBToken"]}"}
+
+ def add_magnet(self, magnet, ip=None, privacy="private"):
+ logger.info(f"Torbox: Adding magnet: {magnet[:50]}...")
+ url = f"{self.base_url}/torrents/createtorrent"
+ seed = 2 if privacy == "private" else 1
+ data = {
+ "magnet": magnet,
+ "seed": seed,
+ "allow_zip": "false"
+ }
+ response = self.json_response(url, method='post', headers=self.get_headers(), data=data)
+ logger.info(f"Torbox: Add magnet response: {response}")
+ return response
+
+ def add_torrent(self, torrent_file, privacy="private"):
+ logger.info("Torbox: Adding torrent file")
+ url = f"{self.base_url}/torrents/createtorrent"
+ seed = 2 if privacy == "private" else 1
+ data = {
+ "seed": seed,
+ "allow_zip": "false"
+ }
+ files = {
+ "file": (str(uuid.uuid4()) + ".torrent", torrent_file, 'application/x-bittorrent')
+ }
+ response = self.json_response(url, method='post', headers=self.get_headers(), data=data, files=files)
+ logger.info(f"Torbox: Add torrent file response: {response}")
+ return response
+
+ def get_torrent_info(self, torrent_id):
+ logger.info(f"Torbox: Getting info for torrent ID: {torrent_id}")
+ url = f"{self.base_url}/torrents/mylist?bypass_cache=true&id={torrent_id}"
+ response = self.json_response(url, headers=self.get_headers())
+ logger.debug(f"Torbox: Torrent info response: {response}")
+ return response
+
+ def control_torrent(self, torrent_id, operation):
+ logger.info(f"Torbox: Controlling torrent ID: {torrent_id}, operation: {operation}")
+ url = f"{self.base_url}/torrents/controltorrent"
+ data = {
+ "torrent_id": torrent_id,
+ "operation": operation
+ }
+ response = self.json_response(url, method='post', headers=self.get_headers(), data=data)
+ logger.info(f"Torbox: Control torrent response: {response}")
+ return response
+
+ @tenacity.retry(
+ stop=tenacity.stop_after_attempt(3),
+ wait=tenacity.wait_fixed(2),
+ retry=tenacity.retry_if_exception_type((HTTPException, TimeoutError))
+ )
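+ # tenacity: up to 3 attempts total with a fixed 2 s wait, retrying on HTTPException or TimeoutError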
+ def request_download_link(self, torrent_id, file_id=None, zip_link=False):
+ logger.info(f"Torbox: Requesting download link for torrent ID: {torrent_id}, file ID: {file_id}, zip link: {zip_link}")
+ url = f"{self.base_url}/torrents/requestdl?token={self.token}&torrent_id={torrent_id}&file_id={file_id}&zip_link={str(zip_link).lower()}"
+ logger.info(f"Torbox: Requesting URL: {url}")
+ response = self.json_response(url, headers=self.get_headers())
+ logger.info(f"Torbox: Request download link response: {response}")
+ return response
+
+ def get_stream_link(self, query, config, ip=None):
+ magnet = query['magnet']
+ stream_type = query['type']
+ file_index = int(query['file_index']) if query['file_index'] is not None else None
+ season = query['season']
+ episode = query['episode']
+ torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None
+
+ info_hash = get_info_hash_from_magnet(magnet)
+ logger.info(f"Torbox: Getting stream link for {stream_type} with hash: {info_hash}")
+
+ # Check if the torrent is already added
+ existing_torrent = self._find_existing_torrent(info_hash)
+
+ if existing_torrent:
+ logger.info(f"Torbox: Found existing torrent with ID: {existing_torrent['id']}")
+ torrent_info = existing_torrent
+ if not torrent_info or "id" not in torrent_info:
+ logger.error("Torbox: Failed to add or find torrent.")
+ return None
+ torrent_id = torrent_info["id"]
+ else:
+ # Add the magnet or torrent file
+ torrent_info = self.add_magnet_or_torrent(magnet, torrent_download)
+ if not torrent_info or "torrent_id" not in torrent_info:
+ logger.error("Torbox: Failed to add or find torrent.")
+ return None
+ torrent_id = torrent_info["torrent_id"]
+
+ logger.info(f"Torbox: Working with torrent ID: {torrent_id}")
+
+ # Wait for the torrent to be ready
+ if not self._wait_for_torrent_completion(torrent_id):
+ logger.warning("Torbox: Torrent not ready, caching in progress.")
+ return settings.no_cache_video_url
+
+ # Select the appropriate file
+ file_id = self._select_file(torrent_info, stream_type, file_index, season, episode)
+
+ if file_id is None:
+ logger.error("Torbox: No matching file found.")
+ return settings.no_cache_video_url
+
+ # Request the download link
+ download_link_response = self.request_download_link(torrent_id, file_id)
+
+ if not download_link_response or "data" not in download_link_response:
+ logger.error("Torbox: Failed to get download link.")
+ return settings.no_cache_video_url
+
+ logger.info(f"Torbox: Got download link: {download_link_response['data']}")
+ return download_link_response['data']
+
+ def get_availability_bulk(self, hashes_or_magnets, ip=None):
+ logger.info(f"Torbox: Checking availability for {len(hashes_or_magnets)} hashes/magnets")
+
+ all_results = []
+
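+ # Query the cache endpoint in batches of 50 hashes per request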
+ for i in range(0, len(hashes_or_magnets), 50):
+ batch = list(islice(hashes_or_magnets, i, i + 50))
+ logger.info(f"Torbox: Checking batch of {len(batch)} hashes/magnets (batch {i//50 + 1})")
+ url = f"{self.base_url}/torrents/checkcached?hash={','.join(batch)}&format=list&list_files=true"
+ logger.trace(f"Torbox: Requesting URL: {url}")
+ response = self.json_response(url, headers=self.get_headers())
+
+ if response and response.get("success") and response["data"]:
+ all_results.extend(response["data"])
+ else:
+ logger.debug(f"Torbox: No cached avaibility for batch {i//50 + 1}")
+ return None
+
+ logger.info(f"Torbox: Availability check completed for all {len(hashes_or_magnets)} hashes/magnets")
+ return {
+ "success": True,
+ "detail": "Torrent cache status retrieved successfully.",
+ "data": all_results
+ }
+
+ def _find_existing_torrent(self, info_hash):
+ logger.info(f"Torbox: Searching for existing torrent with hash: {info_hash}")
+ torrents = self.json_response(f"{self.base_url}/torrents/mylist", headers=self.get_headers())
+ if torrents and "data" in torrents:
+ for torrent in torrents["data"]:
+ if torrent["hash"].lower() == info_hash.lower():
+ logger.info(f"Torbox: Found existing torrent with ID: {torrent['id']}")
+ return torrent
+ logger.info("Torbox: No existing torrent found")
+ return None
+
+ def add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None, privacy="private"):
+ if torrent_download is None:
+ logger.info("Torbox: Adding magnet")
+ response = self.add_magnet(magnet, ip, privacy)
+ else:
+ logger.info("Torbox: Downloading and adding torrent file")
+ torrent_file = self.download_torrent_file(torrent_download)
+ response = self.add_torrent(torrent_file, privacy)
+
+ logger.info(f"Torbox: Add torrent response: {response}")
+
+ if not response or "data" not in response or response["data"] is None:
+ logger.error("Torbox: Failed to add magnet/torrent")
+ return None
+
+ return response["data"]
+
+ def _wait_for_torrent_completion(self, torrent_id, timeout=60, interval=10):
+ logger.info(f"Torbox: Waiting for torrent completion, ID: {torrent_id}")
+ def check_status():
+ torrent_info = self.get_torrent_info(torrent_id)
+ if torrent_info and "data" in torrent_info:
+ files = torrent_info["data"].get("files", [])
+ logger.info(f"Torbox: Current torrent status: {torrent_info['data']['download_state']}")
+ return len(files) > 0
+ return False
+
+ result = self.wait_for_ready_status(check_status, timeout, interval)
+ if result:
+ logger.info("Torbox: Torrent is ready")
+ else:
+ logger.warning("Torbox: Torrent completion timeout")
+ return result
+
+ def _select_file(self, torrent_info, stream_type, file_index, season, episode):
+ logger.info(f"Torbox: Selecting file for {stream_type}, file_index: {file_index}, season: {season}, episode: {episode}")
+ files = torrent_info.get("files", [])
+
+ if stream_type == "movie":
+ if file_index is not None:
+ logger.info(f"Torbox: Selected file index {file_index} for movie")
+ return file_index
+ largest_file = max(files, key=lambda x: x["size"])
+ logger.info(f"Torbox: Selected largest file (ID: {largest_file['id']}, Size: {largest_file['size']}) for movie")
+ return largest_file["id"]
+
+ elif stream_type == "series":
+ if file_index is not None:
+ logger.info(f"Torbox: Selected file index {file_index} for series")
+ return file_index
+
+ matching_files = [
+ file for file in files
+ if season_episode_in_filename(file["short_name"], season, episode) and is_video_file(file["short_name"])
+ ]
+
+ if matching_files:
+ largest_matching_file = max(matching_files, key=lambda x: x["size"])
+ logger.info(f"Torbox: Selected largest matching file (ID: {largest_matching_file['id']}, Name: {largest_matching_file['name']}, Size: {largest_matching_file['size']}) for series")
+ return largest_matching_file["id"]
+ else:
+ logger.warning(f"Torbox: No matching files found for S{season}E{episode}")
+
+ logger.error(f"Torbox: Failed to select appropriate file for {stream_type}")
+ return None
\ No newline at end of file
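Reviewer note: the availability endpoint is queried in fixed batches of 50 so large hash lists do not blow the URL length. A minimal, runnable sketch of that batching pattern in isolation (hypothetical data and a placeholder host; only the query-string shape comes from the code above):

from itertools import islice

def batched(seq, size=50):
    # Mirrors the range/islice loop in get_availability_bulk.
    for i in range(0, len(seq), size):
        yield list(islice(seq, i, i + size))

hashes = [f"{n:040x}" for n in range(120)]  # fake 40-char info hashes
for batch_number, batch in enumerate(batched(hashes), start=1):
    url = f"https://api.example.invalid/torrents/checkcached?hash={','.join(batch)}&format=list&list_files=true"
    print(f"batch {batch_number}: {len(batch)} hashes")  # 50, 50, 20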
diff --git a/stream_fusion/utils/filter/language_filter.py b/stream_fusion/utils/filter/language_filter.py
index 693e1ad..9e7d567 100644
--- a/stream_fusion/utils/filter/language_filter.py
+++ b/stream_fusion/utils/filter/language_filter.py
@@ -15,19 +15,20 @@ def filter(self, data):
filtered_data = []
for torrent in data:
if not torrent.languages:
+ logger.debug(f"Skipping {torrent.raw_title} with no languages")
continue
languages = torrent.languages.copy()
if torrent.indexer == "DMM - API" and "multi" in languages:
regex = self.fr_regex.search(torrent.raw_title)
- logger.debug(f"Regex match for {torrent.raw_title} : {regex}")
+ logger.trace(f"Regex match for {torrent.raw_title} : {regex}")
if not regex:
languages.remove("multi")
if torrent.indexer == "DMM - API" and "fr" in languages:
regex = self.fr_regex.search(torrent.raw_title)
- logger.debug(f"Regex match for {torrent.raw_title} : {regex}")
+ logger.trace(f"Regex match for {torrent.raw_title} : {regex}")
if not regex:
languages.remove("fr")
@@ -35,7 +36,7 @@ def filter(self, data):
lang in self.config["languages"] for lang in languages
):
torrent.languages = languages
- logger.debug(f"Keeping {torrent.raw_title} with lang : {languages} ")
+ logger.trace(f"Keeping {torrent.raw_title} with lang : {languages} ")
filtered_data.append(torrent)
return filtered_data
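What the filter enforces, stripped of logging: for "DMM - API" results, a "multi" or "fr" language tag is only kept when a French-release regex actually matches the raw title. A standalone sketch, assuming a stand-in pattern for self.fr_regex (the real pattern is defined outside this hunk):

import re

fr_regex = re.compile(r"\b(FRENCH|TRUEFRENCH|VOSTFR)\b", re.IGNORECASE)  # assumed stand-in

def demote_untrusted_languages(raw_title, languages, indexer):
    languages = languages.copy()
    if indexer == "DMM - API" and not fr_regex.search(raw_title):
        for lang in ("multi", "fr"):
            if lang in languages:
                languages.remove(lang)
    return languages

print(demote_untrusted_languages("Film.2020.1080p.WEB", ["multi"], "DMM - API"))      # []
print(demote_untrusted_languages("Film.2020.TRUEFRENCH.1080p", ["fr"], "DMM - API"))  # ['fr']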
diff --git a/stream_fusion/utils/filter/max_size_filter.py b/stream_fusion/utils/filter/max_size_filter.py
index cf43f60..baec662 100644
--- a/stream_fusion/utils/filter/max_size_filter.py
+++ b/stream_fusion/utils/filter/max_size_filter.py
@@ -14,8 +14,8 @@ def filter(self, data):
if torrent_size <= self.max_size_bytes:
filtered_data.append(torrent)
else:
- logger.debug(f"Excluded torrent due to size: {torrent.raw_title}, Size: {torrent_size / (1024*1024*1024):.2f} GB")
- logger.info(f"MaxSizeFilter: input {len(data)}, output {len(filtered_data)}")
+ logger.trace(f"Excluded torrent due to size: {torrent.raw_title}, Size: {torrent_size / (1024*1024*1024):.2f} GB")
+ logger.debug(f"MaxSizeFilter: input {len(data)}, output {len(filtered_data)}")
return filtered_data
def can_filter(self):
diff --git a/stream_fusion/utils/filter/quality_exclusion_filter.py b/stream_fusion/utils/filter/quality_exclusion_filter.py
index bf5d26a..85c76c6 100644
--- a/stream_fusion/utils/filter/quality_exclusion_filter.py
+++ b/stream_fusion/utils/filter/quality_exclusion_filter.py
@@ -48,7 +48,6 @@ def _is_stream_allowed(self, stream: TorrentItem) -> bool:
logger.debug(f"Stream excluded due to HEVC codec: {parsed_data.raw_title}")
return False
- logger.debug("Stream allowed")
return True
def can_filter(self) -> bool:
diff --git a/stream_fusion/utils/filter/results_per_quality_filter.py b/stream_fusion/utils/filter/results_per_quality_filter.py
index 06faa70..a29cea7 100644
--- a/stream_fusion/utils/filter/results_per_quality_filter.py
+++ b/stream_fusion/utils/filter/results_per_quality_filter.py
@@ -21,7 +21,7 @@ def filter(self, data):
resolution_count[resolution] += 1
filtered_items.append(item)
- logger.info(f"ResultsPerQualityFilter: input {len(data)}, output {len(filtered_items)}")
+ logger.debug(f"ResultsPerQualityFilter: input {len(data)}, output {len(filtered_items)}")
return filtered_items
def can_filter(self):
diff --git a/stream_fusion/utils/filter/title_exclusion_filter.py b/stream_fusion/utils/filter/title_exclusion_filter.py
index c11c51d..46418d9 100644
--- a/stream_fusion/utils/filter/title_exclusion_filter.py
+++ b/stream_fusion/utils/filter/title_exclusion_filter.py
@@ -13,7 +13,7 @@ def filter(self, data):
if self._should_include_stream(stream):
filtered_items.append(stream)
- logger.info(f"TitleExclusionFilter: input {len(data)}, output {len(filtered_items)}")
+ logger.debug(f"TitleExclusionFilter: input {len(data)}, output {len(filtered_items)}")
return filtered_items
def _should_include_stream(self, stream):
@@ -21,7 +21,7 @@ def _should_include_stream(self, stream):
title_upper = stream.raw_title.upper()
for keyword in self.excluded_keywords:
if keyword in title_upper:
- logger.debug(f"Excluded stream: {stream.raw_title} (keyword: {keyword})")
+ logger.trace(f"Excluded stream: {stream.raw_title} (keyword: {keyword})")
return False
return True
except AttributeError:
diff --git a/stream_fusion/utils/filter_results.py b/stream_fusion/utils/filter_results.py
index e4f6090..96d2121 100644
--- a/stream_fusion/utils/filter_results.py
+++ b/stream_fusion/utils/filter_results.py
@@ -1,12 +1,11 @@
import re
from typing import List
-from RTN import title_match, RTN, DefaultRanking, SettingsModel, sort_torrents
+from RTN import title_match
from stream_fusion.utils.filter.language_filter import LanguageFilter
from stream_fusion.utils.filter.max_size_filter import MaxSizeFilter
from stream_fusion.utils.filter.quality_exclusion_filter import QualityExclusionFilter
-from stream_fusion.utils.filter.results_per_quality_filter import ResultsPerQualityFilter
from stream_fusion.utils.filter.title_exclusion_filter import TitleExclusionFilter
from stream_fusion.utils.torrent.torrent_item import TorrentItem
from stream_fusion.logging_config import logger
@@ -15,7 +14,7 @@
def sort_quality(item: TorrentItem):
- logger.debug(f"Evaluating quality for item: {item.raw_title}")
+ logger.trace(f"Filters: Evaluating quality for item: {item.raw_title}")
if not item.parsed_data.resolution:
return float("inf"), True
resolution = item.parsed_data.resolution
@@ -24,24 +23,7 @@ def sort_quality(item: TorrentItem):
def items_sort(items, config):
- logger.info(f"Starting item sorting. Sort method: {config['sort']}")
- # settings = SettingsModel(
- # require=[],
- # exclude=config["exclusionKeywords"] + config["exclusion"],
- # preferred=[],
- # )
-
- # rtn = RTN(settings=settings, ranking_model=DefaultRanking())
- # logger.debug("Applying RTN ranking to items")
- # torrents = [rtn.rank(item.raw_title, item.info_hash) for item in items]
- # sorted_torrents = sort_torrents(set(torrents))
-
- # for key, value in sorted_torrents.items():
- # index = next((i for i, item in enumerate(items) if item.info_hash == key), None)
- # if index is not None:
- # items[index].parsed_data = value
-
- logger.info(f"Sorting items by method: {config['sort']}")
+ logger.info(f"Filters: Sorting items by method: {config['sort']}")
if config["sort"] == "quality":
sorted_items = sorted(items, key=sort_quality)
elif config["sort"] == "sizeasc":
@@ -52,30 +34,38 @@ def items_sort(items, config):
sorted_items = sorted(items, key=lambda x: (sort_quality(x), -int(x.size)))
else:
logger.warning(
- f"Unrecognized sort method: {config['sort']}. No sorting applied."
+ f"Filters: Unrecognized sort method: {config['sort']}. No sorting applied."
)
sorted_items = items
- logger.info(f"Sorting complete. Number of sorted items: {len(sorted_items)}")
+ logger.success(
+ f"Filters: Sorting complete. Number of sorted items: {len(sorted_items)}"
+ )
return sorted_items
+
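For reference, the sort contract items_sort now exposes; any other value falls through to the warning branch and keeps the original order. Hedged usage sketch, assuming the stream_fusion package and its dependencies are importable and candidate_items is an existing List[TorrentItem]:

# config["sort"] values handled above:
#   "quality"         -> key=sort_quality (resolution-based)
#   "sizeasc"         -> ascending by item size
#   "sizedesc"        -> descending by item size
#   "qualitythensize" -> key=(sort_quality(x), -int(x.size))
from stream_fusion.utils.filter_results import items_sort
ordered = items_sort(candidate_items, {"sort": "qualitythensize"})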
def filter_out_non_matching_movies(items, year):
- logger.info(f"Filtering non-matching movies for year : {year}")
+ logger.info(f"Filters: Filtering non-matching movies for year: {year}")
year_min = str(int(year) - 1)
year_max = str(int(year) + 1)
- year_pattern = re.compile(rf'\b{year_max}|{year}|{year_min}\b')
+ year_pattern = re.compile(rf"\b({year_max}|{year}|{year_min})\b")
filtered_items = []
for item in items:
if year_pattern.search(item.raw_title):
- logger.debug(f"Match found for year {year} in item: {item.raw_title}")
+ logger.trace(
+ f"Filters: Match found for year {year} in item: {item.raw_title}"
+ )
filtered_items.append(item)
else:
- logger.debug(f"No match found for year {year} in item: {item.raw_title}")
+ logger.trace(
+ f"Filters: No match found for year {year} in item: {item.raw_title}"
+ )
return filtered_items
+
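The ±1-year window tolerates off-by-one release tagging, and the grouping parentheses make both \b anchors apply to every alternative (unanchored middle alternatives previously allowed substring hits, e.g. 2019 inside 20199). A runnable illustration with made-up titles:

import re

year = "2019"
year_pattern = re.compile(rf"\b({int(year) + 1}|{year}|{int(year) - 1})\b")
for title in ("Film.2018.1080p", "Film.2020.720p", "Film.1999.DVDRip"):
    print(title, bool(year_pattern.search(title)))  # True, True, False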
def filter_out_non_matching_series(items, season, episode):
logger.info(
- f"Filtering non-matching items for season {season} and episode {episode}"
+ f"Filters: Filtering non-matching items for season {season} and episode {episode}"
)
filtered_items = []
clean_season = season.replace("S", "")
@@ -83,61 +73,76 @@ def filter_out_non_matching_series(items, season, episode):
numeric_season = int(clean_season)
numeric_episode = int(clean_episode)
- integrale_pattern = re.compile(r'\b(INTEGRALE|COMPLET|COMPLETE|INTEGRAL)\b', re.IGNORECASE)
+ integrale_pattern = re.compile(
+ r"\b(INTEGRALE|COMPLET|COMPLETE|INTEGRAL)\b", re.IGNORECASE
+ )
for item in items:
if len(item.parsed_data.seasons) == 0 and len(item.parsed_data.episodes) == 0:
if integrale_pattern.search(item.raw_title):
- logger.debug(f"Integrale match found for item: {item.raw_title}")
+ logger.trace(
+ f"Filters: Integrale match found for item: {item.raw_title}"
+ )
filtered_items.append(item)
- logger.debug(f"No season or episode information found for item: {item.raw_title}")
+ logger.trace(
+ f"Filters: No season or episode information found for item: {item.raw_title}"
+ )
continue
if (
len(item.parsed_data.episodes) == 0
and numeric_season in item.parsed_data.seasons
):
- logger.debug(f"Exact season match found for item: {item.raw_title}")
+ logger.trace(
+ f"Filters: Exact season match found for item: {item.raw_title}"
+ )
filtered_items.append(item)
continue
if (
numeric_season in item.parsed_data.seasons
and numeric_episode in item.parsed_data.episodes
):
- logger.debug(f"Exact season and episode match found for item: {item.raw_title}")
+ logger.trace(
+ f"Filters: Exact season and episode match found for item: {item.raw_title}"
+ )
filtered_items.append(item)
continue
- logger.info(
- f"Filtering complete. {len(filtered_items)} matching items found out of {len(items)} total"
+ logger.debug(
+ f"Filters: Filtering complete. {len(filtered_items)} matching items found out of {len(items)} total"
)
return filtered_items
def clean_tmdb_title(title):
- # Dictionnaire des caractères à filtrer, groupés par catégorie
+ # Dictionary of characters to filter, grouped by category
characters_to_filter = {
- 'ponctuation': r'<>"/\\|?*',
- 'controle': r'\x00-\x1F',
- 'symboles': r'\u2122\u00AE\u00A9\u2120\u00A1\u00BF\u2013\u2014\u2018\u2019\u201C\u201D\u2022\u2026',
- 'espaces': r'\s+'
+ "punctuation": r'<>"/\\|?*',
+ "control": r"\x00-\x1F",
+ "symbols": r"\u2122\u00AE\u00A9\u2120\u00A1\u00BF\u2013\u2014\u2018\u2019\u201C\u201D\u2022\u2026",
+ "spaces": r"\s+",
}
-
- filter_pattern = ''.join([f'[{chars}]' for chars in characters_to_filter.values()])
- cleaned_title = re.sub(r':(\S)', r' \1', title)
- cleaned_title = re.sub(r'\s*:\s*', ' ', cleaned_title)
- cleaned_title = re.sub(filter_pattern, ' ', cleaned_title)
+
+ filter_pattern = "".join([f"[{chars}]" for chars in characters_to_filter.values()])
+ cleaned_title = re.sub(r":(\S)", r" \1", title)
+ cleaned_title = re.sub(r"\s*:\s*", " ", cleaned_title)
+ cleaned_title = re.sub(filter_pattern, " ", cleaned_title)
cleaned_title = cleaned_title.strip()
- cleaned_title = re.sub(characters_to_filter['espaces'], ' ', cleaned_title)
-
+ cleaned_title = re.sub(characters_to_filter["spaces"], " ", cleaned_title)
+
return cleaned_title
+
def remove_non_matching_title(items, titles):
filtered_items = []
- integrale_pattern = re.compile(r'\b(INTEGRALE|COMPLET|COMPLETE|INTEGRAL)\b', re.IGNORECASE)
+ integrale_pattern = re.compile(
+ r"\b(INTEGRALE|COMPLET|COMPLETE|INTEGRAL)\b", re.IGNORECASE
+ )
cleaned_titles = [clean_tmdb_title(title) for title in titles]
- cleaned_titles = [integrale_pattern.sub('', title).strip() for title in cleaned_titles]
- logger.info(f"Removing items not matching titles: {cleaned_titles}")
-
+ cleaned_titles = [
+ integrale_pattern.sub("", title).strip() for title in cleaned_titles
+ ]
+ logger.info(f"Filters: Removing items not matching titles: {cleaned_titles}")
+
def is_ordered_subset(subset, full_set):
subset_words = subset.lower().split()
full_set_words = full_set.lower().split()
@@ -146,78 +151,97 @@ def is_ordered_subset(subset, full_set):
if subset_index < len(subset_words) and word == subset_words[subset_index]:
subset_index += 1
return subset_index == len(subset_words)
-
+
for item in items:
- cleaned_item_title = integrale_pattern.sub('', item.parsed_data.parsed_title).strip()
+ cleaned_item_title = integrale_pattern.sub(
+ "", item.parsed_data.parsed_title
+ ).strip()
for title in cleaned_titles:
- logger.debug(f"Comparing item title: {cleaned_item_title} with title: {title}")
-
+ logger.trace(
+ f"Filters: Comparing item title: {cleaned_item_title} with title: {title}"
+ )
+
if is_ordered_subset(cleaned_item_title, title):
- logger.debug(f"Ordered subset match found. Item accepted: {cleaned_item_title}")
+ logger.trace(
+ f"Filters: Ordered subset match found. Item accepted: {cleaned_item_title}"
+ )
filtered_items.append(item)
break
elif is_ordered_subset(title, cleaned_item_title):
- logger.debug(f"Reverse ordered subset match found. Item accepted: {cleaned_item_title}")
+ logger.trace(
+ f"Filters: Reverse ordered subset match found. Item accepted: {cleaned_item_title}"
+ )
filtered_items.append(item)
break
else:
- logger.debug(f"No ordered subset match. Trying title_match()")
+ logger.trace(f"Filters: No ordered subset match. Trying title_match()")
if title_match(title, cleaned_item_title):
- logger.debug(f"title_match() succeeded. Item accepted: {cleaned_item_title}")
+ logger.trace(
+ f"Filters: title_match() succeeded. Item accepted: {cleaned_item_title}"
+ )
filtered_items.append(item)
break
else:
- logger.debug(f"No match found, item skipped: {cleaned_item_title}")
-
- logger.info(
- f"Title filtering complete. {len(filtered_items)} items kept out of {len(items)} total"
+ logger.trace(f"Filters: No match found, item skipped: {cleaned_item_title}")
+
+ logger.debug(
+ f"Filters: Title filtering complete. {len(filtered_items)} items kept out of {len(items)} total"
)
return filtered_items
def filter_items(items, media, config):
- logger.info(f"Starting item filtering for media: {media.titles[0]}")
+ logger.info(f"Filters: Starting item filtering for media: {media.titles[0]}")
filters = {
"languages": LanguageFilter(config),
"maxSize": MaxSizeFilter(config, media.type),
"exclusionKeywords": TitleExclusionFilter(config),
"exclusion": QualityExclusionFilter(config),
- "resultsPerQuality": ResultsPerQualityFilter(config),
+ # "resultsPerQuality": ResultsPerQualityFilter(config),
}
- logger.info(f"Initial item count: {len(items)}")
+ logger.info(f"Filters: Initial item count: {len(items)}")
if media.type == "series":
- logger.info(f"Filtering out non-matching series torrents")
+ logger.info(f"Filters: Filtering out non-matching series torrents")
items = filter_out_non_matching_series(items, media.season, media.episode)
- logger.info(f"Item count after season/episode filtering: {len(items)}")
+ logger.success(
+ f"Filters: Item count after season/episode filtering: {len(items)}"
+ )
if media.type == "movie":
- logger.info(f"Filtering out non-matching movie torrents")
+ logger.info(f"Filters: Filtering out non-matching movie torrents")
items = filter_out_non_matching_movies(items, media.year)
- logger.info(f"Item count after year filtering: {len(items)}")
+ logger.success(f"Filters: Item count after year filtering: {len(items)}")
+ logger.info(f"Filters: Filtering out items not matching titles: {media.titles}")
items = remove_non_matching_title(items, media.titles)
- logger.info(f"Item count after title filtering: {len(items)}")
+ logger.success(f"Filters: Item count after title filtering: {len(items)}")
for filter_name, filter_instance in filters.items():
try:
- logger.info(f"Applying {filter_name} filter: {config[filter_name]}")
+ logger.info(
+ f"Filters: Applying {filter_name} filter: {config[filter_name]}"
+ )
items = filter_instance(items)
- logger.info(f"Item count after {filter_name} filter: {len(items)}")
+ logger.success(
+ f"Filters: Item count after {filter_name} filter: {len(items)}"
+ )
except Exception as e:
- logger.error(f"Error while applying {filter_name} filter", exc_info=e)
+ logger.error(
+ f"Filters: Error while applying {filter_name} filter", exc_info=e
+ )
- logger.info(f"Filtering complete. Final item count: {len(items)}")
+ logger.success(f"Filters: Filtering complete. Final item count: {len(items)}")
return items
def sort_items(items, config):
if config["sort"] is not None:
- logger.info(f"Sorting items according to config: {config['sort']}")
+ logger.info(f"Filters: Sorting items according to config: {config['sort']}")
return items_sort(items, config)
else:
- logger.info("No sorting specified, returning items in original order")
+ logger.info("Filters: No sorting specified, returning items in original order")
return items
@@ -225,7 +249,7 @@ def merge_items(
cache_items: List[TorrentItem], search_items: List[TorrentItem]
) -> List[TorrentItem]:
logger.info(
- f"Merging cached items ({len(cache_items)}) and search items ({len(search_items)})"
+ f"Filters: Merging cached items ({len(cache_items)}) and search items ({len(search_items)})"
)
merged_dict = {}
@@ -240,5 +264,7 @@ def add_to_merged(item: TorrentItem):
add_to_merged(item)
merged_items = list(merged_dict.values())
- logger.info(f"Merging complete. Total unique items: {len(merged_items)}")
+ logger.success(
+ f"Filters: Merging complete. Total unique items: {len(merged_items)}"
+ )
return merged_items
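The title matcher above leans on is_ordered_subset, which accepts a candidate when every word of one title appears in the other in the same order, gaps allowed. Reconstructed standalone from the diff, with worked cases on made-up titles:

def is_ordered_subset(subset, full_set):
    subset_words = subset.lower().split()
    full_set_words = full_set.lower().split()
    subset_index = 0
    for word in full_set_words:
        if subset_index < len(subset_words) and word == subset_words[subset_index]:
            subset_index += 1
    return subset_index == len(subset_words)

print(is_ordered_subset("the matrix", "the matrix reloaded"))    # True
print(is_ordered_subset("matrix the", "the matrix reloaded"))    # False: order matters
print(is_ordered_subset("the reloaded", "the matrix reloaded"))  # True: gaps allowed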
diff --git a/stream_fusion/utils/jackett/jackett_service.py b/stream_fusion/utils/jackett/jackett_service.py
index af9f72a..f064760 100644
--- a/stream_fusion/utils/jackett/jackett_service.py
+++ b/stream_fusion/utils/jackett/jackett_service.py
@@ -23,7 +23,7 @@ def __init__(self, config):
self.logger = logger
self.__api_key = settings.jackett_api_key
- self.__base_url = f"{settings.jackett_shema}://{settings.jackett_host}:{settings.jackett_port}/api/v2.0"
+ self.__base_url = f"{settings.jackett_schema}://{settings.jackett_host}:{settings.jackett_port}/api/v2.0"
self.__session = requests.Session()
adapter = HTTPAdapter(pool_connections=100, pool_maxsize=100)
diff --git a/stream_fusion/utils/metdata/tmdb.py b/stream_fusion/utils/metdata/tmdb.py
index b14bb04..55a6ec9 100644
--- a/stream_fusion/utils/metdata/tmdb.py
+++ b/stream_fusion/utils/metdata/tmdb.py
@@ -18,7 +18,7 @@ def get_metadata(self, id, type):
url = f"https://api.themoviedb.org/3/find/{full_id[0]}?api_key={settings.tmdb_api_key}&external_source=imdb_id&language={lang}"
response = requests.get(url)
data = response.json()
- logger.debug(data)
+ logger.trace(data)
if lang == self.config['languages'][0]:
if type == "movie":
diff --git a/stream_fusion/utils/parser/__init__.py b/stream_fusion/utils/parser/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/stream_fusion/utils/parser/parser_service.py b/stream_fusion/utils/parser/parser_service.py
new file mode 100644
index 0000000..0205b9b
--- /dev/null
+++ b/stream_fusion/utils/parser/parser_service.py
@@ -0,0 +1,168 @@
+import json
+import queue
+import threading
+from typing import List, Dict
+
+from RTN import ParsedData
+from stream_fusion.settings import settings
+from stream_fusion.utils.models.media import Media
+from stream_fusion.utils.torrent.torrent_item import TorrentItem
+from stream_fusion.utils.string_encoding import encodeb64
+
+from stream_fusion.utils.parser.parser_utils import (
+ detect_french_language,
+ extract_release_group,
+ filter_by_availability,
+ filter_by_direct_torrent,
+ get_emoji,
+ INSTANTLY_AVAILABLE,
+ DOWNLOAD_REQUIRED,
+ DIRECT_TORRENT,
+)
+
+
+class StreamParser:
+ def __init__(self, config: Dict):
+ self.config = config
+ self.configb64 = encodeb64(json.dumps(config).replace("=", "%3D"))
+
+ def parse_to_stremio_streams(
+ self, torrent_items: List[TorrentItem], media: Media
+ ) -> List[Dict]:
+ stream_list = []
+ threads = []
+ thread_results_queue = queue.Queue()
+
+ for torrent_item in torrent_items[: int(self.config["maxResults"])]:
+ thread = threading.Thread(
+ target=self._parse_to_debrid_stream,
+ args=(torrent_item, thread_results_queue, media),
+ daemon=True,
+ )
+ thread.start()
+ threads.append(thread)
+
+ for thread in threads:
+ thread.join()
+
+ while not thread_results_queue.empty():
+ stream_list.append(thread_results_queue.get())
+
+ if self.config["debrid"]:
+ stream_list = sorted(stream_list, key=filter_by_availability)
+ stream_list = sorted(stream_list, key=filter_by_direct_torrent)
+
+ return stream_list
+
+ def _parse_to_debrid_stream(
+ self, torrent_item: TorrentItem, results: queue.Queue, media: Media
+ ) -> None:
+ parsed_data: ParsedData = torrent_item.parsed_data
+ name = self._create_stream_name(torrent_item, parsed_data)
+ title = self._create_stream_title(torrent_item, parsed_data, media)
+
+ queryb64 = encodeb64(
+ json.dumps(torrent_item.to_debrid_stream_query(media))
+ ).replace("=", "%3D")
+
+ results.put(
+ {
+ "name": name,
+ "description": title,
+ "url": f"{self.config['addonHost']}/playback/{self.configb64}/{queryb64}",
+ "behaviorHints": {
+ "bingeGroup": f"stream-fusion-{torrent_item.info_hash}",
+ "filename": torrent_item.file_name or torrent_item.raw_title,
+ },
+ }
+ )
+
+ if self.config["torrenting"] and torrent_item.privacy == "public":
+ self._add_direct_torrent_stream(torrent_item, parsed_data, title, results)
+
+ def _create_stream_name(
+ self, torrent_item: TorrentItem, parsed_data: ParsedData
+ ) -> str:
+ resolution = parsed_data.resolution or "Unknown"
+ if torrent_item.availability == "RD":
+ name = f"{INSTANTLY_AVAILABLE}instant\nReal-Debrid\n({resolution})"
+ elif torrent_item.availability == "AD":
+ name = f"{INSTANTLY_AVAILABLE}instant\nAllDebrid\n({resolution})"
+ elif torrent_item.availability == "TB":
+ name = f"{INSTANTLY_AVAILABLE}instant\nTorBox\n({resolution})"
+ else:
+ name = f"{DOWNLOAD_REQUIRED}download\n{self.config.get("debridDownloader", settings.download_service)}\n({resolution})"
+ return name
+
+ def _create_stream_title(
+ self, torrent_item: TorrentItem, parsed_data: ParsedData, media: Media
+ ) -> str:
+ title = f"{torrent_item.raw_title}\n"
+
+ if media.type == "series" and torrent_item.file_name:
+ title += f"{torrent_item.file_name}\n"
+
+ title += self._add_language_info(torrent_item, parsed_data)
+ title += self._add_torrent_info(torrent_item)
+ title += self._add_media_info(parsed_data)
+
+ return title.strip()
+
+ def _add_language_info(
+ self, torrent_item: TorrentItem, parsed_data: ParsedData
+ ) -> str:
+ info = (
+ "/".join(get_emoji(lang) for lang in torrent_item.languages)
+ if torrent_item.languages
+ else "🌐"
+ )
+
+ lang_type = detect_french_language(torrent_item.raw_title)
+ if lang_type:
+ info += f" ✔ {lang_type} "
+
+ group = extract_release_group(torrent_item.raw_title) or parsed_data.group
+ if group:
+ info += f" ☠️ {group}"
+
+ return f"{info}\n"
+
+ def _add_torrent_info(self, torrent_item: TorrentItem) -> str:
+ size_in_gb = round(int(torrent_item.size) / 1024 / 1024 / 1024, 2)
+ return f"🔍 {torrent_item.indexer} 💾 {size_in_gb}GB 👥 {torrent_item.seeders} \n"
+
+ def _add_media_info(self, parsed_data: ParsedData) -> str:
+ info = []
+ if parsed_data.codec:
+ info.append(f"🎥 {parsed_data.codec}")
+ if parsed_data.quality:
+ info.append(f"📺 {parsed_data.quality}")
+ if parsed_data.audio:
+ info.append(f"🎧 {' '.join(parsed_data.audio)}")
+ return " ".join(info) + "\n" if info else ""
+
+ def _add_direct_torrent_stream(
+ self,
+ torrent_item: TorrentItem,
+ parsed_data: ParsedData,
+ title: str,
+ results: queue.Queue,
+ ) -> None:
+ direct_torrent_name = f"{DIRECT_TORRENT}\n{parsed_data.quality}\n"
+ if parsed_data.quality and parsed_data.quality[0] not in ["Unknown", ""]:
+ direct_torrent_name += f"({'|'.join(parsed_data.quality)})"
+
+ results.put(
+ {
+ "name": direct_torrent_name,
+ "description": title,
+ "infoHash": torrent_item.info_hash,
+ "fileIdx": (
+ int(torrent_item.file_index) if torrent_item.file_index else None
+ ),
+ "behaviorHints": {
+ "bingeGroup": f"stream-fusion-{torrent_item.info_hash}",
+ "filename": torrent_item.file_name or torrent_item.raw_title,
+ },
+ }
+ )
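Hedged usage sketch of the new service. These are the config keys StreamParser actually reads in this file; constructing TorrentItem and Media instances is elided because their signatures are not part of this diff:

from stream_fusion.utils.parser.parser_service import StreamParser

config = {
    "maxResults": 20,                                 # cap applied in parse_to_stremio_streams
    "debrid": True,                                   # enables availability-first ordering
    "torrenting": False,                              # skip extra direct-torrent entries
    "addonHost": "https://stremio.example.invalid",   # placeholder host
}
parser = StreamParser(config)
# streams = parser.parse_to_stremio_streams(torrent_items, media)
# -> list of Stremio stream dicts: {"name", "description", "url", "behaviorHints"}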
diff --git a/stream_fusion/utils/parser/parser_utils.py b/stream_fusion/utils/parser/parser_utils.py
new file mode 100644
index 0000000..034600d
--- /dev/null
+++ b/stream_fusion/utils/parser/parser_utils.py
@@ -0,0 +1,33 @@
+import re
+from typing import Dict, Optional
+from stream_fusion.constants import FR_RELEASE_GROUPS, FRENCH_PATTERNS
+
+INSTANTLY_AVAILABLE = "⚡"
+DOWNLOAD_REQUIRED = "⬇️"
+DIRECT_TORRENT = "🏴‍☠️"
+
+def get_emoji(language: str) -> str:
+ emoji_dict = {
+ "fr": "🇫🇷 FR", "en": "🇬🇧 EN", "es": "🇪🇸 ES",
+ "de": "🇩🇪 GR", "it": "🇮🇹 IT", "pt": "🇵🇹 PO",
+ "ru": "🇷🇺 RU", "in": "🇮🇳 IN", "nl": "🇳🇱 DU",
+ "hu": "🇭🇺 HU", "la": "🇲🇽 LA", "multi": "🌍 MULTi",
+ }
+ return emoji_dict.get(language, "🇬🇧")
+
+def filter_by_availability(item: Dict) -> int:
+ return 0 if item["name"].startswith(INSTANTLY_AVAILABLE) else 1
+
+def filter_by_direct_torrent(item: Dict) -> int:
+ return 1 if item["name"].startswith(DIRECT_TORRENT) else 0
+
+def extract_release_group(title: str) -> Optional[str]:
+ combined_pattern = "|".join(FR_RELEASE_GROUPS)
+ match = re.search(combined_pattern, title)
+ return match.group(0) if match else None
+
+def detect_french_language(title: str) -> Optional[str]:
+ for language, pattern in FRENCH_PATTERNS.items():
+ if re.search(pattern, title, re.IGNORECASE):
+ return language
+ return None
\ No newline at end of file
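The two consecutive sorted() calls in parse_to_stremio_streams exploit Python's stable sort: the first pass floats instant results to the top, the second pushes direct-torrent entries to the bottom without disturbing the order within each group. A runnable demonstration using the constants and key functions defined above, with fake stream names:

streams = [
    {"name": DIRECT_TORRENT + " public torrent"},
    {"name": DOWNLOAD_REQUIRED + " download required"},
    {"name": INSTANTLY_AVAILABLE + " instant"},
]
streams = sorted(streams, key=filter_by_availability)
streams = sorted(streams, key=filter_by_direct_torrent)
print([s["name"] for s in streams])
# ['⚡ instant', '⬇️ download required', '🏴‍☠️ public torrent']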
diff --git a/stream_fusion/utils/torrent/torrent_item.py b/stream_fusion/utils/torrent/torrent_item.py
index ef90fda..9198ff1 100644
--- a/stream_fusion/utils/torrent/torrent_item.py
+++ b/stream_fusion/utils/torrent/torrent_item.py
@@ -41,7 +41,8 @@ def to_debrid_stream_query(self, media: Media) -> dict:
"season": media.season if isinstance(media, Series) else None,
"episode": media.episode if isinstance(media, Series) else None,
"torrent_download": quote(self.torrent_download) if self.torrent_download is not None else None,
- "service": self.availability if self.availability else None,
+ "service": self.availability if self.availability else "DL",
+ "privacy": self.privacy if self.privacy else "private",
}
def to_dict(self):
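Net effect of this change: a torrent that no debrid service has cached now serialises with service "DL", which is what routes it to the new download flow in views.py, and privacy is carried along so TorBox can be told whether the source is public. Abridged, illustrative query shape (only the fields visible in this hunk; values are hypothetical):

query = {
    "season": "S01",           # None for movies
    "episode": "E02",          # None for movies
    "torrent_download": None,  # or a URL-quoted .torrent link
    "service": "DL",           # was None before this change
    "privacy": "private",      # new field, defaults to "private"
}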
diff --git a/stream_fusion/utils/torrent/torrent_service.py b/stream_fusion/utils/torrent/torrent_service.py
index 66a1ac1..7081e31 100644
--- a/stream_fusion/utils/torrent/torrent_service.py
+++ b/stream_fusion/utils/torrent/torrent_service.py
@@ -250,7 +250,7 @@ def __find_single_episode_file(self, file_structure, season, episode):
return max(episode_files, key=lambda file: file["size"])
def __find_full_index(self, file_structure):
- self.logger.info("Starting to build full index of video files")
+ self.logger.debug("Starting to build full index of video files")
video_formats = {".mkv", ".mp4", ".avi", ".mov", ".flv", ".wmv", ".webm", ".mpg", ".mpeg", ".m4v", ".3gp", ".3g2",
".ogv", ".ogg", ".drc", ".gif", ".gifv", ".mng", ".avi", ".mov", ".qt", ".wmv", ".yuv", ".rm",
".rmvb", ".asf", ".amv", ".m4p", ".m4v", ".mpg", ".mp2", ".mpeg", ".mpe", ".mpv", ".mpg",
@@ -282,11 +282,11 @@ def __find_full_index(self, file_structure):
"seasons": parsed_file.seasons,
"episodes": parsed_file.episodes
})
- self.logger.debug(f"Added file to index: {file_name}")
+ self.logger.trace(f"Added file to index: {file_name}")
file_index += 1
- self.logger.info(f"Full index built with {len(full_index)} video files")
+ self.logger.debug(f"Full index built with {len(full_index)} video files")
return full_index
def __find_movie_file(self, file_structure):
diff --git a/stream_fusion/utils/torrent/torrent_smart_container.py b/stream_fusion/utils/torrent/torrent_smart_container.py
index 176876c..938fe1e 100644
--- a/stream_fusion/utils/torrent/torrent_smart_container.py
+++ b/stream_fusion/utils/torrent/torrent_smart_container.py
@@ -1,3 +1,4 @@
+import os
import threading
from typing import List, Dict
@@ -6,16 +7,22 @@
from stream_fusion.utils.debrid.alldebrid import AllDebrid
from stream_fusion.utils.debrid.premiumize import Premiumize
from stream_fusion.utils.debrid.realdebrid import RealDebrid
+from stream_fusion.utils.debrid.torbox import Torbox
from stream_fusion.utils.torrent.torrent_item import TorrentItem
from stream_fusion.utils.cache.cache import cache_public
from stream_fusion.utils.general import season_episode_in_filename
from stream_fusion.logging_config import logger
+
class TorrentSmartContainer:
def __init__(self, torrent_items: List[TorrentItem], media):
self.logger = logger
- self.logger.info(f"Initializing TorrentSmartContainer with {len(torrent_items)} items")
- self.__itemsDict: Dict[TorrentItem] = self.__build_items_dict_by_infohash(torrent_items)
+ self.logger.info(
+ f"Initializing TorrentSmartContainer with {len(torrent_items)} items"
+ )
+ self.__itemsDict: Dict[TorrentItem] = self._build_items_dict_by_infohash(
+ torrent_items
+ )
self.__media = media
def get_unaviable_hashes(self):
@@ -23,120 +30,187 @@ def get_unaviable_hashes(self):
for hash, item in self.__itemsDict.items():
if item.availability is False:
hashes.append(hash)
- self.logger.debug(f"Retrieved {len(hashes)} hashes to process for RealDebrid")
+ self.logger.debug(
+ f"TorrentSmartContainer: Retrieved {len(hashes)} hashes to process"
+ )
return hashes
def get_items(self):
items = list(self.__itemsDict.values())
- self.logger.debug(f"Retrieved {len(items)} items")
+ self.logger.debug(f"TorrentSmartContainer: Retrieved {len(items)} items")
return items
def get_direct_torrentable(self):
- self.logger.info("Retrieving direct torrentable items")
+ self.logger.info("TorrentSmartContainer: Retrieving direct torrentable items")
direct_torrentable_items = []
for torrent_item in self.__itemsDict.values():
if torrent_item.privacy == "public" and torrent_item.file_index is not None:
direct_torrentable_items.append(torrent_item)
- self.logger.info(f"Found {len(direct_torrentable_items)} direct torrentable items")
+ self.logger.info(
+ f"TorrentSmartContainer: Found {len(direct_torrentable_items)} direct torrentable items"
+ )
return direct_torrentable_items
def get_best_matching(self):
- self.logger.info("Finding best matching items")
+ self.logger.info("TorrentSmartContainer: Finding best matching items")
best_matching = []
- self.logger.debug(f"Total items to process: {len(self.__itemsDict)}")
+ self.logger.debug(
+ f"TorrentSmartContainer: Total items to process: {len(self.__itemsDict)}"
+ )
for torrent_item in self.__itemsDict.values():
- self.logger.debug(f"Processing item: {torrent_item.raw_title} - Has torrent: {torrent_item.torrent_download is not None}")
+ self.logger.trace(
+ f"TorrentSmartContainer: Processing item: {torrent_item.raw_title} - Has torrent: {torrent_item.torrent_download is not None}"
+ )
if torrent_item.torrent_download is not None:
- self.logger.debug(f"Has file index: {torrent_item.file_index is not None}")
+ self.logger.trace(
+ f"TorrentSmartContainer: Has file index: {torrent_item.file_index is not None}"
+ )
if torrent_item.file_index is not None:
best_matching.append(torrent_item)
- self.logger.debug("Item added to best matching (has file index)")
+ self.logger.trace(
+ "TorrentSmartContainer: Item added to best matching (has file index)"
+ )
else:
- matching_file = self.__find_matching_file(torrent_item.full_index, self.__media.season, self.__media.episode)
+ matching_file = self._find_matching_file(
+ torrent_item.full_index,
+ self.__media.season,
+ self.__media.episode,
+ )
if matching_file:
- torrent_item.file_index = matching_file['file_index']
- torrent_item.file_name = matching_file['file_name']
- torrent_item.size = matching_file['size']
+ torrent_item.file_index = matching_file["file_index"]
+ torrent_item.file_name = matching_file["file_name"]
+ torrent_item.size = matching_file["size"]
best_matching.append(torrent_item)
- self.logger.debug(f"Item added to best matching (found matching file: {matching_file['file_name']})")
+ self.logger.trace(
+ f"TorrentSmartContainer: Item added to best matching (found matching file: {matching_file['file_name']})"
+ )
else:
- self.logger.debug("No matching file found, item not added to best matching")
+ self.logger.trace(
+ "TorrentSmartContainer: No matching file found, item not added to best matching"
+ )
else:
- best_matching.append(torrent_item)
- self.logger.debug("Item added to best matching (magnet link)")
- self.logger.info(f"Found {len(best_matching)} best matching items")
+ if not ((not torrent_item.availability or torrent_item.availability == "DL") and torrent_item.indexer == "DMM - API"):
+ best_matching.append(torrent_item)
+ self.logger.trace(
+ "TorrentSmartContainer: Item added to best matching (magnet link)"
+ )
+ self.logger.success(
+ f"TorrentSmartContainer: Found {len(best_matching)} best matching items"
+ )
return best_matching
- def __find_matching_file(self, full_index, season, episode):
- self.logger.info(f"Searching for matching file: Season {season}, Episode {episode}")
-
+ def _find_matching_file(self, full_index, season, episode):
+ self.logger.trace(
+ f"TorrentSmartContainer: Searching for matching file: Season {season}, Episode {episode}"
+ )
+
if not full_index:
- self.logger.warning("Full index is empty, cannot find matching file")
+ self.logger.trace(
+ "TorrentSmartContainer: Full index is empty, cannot find matching file"
+ )
return None
try:
- target_season = int(season.replace('S', ''))
- target_episode = int(episode.replace('E', ''))
+ target_season = int(season.replace("S", ""))
+ target_episode = int(episode.replace("E", ""))
except ValueError:
- self.logger.error(f"Invalid season or episode format: {season}, {episode}")
+ self.logger.error(
+ f"TorrentSmartContainer: Invalid season or episode format: {season}, {episode}"
+ )
return None
best_match = None
for file_entry in full_index:
- if target_season in file_entry['seasons'] and target_episode in file_entry['episodes']:
- if best_match is None or file_entry['size'] > best_match['size']:
+ if (
+ target_season in file_entry["seasons"]
+ and target_episode in file_entry["episodes"]
+ ):
+ if best_match is None or file_entry["size"] > best_match["size"]:
best_match = file_entry
- self.logger.debug(f"Found potential match: {file_entry['file_name']}")
+ self.logger.trace(
+ f"TorrentSmartContainer: Found potential match: {file_entry['file_name']}"
+ )
if best_match:
- self.logger.info(f"Best matching file found: {best_match['file_name']}")
+ self.logger.trace(
+ f"TorrentSmartContainer: Best matching file found: {best_match['file_name']}"
+ )
return best_match
else:
- self.logger.warning(f"No matching file found for Season {season}, Episode {episode}")
+ self.logger.warning(
+ f"TorrentSmartContainer: No matching file found for Season {season}, Episode {episode}"
+ )
return None
def cache_container_items(self):
- self.logger.info("Starting cache process for container items")
- threading.Thread(target=self.__save_to_cache).start()
+ self.logger.info(
+ "TorrentSmartContainer: Starting cache process for container items"
+ )
+ threading.Thread(target=self._save_to_cache).start()
- def __save_to_cache(self):
- self.logger.info("Saving public items to cache")
- public_torrents = list(filter(lambda x: x.privacy == "public", self.get_items()))
- self.logger.debug(f"Found {len(public_torrents)} public torrents to cache")
+ def _save_to_cache(self):
+ self.logger.info("TorrentSmartContainer: Saving public items to cache")
+ public_torrents = list(
+ filter(lambda x: x.privacy == "public", self.get_items())
+ )
+ self.logger.debug(
+ f"TorrentSmartContainer: Found {len(public_torrents)} public torrents to cache"
+ )
cache_public(public_torrents, self.__media)
- self.logger.info("Caching process completed")
+ self.logger.info("TorrentSmartContainer: Caching process completed")
def update_availability(self, debrid_response, debrid_type, media):
if not debrid_response or debrid_response == {} or debrid_response == []:
- self.logger.debug("Debrid response is empty : " + str(debrid_response))
+ self.logger.debug(
+ "TorrentSmartContainer: Debrid response is empty : "
+ + str(debrid_response)
+ )
return
- self.logger.info(f"Updating availability for {debrid_type.__name__}")
+ self.logger.info(
+ f"TorrentSmartContainer: Updating availability for {debrid_type.__name__}"
+ )
if debrid_type is RealDebrid:
- self.__update_availability_realdebrid(debrid_response, media)
+ self._update_availability_realdebrid(debrid_response, media)
elif debrid_type is AllDebrid:
- self.__update_availability_alldebrid(debrid_response, media)
+ self._update_availability_alldebrid(debrid_response, media)
+ elif debrid_type is Torbox:
+ self._update_availability_torbox(debrid_response, media)
elif debrid_type is Premiumize:
- self.__update_availability_premiumize(debrid_response)
+ self._update_availability_premiumize(debrid_response)
else:
- self.logger.error(f"Unsupported debrid type: {debrid_type.__name__}")
- raise NotImplementedError(f"Debrid type {debrid_type.__name__} not implemented")
+ self.logger.error(
+ f"TorrentSmartContainer: Unsupported debrid type: {debrid_type.__name__}"
+ )
+ raise NotImplementedError(
+ f"TorrentSmartContainer: Debrid type {debrid_type.__name__} not implemented"
+ )
- def __update_availability_realdebrid(self, response, media):
- self.logger.info("Updating availability for RealDebrid")
+ def _update_availability_realdebrid(self, response, media):
+ self.logger.info("TorrentSmartContainer: Updating availability for RealDebrid")
for info_hash, details in response.items():
if "rd" not in details:
- self.logger.debug(f"Skipping hash {info_hash}: no RealDebrid data")
+ self.logger.debug(
+ f"TorrentSmartContainer: Skipping hash {info_hash}: no RealDebrid data"
+ )
continue
torrent_item: TorrentItem = self.__itemsDict[info_hash]
- self.logger.debug(f"Processing {torrent_item.type}: {torrent_item.raw_title}")
+ self.logger.debug(
+ f"Processing {torrent_item.type}: {torrent_item.raw_title}"
+ )
files = []
if torrent_item.type == "series":
- self.__process_series_files(details, media, torrent_item, files, debrid="RD")
+ self._process_series_files(
+ details, media, torrent_item, files, debrid="RD"
+ )
else:
- self.__process_movie_files(details, files)
- self.__update_file_details(torrent_item, files, debrid="RD")
- self.logger.info("RealDebrid availability update completed")
+ self._process_movie_files(details, files)
+ self._update_file_details(torrent_item, files, debrid="RD")
+ self.logger.info(
+ "TorrentSmartContainer: RealDebrid availability update completed"
+ )
- def __process_series_files(self, details, media, torrent_item, files, debrid: str = "??"):
+ def _process_series_files(
+ self, details, media, torrent_item, files, debrid: str = "??"
+ ):
for variants in details["rd"]:
file_found = False
for file_index, file in variants.items():
@@ -144,75 +218,147 @@ def __process_series_files(self, details, media, torrent_item, files, debrid: st
clean_episode = media.episode.replace("E", "")
numeric_season = int(clean_season)
numeric_episode = int(clean_episode)
- if season_episode_in_filename(file["filename"], numeric_season, numeric_episode):
+ if season_episode_in_filename(
+ file["filename"], numeric_season, numeric_episode
+ ):
self.logger.debug(f"Matching file found: {file['filename']}")
torrent_item.file_index = file_index
torrent_item.file_name = file["filename"]
torrent_item.size = file["filesize"]
torrent_item.availability = debrid
file_found = True
- files.append({
- "file_index": file_index,
- "title": file["filename"],
- "size": file["filesize"]
- })
+ files.append(
+ {
+ "file_index": file_index,
+ "title": file["filename"],
+ "size": file["filesize"],
+ }
+ )
break
if file_found:
break
- def __process_movie_files(self, details, files):
+ def _process_movie_files(self, details, files):
for variants in details["rd"]:
for file_index, file in variants.items():
- self.logger.debug(f"Adding movie file: {file['filename']}")
- files.append({
- "file_index": file_index,
- "title": file["filename"],
- "size": file["filesize"]
- })
+ self.logger.debug(
+ f"TorrentSmartContainer: Adding movie file: {file['filename']}"
+ )
+ files.append(
+ {
+ "file_index": file_index,
+ "title": file["filename"],
+ "size": file["filesize"],
+ }
+ )
- def __update_availability_alldebrid(self, response, media):
- self.logger.info("Updating availability for AllDebrid")
+ def _update_availability_alldebrid(self, response, media):
+ self.logger.info("TorrentSmartContainer: Updating availability for AllDebrid")
if response == {}:
- self.logger.error("AllDebrid response is empty")
+ self.logger.error("TorrentSmartContainer: AllDebrid response is empty")
return
if response["status"] != "success":
- self.logger.error(f"AllDebrid API error: {response}")
+ self.logger.error(f"TorrentSmartContainer: AllDebrid API error: {response}")
return
for data in response["data"]["magnets"]:
if not data["instant"]:
- self.logger.debug(f"Skipping non-instant magnet: {data['hash']}")
+ self.logger.debug(
+ f"TorrentSmartContainer: Skipping non-instant magnet: {data['hash']}"
+ )
continue
torrent_item: TorrentItem = self.__itemsDict[data["hash"]]
files = []
- self.__explore_folders(data["files"], files, 1, torrent_item.type, media)
- self.__update_file_details(torrent_item, files, debrid="AD")
- self.logger.info("AllDebrid availability update completed")
+ self._explore_folders_alldebrid(
+ data["files"], files, 1, torrent_item.type, media
+ )
+ self._update_file_details(torrent_item, files, debrid="AD")
+ self.logger.info(
+ "TorrentSmartContainer: AllDebrid availability update completed"
+ )
- def __update_availability_premiumize(self, response):
- self.logger.info("Updating availability for Premiumize")
+ def _update_availability_torbox(self, response, media):
+ self.logger.info("TorrentSmartContainer: Updating availability for Torbox")
+ if response["success"] is False:
+ self.logger.error(f"TorrentSmartContainer: Torbox API error: {response}")
+ return
+
+ for data in response["data"]:
+ torrent_item: TorrentItem = self.__itemsDict[data["hash"]]
+ files = self._process_torbox_files(data["files"], torrent_item.type, media)
+ self._update_file_details(torrent_item, files, debrid="TB")
+
+ self.logger.info("TorrentSmartContainer: Torbox availability update completed")
+
+ def _process_torbox_files(self, files, type, media):
+ processed_files = []
+ for index, file in enumerate(files):
+ if type == "series":
+ if self._is_matching_episode_torbox(file["name"], media):
+ processed_files.append(
+ {
+ "file_index": index,
+ "title": file["name"],
+ "size": file["size"],
+ }
+ )
+ elif type == "movie":
+ processed_files.append(
+ {
+ "file_index": index,
+ "title": file["name"],
+ "size": file["size"],
+ }
+ )
+ return processed_files
+
+ def _is_matching_episode_torbox(self, filepath, media):
+ # Extract only the filename from the full path
+ filename = os.path.basename(filepath)
+
+ clean_season = media.season.replace("S", "")
+ clean_episode = media.episode.replace("E", "")
+ numeric_season = int(clean_season)
+ numeric_episode = int(clean_episode)
+
+ return season_episode_in_filename(filename, numeric_season, numeric_episode)
+
+ def _update_availability_premiumize(self, response):
+ self.logger.info("TorrentSmartContainer: Updating availability for Premiumize")
if response["status"] != "success":
- self.logger.error(f"Premiumize API error: {response}")
+ self.logger.error(
+ f"TorrentSmartContainer: Premiumize API error: {response}"
+ )
return
torrent_items = self.get_items()
for i, is_available in enumerate(response["response"]):
if bool(is_available):
torrent_items[i].availability = response["transcoded"][i]
- self.logger.debug(f"Updated availability for item {i}: {torrent_items[i].availability}")
- self.logger.info("Premiumize availability update completed")
+ self.logger.debug(
+ f"TorrentSmartContainer: Updated availability for item {i}: {torrent_items[i].availability}"
+ )
+ self.logger.info(
+ "TorrentSmartContainer: Premiumize availability update completed"
+ )
- def __update_file_details(self, torrent_item, files, debrid: str = "??"):
+ def _update_file_details(self, torrent_item, files, debrid: str = "??"):
if not files:
- self.logger.debug(f"No files to update for {torrent_item.raw_title}")
+ self.logger.debug(
+ f"TorrentSmartContainer: No files to update for {torrent_item.raw_title}"
+ )
return
file = max(files, key=lambda file: file["size"])
torrent_item.availability = debrid
torrent_item.file_index = file["file_index"]
torrent_item.file_name = file["title"]
torrent_item.size = file["size"]
- self.logger.debug(f"Updated file details for {torrent_item.raw_title}: {file['title']}")
+ self.logger.debug(
+ f"TorrentSmartContainer: Updated file details for {torrent_item.raw_title}: {file['title']}"
+ )
- def __build_items_dict_by_infohash(self, items: List[TorrentItem]):
- self.logger.info(f"Building items dictionary by infohash ({len(items)} items)")
+ def _build_items_dict_by_infohash(self, items: List[TorrentItem]):
+ self.logger.info(
+ f"TorrentSmartContainer: Building items dictionary by infohash ({len(items)} items)"
+ )
items_dict = {}
for item in items:
if item.info_hash is not None:
@@ -220,41 +366,60 @@ def __build_items_dict_by_infohash(self, items: List[TorrentItem]):
self.logger.debug(f"Adding {item.info_hash} to items dict")
items_dict[item.info_hash] = item
else:
- self.logger.debug(f"Skipping duplicate info hash: {item.info_hash}")
- self.logger.info(f"Built dictionary with {len(items_dict)} unique items")
+ self.logger.debug(
+ f"TorrentSmartContainer: Skipping duplicate info hash: {item.info_hash}"
+ )
+ self.logger.info(
+ f"TorrentSmartContainer: Built dictionary with {len(items_dict)} unique items"
+ )
return items_dict
- def __explore_folders(self, folder, files, file_index, type, media):
-
+ def _explore_folders_alldebrid(self, folder, files, file_index, type, media):
+
if type == "series":
for file in folder:
if "e" in file:
- file_index = self.__explore_folders(file["e"], files, file_index, type, media)
+ file_index = self._explore_folders_alldebrid(
+ file["e"], files, file_index, type, media
+ )
continue
parsed_file = parse(file["n"])
clean_season = media.season.replace("S", "")
clean_episode = media.episode.replace("E", "")
numeric_season = int(clean_season)
numeric_episode = int(clean_episode)
- if numeric_season in parsed_file.seasons and numeric_episode in parsed_file.episodes:
- self.logger.debug(f"Matching series file found: {file['n']}")
- files.append({
- "file_index": file_index,
- "title": file["n"],
- "size": file["s"] if "s" in file else 0
- })
+ if (
+ numeric_season in parsed_file.seasons
+ and numeric_episode in parsed_file.episodes
+ ):
+ self.logger.debug(
+ f"TorrentSmartContainer: Matching series file found: {file['n']}"
+ )
+ files.append(
+ {
+ "file_index": file_index,
+ "title": file["n"],
+ "size": file["s"] if "s" in file else 0,
+ }
+ )
file_index += 1
elif type == "movie":
file_index = 1
for file in folder:
if "e" in file:
- file_index = self.__explore_folders(file["e"], files, file_index, type, media)
+ file_index = self._explore_folders_alldebrid(
+ file["e"], files, file_index, type, media
+ )
continue
- self.logger.debug(f"Adding movie file: {file['n']}")
- files.append({
- "file_index": file_index,
- "title": file["n"],
- "size": file["s"] if "s" in file else 0
- })
+ self.logger.debug(
+ f"TorrentSmartContainer: Adding movie file: {file['n']}"
+ )
+ files.append(
+ {
+ "file_index": file_index,
+ "title": file["n"],
+ "size": file["s"] if "s" in file else 0,
+ }
+ )
file_index += 1
- return file_index
\ No newline at end of file
+ return file_index
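For reference, the response shape _update_availability_torbox consumes, reconstructed from the code above: a success flag plus a data list of {hash, files: [{name, size}]} entries. Everything below is a fabricated illustration, not a captured API response:

response = {
    "success": True,
    "data": [
        {
            "hash": "abc123",
            "files": [
                {"name": "Show/Show.S01E02.1080p.mkv", "size": 1_500_000_000},
                {"name": "Show/sample.mkv", "size": 50_000_000},
            ],
        }
    ],
}
# container.update_availability(response, Torbox, media)
# For series, only files whose basename matches the requested SxxEyy survive
# _process_torbox_files; the largest survivor wins in _update_file_details,
# and the item's availability is set to "TB".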
diff --git a/stream_fusion/utils/yggfilx/yggflix_service.py b/stream_fusion/utils/yggfilx/yggflix_service.py
index c328bce..e0f8cc4 100644
--- a/stream_fusion/utils/yggfilx/yggflix_service.py
+++ b/stream_fusion/utils/yggfilx/yggflix_service.py
@@ -111,6 +111,6 @@ def __post_process_results(
item.parsed_data=parse(item.raw_title)
items.append(item)
- logger.debug(f"Yggflix parsed: {item.parsed_data}")
+ logger.trace(f"Yggflix result: {item}")
return items
diff --git a/stream_fusion/videos/nocache.mp4 b/stream_fusion/videos/nocache.mp4
deleted file mode 100644
index 0e24b78..0000000
Binary files a/stream_fusion/videos/nocache.mp4 and /dev/null differ
diff --git a/stream_fusion/web/playback/stream/views.py b/stream_fusion/web/playback/stream/views.py
index d6519f3..4dd3b9c 100644
--- a/stream_fusion/web/playback/stream/views.py
+++ b/stream_fusion/web/playback/stream/views.py
@@ -1,4 +1,6 @@
+from io import BytesIO
import json
+from urllib.parse import unquote
import redis.asyncio as redis
import asyncio
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
@@ -13,75 +15,205 @@
from stream_fusion.utils.cache.local_redis import RedisCache
from stream_fusion.logging_config import logger
from stream_fusion.settings import settings
-from stream_fusion.utils.debrid.get_debrid_service import get_all_debrid_services, get_debrid_service
+from stream_fusion.utils.debrid.get_debrid_service import (
+ get_debrid_service,
+ get_download_service,
+)
+from stream_fusion.utils.debrid.realdebrid import RealDebrid
+from stream_fusion.utils.debrid.torbox import Torbox
from stream_fusion.utils.parse_config import parse_config
from stream_fusion.utils.string_encoding import decodeb64
from stream_fusion.utils.security import check_api_key
-from stream_fusion.constants import NO_CACHE_VIDEO_URL
from stream_fusion.web.playback.stream.schemas import (
ErrorResponse,
HeadResponse,
)
-
router = APIRouter()
-redis_client = redis.Redis(host=settings.redis_host, port=settings.redis_port, db=settings.redis_db)
-redis_session = create_redis_session(host=settings.redis_host, port=settings.redis_port, db=settings.redis_db)
+redis_client = redis.Redis(
+ host=settings.redis_host, port=settings.redis_port, db=settings.redis_db
+)
+redis_session = create_redis_session(
+ host=settings.redis_host, port=settings.redis_port, db=settings.redis_db
+)
+
+DOWNLOAD_IN_PROGRESS_FLAG = "DOWNLOAD_IN_PROGRESS"
class ProxyStreamer:
- def __init__(self, request: Request, url: str, headers: dict):
+ def __init__(
+ self,
+ request: Request,
+ url: str,
+ headers: dict,
+ buffer_size: int = settings.proxy_buffer_size,
+ ):
self.request = request
self.url = url
self.headers = headers
self.response = None
+ self.buffer = BytesIO()
+ self.buffer_size = buffer_size
+ self.eof = False
+
+ async def fill_buffer(self):
+ while len(self.buffer.getvalue()) < self.buffer_size and not self.eof:
+ chunk = await self.response.content.read(
+ self.buffer_size - len(self.buffer.getvalue())
+ )
+ if not chunk:
+ self.eof = True
+ break
+ self.buffer.write(chunk)
+ self.buffer.seek(0)
async def stream_content(self):
async with self.request.app.state.http_session.get(
self.url, headers=self.headers
) as self.response:
- async for chunk in self.response.content.iter_any():
- yield chunk
+ while True:
+ if self.buffer.tell() == len(self.buffer.getvalue()):
+ if self.eof:
+ break
+ self.buffer = BytesIO()
+ await self.fill_buffer()
+
+ chunk = self.buffer.read(8192)
+ if chunk:
+ yield chunk
+ else:
+ await asyncio.sleep(0.1)
async def close(self):
if self.response:
await self.response.release()
- logger.debug("Streaming connection closed")
+ logger.debug("Playback: Streaming connection closed")
+
+
+# class ProxyStreamer:
+# def __init__(self, request: Request, url: str, headers: dict):
+# self.request = request
+# self.url = url
+# self.headers = headers
+# self.response = None
+
+# async def stream_content(self):
+# async with self.request.app.state.http_session.get(
+# self.url, headers=self.headers
+# ) as self.response:
+# async for chunk in self.response.content.iter_any():
+# yield chunk
+
+# async def close(self):
+# if self.response:
+# await self.response.release()
+# logger.debug("Streaming connection closed")
+
+
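The rewritten ProxyStreamer adds a read-ahead buffer (settings.proxy_buffer_size bytes) between the upstream response and the client instead of relaying iter_any() chunks directly; the old pass-through version is kept commented above for comparison. A hedged sketch of how such a streamer is typically mounted in FastAPI (the real wiring lives elsewhere in this file; the media type is an assumption):

from starlette.background import BackgroundTask
from fastapi.responses import StreamingResponse

async def proxy_playback(request, url, headers):
    streamer = ProxyStreamer(request, url, headers)
    return StreamingResponse(
        streamer.stream_content(),
        media_type="video/mp4",                     # assumed content type
        background=BackgroundTask(streamer.close),  # release the upstream connection
    )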
+async def handle_download(
+ query: dict, config: dict, ip: str, redis_cache: RedisCache
+) -> str:
+ api_key = config.get("apiKey")
+ cache_key = f"download:{api_key}:{json.dumps(query)}_{ip}"
+
+ # Check if a download is already in progress
+ if await redis_cache.get(cache_key) == DOWNLOAD_IN_PROGRESS_FLAG:
+ logger.info("Playback: Download already in progress")
+ return settings.no_cache_video_url
+
+ # Mark the start of the download
+ await redis_cache.set(
+ cache_key, DOWNLOAD_IN_PROGRESS_FLAG, expiration=600 # 10 minute expiration
+ )
+
+ try:
+ debrid_service = get_download_service(config)
+ if not debrid_service:
+ raise HTTPException(
+ status_code=500, detail="Download service not available"
+ )
+
+ if isinstance(debrid_service, RealDebrid):
+ torrent_id = debrid_service.add_magnet_or_torrent_and_select(query, ip)
+ logger.success(
+ f"Playback: Added magnet or torrent to Real-Debrid: {torrent_id}"
+ )
+ if not torrent_id:
+ raise HTTPException(
+ status_code=500,
+ detail="Failed to add magnet or torrent to Real-Debrid",
+ )
+ elif isinstance(debrid_service, Torbox):
+ magnet = query["magnet"]
+ torrent_download = (
+ unquote(query["torrent_download"])
+ if query["torrent_download"] is not None
+ else None
+ )
+ privacy = query.get("privacy", "private")
+ torrent_info = debrid_service.add_magnet_or_torrent(
+ magnet, torrent_download, ip, privacy
+ )
+ logger.success(
+ f"Playback: Added magnet or torrent to TorBox: {magnet[:50]}"
+ )
+ if not torrent_info:
+ raise HTTPException(
+ status_code=500,
+ detail="Failed to add magnet or torrent to TorBox",
+ )
+ else:
+ magnet = query["magnet"]
+ torrent_download = (
+ unquote(query["torrent_download"])
+ if query["torrent_download"] is not None
+ else None
+ )
+ debrid_service.add_magnet_or_torrent(magnet, torrent_download, ip)
+ logger.success(
+ f"Playback: Added magnet or torrent to download service: {magnet[:50]}"
+ )
+
+ return settings.no_cache_video_url
+ except Exception as e:
+ await redis_cache.delete(cache_key)
+ logger.error(f"Playback: Error handling download: {str(e)}", exc_info=True)
+ raise
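The flag logic above is a small Redis-based debounce. Isolated sketch; get/set/delete and the expiration keyword mirror the RedisCache calls in handle_download:

async def debounced(redis_cache, cache_key, work):
    if await redis_cache.get(cache_key) == DOWNLOAD_IN_PROGRESS_FLAG:
        return None  # another request is already downloading this torrent
    await redis_cache.set(cache_key, DOWNLOAD_IN_PROGRESS_FLAG, expiration=600)
    try:
        return await work()
    except Exception:
        await redis_cache.delete(cache_key)  # clear the flag so clients can retry
        raise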
async def get_stream_link(
decoded_query: str, config: dict, ip: str, redis_cache: RedisCache
) -> str:
- logger.debug(f"Getting stream link for query: {decoded_query}, IP: {ip}")
+ logger.debug(f"Playback: Getting stream link for query: {decoded_query}, IP: {ip}")
api_key = config.get("apiKey")
cache_key = f"stream_link:{api_key}:{decoded_query}_{ip}"
cached_link = await redis_cache.get(cache_key)
if cached_link:
- logger.info(f"Stream link found in cache: {cached_link}")
+ logger.info(f"Playback: Stream link found in cache: {cached_link}")
return cached_link
- logger.debug("Stream link not found in cache, generating new link")
+ logger.debug("Playback: Stream link not found in cache, generating new link")
query = json.loads(decoded_query)
service = query.get("service", False)
- if service:
+ if service == "DL":
+ link = await handle_download(query, config, ip, redis_cache)
+ elif service:
debrid_service = get_debrid_service(config, service)
link = debrid_service.get_stream_link(query, config, ip)
else:
- debrid_service = get_debrid_service(config, "RD")
- # TODO: add the method to dowlnoad the torrent in the selected debrid service in config.
- debrid_service.get_stream_link(query, config, ip)
- link = NO_CACHE_VIDEO_URL
-
- if link != NO_CACHE_VIDEO_URL:
- logger.debug(f"Caching new stream link: {link}")
- await redis_cache.set(cache_key, link, expiration=7200) # Cache for 2 hours
- logger.info(f"New stream link generated and cached: {link}")
+ logger.error("Playback: Service not found in query")
+ raise HTTPException(status_code=500, detail="Service not found in query")
+
+ if link != settings.no_cache_video_url:
+ logger.debug(f"Playback: Caching new stream link: {link}")
+ await redis_cache.set(cache_key, link, expiration=3600) # Cache for 1 hour
+ logger.info(f"Playback: New stream link generated and cached: {link}")
else:
- logger.debug("Stream link not cached (NO_CACHE_VIDEO_URL)")
+        logger.debug("Playback: Stream link not cached (no-cache placeholder URL)")
return link
@@ -92,45 +224,52 @@ async def get_playback(
query: str,
request: Request,
redis_cache: RedisCache = Depends(get_redis_cache_dependency),
- apikey_dao: APIKeyDAO = Depends()
+ apikey_dao: APIKeyDAO = Depends(),
):
- logger.debug(f"Received playback request for config: {config}, query: {query}")
try:
config = parse_config(config)
api_key = config.get("apiKey")
if not api_key:
- logger.warning("API key not found in config.")
+ logger.warning("Playback: API key not found in config.")
raise HTTPException(status_code=401, detail="API key not found in config.")
await check_api_key(api_key, apikey_dao)
if not query:
- logger.warning("Query is empty")
+ logger.warning("Playback: Query is empty")
raise HTTPException(status_code=400, detail="Query required.")
decoded_query = decodeb64(query)
ip = request.client.host
- logger.debug(f"Decoded query: {decoded_query}, Client IP: {ip}")
+ logger.debug(f"Playback: Decoded query: {decoded_query}, Client IP: {ip}")
+
+ query_dict = json.loads(decoded_query)
+ logger.debug(f"Playback: Received playback request for query: {decoded_query}")
+ service = query_dict.get("service", False)
+
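+        # Direct downloads return immediately; other services funnel through the per-query lock below.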
+ if service == "DL":
+ link = await handle_download(query_dict, config, ip, redis_cache)
+ return RedirectResponse(url=link, status_code=status.HTTP_302_FOUND)
lock_key = f"lock:stream:{api_key}:{decoded_query}_{ip}"
lock = redis_client.lock(lock_key, timeout=60)
try:
if await lock.acquire(blocking=False):
- logger.debug("Lock acquired, getting stream link")
+ logger.debug("Playback: Lock acquired, getting stream link")
link = await get_stream_link(decoded_query, config, ip, redis_cache)
else:
- logger.debug("Lock not acquired, waiting for cached link")
+ logger.debug("Playback: Lock not acquired, waiting for cached link")
cache_key = f"stream_link:{api_key}:{decoded_query}_{ip}"
for _ in range(30):
await asyncio.sleep(1)
cached_link = await redis_cache.get(cache_key)
if cached_link:
- logger.debug("Cached link found while waiting")
+ logger.debug("Playback: Cached link found while waiting")
link = cached_link
break
else:
- logger.warning("Timed out waiting for cached link")
+ logger.warning("Playback: Timed out waiting for cached link")
raise HTTPException(
status_code=503,
detail="Service temporarily unavailable. Please try again.",
@@ -138,35 +277,39 @@ async def get_playback(
finally:
try:
await lock.release()
- logger.debug("Lock released")
+ logger.debug("Playback: Lock released")
except LockError:
- logger.warning("Failed to release lock (already released)")
+ logger.warning("Playback: Failed to release lock (already released)")
if not settings.proxied_link:
- logger.debug(f"Redirecting to non-proxied link: {link}")
+ logger.debug(f"Playback: Redirecting to non-proxied link: {link}")
return RedirectResponse(
url=link, status_code=status.HTTP_301_MOVED_PERMANENTLY
)
- logger.debug("Preparing to proxy stream")
+        if service == "TB":  # TODO: confirm the proxy bypass is required for TorBox
+ logger.debug("Playback: Bypass proxied link for TorBox")
+ return RedirectResponse(url=link, status_code=status.HTTP_302_FOUND)
+
+ logger.debug("Playback: Preparing to proxy stream")
headers = {}
range_header = request.headers.get("range")
if range_header and "=" in range_header:
- logger.debug(f"Range header found: {range_header}")
+ logger.debug(f"Playback: Range header found: {range_header}")
range_value = range_header.strip().split("=")[1]
range_parts = range_value.split("-")
start = int(range_parts[0]) if range_parts[0] else 0
end = int(range_parts[1]) if len(range_parts) > 1 and range_parts[1] else ""
headers["Range"] = f"bytes={start}-{end}"
- logger.debug(f"Range header set: {headers['Range']}")
+ logger.debug(f"Playback: Range header set: {headers['Range']}")
streamer = ProxyStreamer(request, link, headers)
- logger.debug(f"Initiating request to: {link}")
+ logger.debug(f"Playback: Initiating request to: {link}")
async with request.app.state.http_session.head(
link, headers=headers
) as response:
- logger.debug(f"Response status: {response.status}")
+ logger.debug(f"Playback: Response status: {response.status}")
stream_headers = {
"Content-Type": "video/mp4",
"Accept-Ranges": "bytes",
@@ -178,15 +321,17 @@ async def get_playback(
}
if response.status == 206:
- logger.debug("Partial content response")
+ logger.debug("Playback: Partial content response")
stream_headers["Content-Range"] = response.headers["Content-Range"]
for header in ["Content-Length", "ETag", "Last-Modified"]:
if header in response.headers:
stream_headers[header] = response.headers[header]
- logger.debug(f"Header set: {header}: {stream_headers[header]}")
+ logger.debug(
+ f"Playback: Header set: {header}: {stream_headers[header]}"
+ )
- logger.debug("Preparing streaming response")
+ logger.debug("Playback: Preparing streaming response")
return StreamingResponse(
streamer.stream_content(),
status_code=206 if "Range" in headers else 200,
@@ -195,7 +340,7 @@ async def get_playback(
)
except Exception as e:
- logger.error(f"Playback error: {str(e)}", exc_info=True)
+        logger.error(f"Playback: Error during playback: {str(e)}", exc_info=True)
raise HTTPException(
status_code=500,
detail=ErrorResponse(
@@ -214,7 +359,7 @@ async def head_playback(
query: str,
request: Request,
redis_cache: RedisCache = Depends(get_redis_cache_dependency),
- apikey_dao: APIKeyDAO = Depends()
+ apikey_dao: APIKeyDAO = Depends(),
):
try:
config = parse_config(config)
@@ -222,14 +367,16 @@ async def head_playback(
if api_key:
await check_api_key(api_key, apikey_dao)
else:
- logger.warning("API key not found in config.")
+ logger.warning("Playback: API key not found in config.")
raise HTTPException(status_code=401, detail="API key not found in config.")
if not query:
raise HTTPException(status_code=400, detail="Query required.")
+
decoded_query = decodeb64(query)
ip = request.client.host
- cache_key = f"stream_link:{api_key}:{decoded_query}_{ip}"
+ query_dict = json.loads(decoded_query)
+ service = query_dict.get("service", False)
headers = {
"Content-Type": "video/mp4",
@@ -241,11 +388,25 @@ async def head_playback(
"Access-Control-Allow-Methods": "GET, HEAD, OPTIONS",
}
+ if service == "DL":
+ # Check if download is in progress
+ download_cache_key = f"download:{api_key}:{json.dumps(query_dict)}_{ip}"
+ if await redis_cache.get(download_cache_key) == DOWNLOAD_IN_PROGRESS_FLAG:
+ logger.info("Playback: Download in progress, returning 202 Accepted")
+ return Response(status_code=status.HTTP_202_ACCEPTED, headers=headers)
+ else:
+ logger.info("Playback: Download not started, returning 200 OK")
+ return Response(status_code=status.HTTP_200_OK, headers=headers)
+
+ cache_key = f"stream_link:{api_key}:{decoded_query}_{ip}"
+
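+    # Poll briefly for a stream link cached by a concurrent GET request.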
for _ in range(30):
if await redis_cache.exists(cache_key):
link = await redis_cache.get(cache_key)
- if not settings.proxied_link: # avoid sending HEAD request if link is sent directly
+ if (
+ not settings.proxied_link
+ ): # avoid sending HEAD request if link is sent directly
return Response(status_code=status.HTTP_200_OK, headers=headers)
async with request.app.state.http_session.head(link) as response:
@@ -260,14 +421,17 @@ async def head_playback(
return Response(status_code=status.HTTP_202_ACCEPTED, headers=headers)
except redis.ConnectionError as e:
- logger.error(f"Redis connection error: {e}")
- return Response(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, content="Service temporarily unavailable")
+ logger.error(f"Playback: Redis connection error: {e}")
+ return Response(
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+ content="Service temporarily unavailable",
+ )
except Exception as e:
- logger.error(f"HEAD request error: {e}")
+ logger.error(f"Playback: HEAD request error: {e}")
return Response(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
content=ErrorResponse(
detail="An error occurred while processing the request."
).model_dump_json(),
- media_type="application/json"
- )
\ No newline at end of file
+ media_type="application/json",
+ )
diff --git a/stream_fusion/web/root/config/views.py b/stream_fusion/web/root/config/views.py
index 86180ee..ac8f77e 100644
--- a/stream_fusion/web/root/config/views.py
+++ b/stream_fusion/web/root/config/views.py
@@ -34,6 +34,7 @@ async def configure(request: Request):
"sharewood_unique_account": settings.sharewood_unique_account,
"ygg_unique_account": settings.ygg_unique_account,
"jackett_enable": settings.jackett_enable,
+ "tb_unique_account": settings.tb_unique_account,
})
diff --git a/stream_fusion/web/root/search/views.py b/stream_fusion/web/root/search/views.py
index 909eca5..67af062 100644
--- a/stream_fusion/web/root/search/views.py
+++ b/stream_fusion/web/root/search/views.py
@@ -9,6 +9,9 @@
from stream_fusion.utils.cache.local_redis import RedisCache
from stream_fusion.logging_config import logger
from stream_fusion.utils.debrid.get_debrid_service import get_all_debrid_services
+from stream_fusion.utils.filter.results_per_quality_filter import (
+ ResultsPerQualityFilter,
+)
from stream_fusion.utils.filter_results import (
filter_items,
merge_items,
@@ -16,6 +19,7 @@
)
from stream_fusion.utils.jackett.jackett_result import JackettResult
from stream_fusion.utils.jackett.jackett_service import JackettService
+from stream_fusion.utils.parser.parser_service import StreamParser
from stream_fusion.utils.sharewood.sharewood_service import SharewoodService
from stream_fusion.utils.yggfilx.yggflix_service import YggflixService
from stream_fusion.utils.metdata.cinemeta import Cinemeta
@@ -45,10 +49,10 @@ async def get_results(
request: Request,
redis_cache: RedisCache = Depends(get_redis_cache_dependency),
apikey_dao: APIKeyDAO = Depends(),
- torrent_dao: TorrentItemDAO = Depends()
+ torrent_dao: TorrentItemDAO = Depends(),
) -> SearchResponse:
start = time.time()
- logger.info(f"Stream request: {stream_type} - {stream_id}")
+ logger.info(f"Search: Stream request initiated for {stream_type} - {stream_id}")
stream_id = stream_id.replace(".json", "")
config = parse_config(config)
@@ -56,11 +60,11 @@ async def get_results(
if api_key:
await check_api_key(api_key, apikey_dao)
else:
- logger.warning("API key not found in config.")
+ logger.warning("Search: API key not found in config.")
raise HTTPException(status_code=401, detail="API key not found in config.")
def get_metadata():
- logger.info(f"Fetching metadata from {config['metadataProvider']}")
+ logger.info(f"Search: Fetching metadata from {config['metadataProvider']}")
if config["metadataProvider"] == "tmdb" and settings.tmdb_api_key:
metadata_provider = TMDB(config)
else:
@@ -70,29 +74,36 @@ def get_metadata():
media = await redis_cache.get_or_set(
get_metadata, stream_id, stream_type, config["metadataProvider"]
)
- logger.info(f"Retrieved media metadata: {str(media.titles)}")
+ logger.debug(f"Search: Retrieved media metadata for {str(media.titles)}")
def stream_cache_key(media):
if isinstance(media, Movie):
- key_string = f"stream:{api_key}:{media.titles[0]}:{media.year}:{media.languages[0]}"
+ key_string = (
+ f"stream:{api_key}:{media.titles[0]}:{media.year}:{media.languages[0]}"
+ )
elif isinstance(media, Series):
key_string = f"stream:{api_key}:{media.titles[0]}:{media.languages[0]}:{media.season}{media.episode}"
else:
- raise TypeError("Only Movie and Series are allowed as media!")
+ logger.error("Search: Only Movie and Series are allowed as media!")
+ raise HTTPException(
+ status_code=500, detail="Only Movie and Series are allowed as media!"
+ )
hashed_key = hashlib.sha256(key_string.encode("utf-8")).hexdigest()
return hashed_key[:16]
cached_result = await redis_cache.get(stream_cache_key(media))
if cached_result is not None:
- logger.info("Returning cached processed results")
+ logger.info("Search: Returning cached processed results")
total_time = time.time() - start
- logger.info(f"Request completed in {total_time:.2f} seconds")
+ logger.success(f"Search: Request completed in {total_time:.2f} seconds")
return SearchResponse(streams=cached_result)
debrid_services = get_all_debrid_services(config)
- logger.info(f"Found {len(debrid_services)} debrid services")
- logger.info(f"Debrid services: {[debrid.__class__.__name__ for debrid in debrid_services]}")
-
+ logger.debug(f"Search: Found {len(debrid_services)} debrid services")
+ logger.info(
+ f"Search: Debrid services: {[debrid.__class__.__name__ for debrid in debrid_services]}"
+ )
+
def media_cache_key(media):
if isinstance(media, Movie):
key_string = f"media:{media.titles[0]}:{media.year}:{media.languages[0]}"
@@ -114,8 +125,8 @@ async def perform_search(update_cache=False):
if config["cache"] and not update_cache:
public_cached_results = search_public(media)
if public_cached_results:
- logger.info(
- f"Found {len(public_cached_results)} public cached results"
+ logger.success(
+ f"Search: Found {len(public_cached_results)} public cached results"
)
public_cached_results = [
JackettResult().from_cached_item(torrent, media)
@@ -130,14 +141,31 @@ async def perform_search(update_cache=False):
)
search_results.extend(public_cached_results)
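+            # Indexer services below run in priority order; each is skipped once minCachedResults is met.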
+ if config["yggflix"] and len(search_results) < int(
+ config["minCachedResults"]
+ ):
+ yggflix_service = YggflixService(config)
+ yggflix_search_results = yggflix_service.search(media)
+ if yggflix_search_results:
+ logger.success(
+ f"Search: Found {len(yggflix_search_results)} results from YggFlix"
+ )
+ yggflix_search_results = filter_items(
+ yggflix_search_results, media, config=config
+ )
+ yggflix_search_results = await torrent_service.convert_and_process(
+ yggflix_search_results
+ )
+ search_results = merge_items(search_results, yggflix_search_results)
+
if config["zilean"] and len(search_results) < int(
config["minCachedResults"]
):
zilean_service = ZileanService(config)
zilean_search_results = zilean_service.search(media)
if zilean_search_results:
- logger.info(
- f"Found {len(zilean_search_results)} results from Zilean"
+ logger.success(
+ f"Search: Found {len(zilean_search_results)} results from Zilean"
)
zilean_search_results = [
ZileanResult().from_api_cached_item(torrent, media)
@@ -150,25 +178,10 @@ async def perform_search(update_cache=False):
zilean_search_results = await torrent_service.convert_and_process(
zilean_search_results
)
- logger.debug(f"Zilean final search results: {len(zilean_search_results)}")
- search_results = merge_items(search_results, zilean_search_results)
-
- if config["yggflix"] and len(search_results) < int(
- config["minCachedResults"]
- ):
- yggflix_service = YggflixService(config)
- yggflix_search_results = yggflix_service.search(media)
- if yggflix_search_results:
logger.info(
- f"Found {len(yggflix_search_results)} results from YggFlix"
- )
- yggflix_search_results = filter_items(
- yggflix_search_results, media, config=config
- )
- yggflix_search_results = await torrent_service.convert_and_process(
- yggflix_search_results
+ f"Search: Zilean final search results: {len(zilean_search_results)}"
)
- search_results = merge_items(search_results, yggflix_search_results)
+ search_results = merge_items(search_results, zilean_search_results)
if config["sharewood"] and len(search_results) < int(
config["minCachedResults"]
@@ -176,46 +189,49 @@ async def perform_search(update_cache=False):
sharewood_service = SharewoodService(config)
sharewood_search_results = sharewood_service.search(media)
if sharewood_search_results:
- logger.info(
- f"Found {len(sharewood_search_results)} results from Sharewood"
+ logger.success(
+ f"Search: Found {len(sharewood_search_results)} results from Sharewood"
)
sharewood_search_results = filter_items(
sharewood_search_results, media, config=config
)
- sharewood_search_results = await torrent_service.convert_and_process(
- sharewood_search_results
+ sharewood_search_results = (
+ await torrent_service.convert_and_process(
+ sharewood_search_results
+ )
+ )
+ search_results = merge_items(
+ search_results, sharewood_search_results
)
- search_results = merge_items(search_results, sharewood_search_results)
if config["jackett"] and len(search_results) < int(
config["minCachedResults"]
):
jackett_service = JackettService(config)
jackett_search_results = jackett_service.search(media)
- logger.info(f"Found {len(jackett_search_results)} results from Jackett")
+ logger.success(
+ f"Search: Found {len(jackett_search_results)} results from Jackett"
+ )
filtered_jackett_search_results = filter_items(
jackett_search_results, media, config=config
)
- logger.info(
- f"Filtered Jackett results: {len(filtered_jackett_search_results)}"
- )
if filtered_jackett_search_results:
torrent_results = await torrent_service.convert_and_process(
filtered_jackett_search_results
)
- logger.debug(
- f"Converted {len(torrent_results)} Jackett results to TorrentItems"
- )
search_results = merge_items(search_results, torrent_results)
if update_cache and search_results:
- logger.info(f"Updating cache with {len(search_results)} results")
+ logger.info(
+ f"Search: Updating cache with {len(search_results)} results"
+ )
try:
cache_key = media_cache_key(media)
search_results_dict = [item.to_dict() for item in search_results]
await redis_cache.set(cache_key, search_results_dict)
+ logger.success("Search: Cache update successful")
except Exception as e:
- logger.error(f"Error updating cache: {e}")
+ logger.error(f"Search: Error updating cache: {e}")
await perform_search()
return search_results
@@ -226,20 +242,27 @@ async def get_and_filter_results(media, config):
unfiltered_results = await redis_cache.get(cache_key)
if unfiltered_results is None:
- logger.info("No results in cache. Performing new search.")
+ logger.debug("Search: No results in cache. Performing new search.")
nocache_results = await get_search_results(media, config)
nocache_results_dict = [item.to_dict() for item in nocache_results]
await redis_cache.set(cache_key, nocache_results_dict)
+ logger.info(
+ f"Search: New search completed, found {len(nocache_results)} results"
+ )
return nocache_results
else:
- logger.info(f"Results retrieved from redis cache - {len(unfiltered_results)}.")
- unfiltered_results = [TorrentItem.from_dict(item) for item in unfiltered_results]
+ logger.info(
+ f"Search: Retrieved {len(unfiltered_results)} results from redis cache"
+ )
+ unfiltered_results = [
+ TorrentItem.from_dict(item) for item in unfiltered_results
+ ]
filtered_results = filter_items(unfiltered_results, media, config=config)
if len(filtered_results) < min_results:
logger.info(
- f"Insufficient filtered results ({len(filtered_results)}). Performing new search."
+ f"Search: Insufficient filtered results ({len(filtered_results)}). Performing new search."
)
await redis_cache.delete(cache_key)
unfiltered_results = await get_search_results(media, config)
@@ -247,10 +270,15 @@ async def get_and_filter_results(media, config):
await redis_cache.set(cache_key, unfiltered_results_dict)
filtered_results = filter_items(unfiltered_results, media, config=config)
- logger.info(f"Final number of filtered results: {len(filtered_results)}")
+ logger.success(
+ f"Search: Final number of filtered results: {len(filtered_results)}"
+ )
return filtered_results
- search_results = await get_and_filter_results(media, config)
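+    # ResultsPerQualityFilter caps how many results are kept per quality tier before stream processing.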
+ raw_search_results = await get_and_filter_results(media, config)
+ logger.debug(f"Search: Filtered search results: {len(raw_search_results)}")
+ search_results = ResultsPerQualityFilter(config).filter(raw_search_results)
+ logger.info(f"Search: Filtered search results per quality: {len(search_results)}")
def stream_processing(search_results, media, config):
torrent_smart_container = TorrentSmartContainer(search_results, media)
@@ -264,29 +292,31 @@ def stream_processing(search_results, media, config):
torrent_smart_container.update_availability(
result, type(debrid), media
)
- logger.info(f"Checked availability for {len(result.items())} items")
+ logger.info(
+ f"Search: Checked availability for {len(result.items())} items with {type(debrid).__name__}"
+ )
else:
- logger.info("No availability results found in debrid service")
- logger.info("Please check your proxy settings")
+ logger.warning(
+ "Search: No availability results found in debrid service"
+ )
if config["cache"]:
- logger.debug("Caching public container items")
+ logger.info("Search: Caching public container items")
torrent_smart_container.cache_container_items()
best_matching_results = torrent_smart_container.get_best_matching()
best_matching_results = sort_items(best_matching_results, config)
- logger.info(f"Found {len(best_matching_results)} best matching results")
+ logger.info(f"Search: Found {len(best_matching_results)} best matching results")
- stream_list = parse_to_stremio_streams(best_matching_results, config, media)
- logger.info(f"Processed {len(stream_list)} streams for Stremio")
+ parser = StreamParser(config)
+ stream_list = parser.parse_to_stremio_streams(best_matching_results, media)
+ logger.success(f"Search: Processed {len(stream_list)} streams for Stremio")
return stream_list
stream_list = stream_processing(search_results, media, config)
streams = [Stream(**stream) for stream in stream_list]
- await redis_cache.set(
- stream_cache_key(media), streams, expiration=3600
- )
+ await redis_cache.set(stream_cache_key(media), streams, expiration=3600)
total_time = time.time() - start
- logger.info(f"Request completed in {total_time:.2f} seconds")
- return SearchResponse(streams=streams)
\ No newline at end of file
+ logger.info(f"Search: Request completed in {total_time:.2f} seconds")
+ return SearchResponse(streams=streams)