diff --git a/CHANGELOG b/CHANGELOG
index ac19dc300..17cbddfc8 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,7 +2,7 @@
 Added tenacity requirement at 8.4.2
 Update PlexAPI requirement to 4.15.14
 Update psutil requirement to 6.0.0
-Update setuptools requirement to 70.1.0
+Update setuptools requirement to 70.1.1
 
 # Removed Features
 
@@ -16,5 +16,6 @@ Added [`letterboxd_user_lists`](https://kometa.wiki/en/latest/files/dynamic_type
 
 # Bug Fixes
 Fixed multiple anime `int()` Errors
 Fixed #2100 `verify_ssl` wasn't working when downloading images
+Fixed `imdb_watchlist`
 Various other Minor Fixes
\ No newline at end of file
diff --git a/docs/files/builders/imdb.md b/docs/files/builders/imdb.md
index 794bfd432..fa62b3b54 100644
--- a/docs/files/builders/imdb.md
+++ b/docs/files/builders/imdb.md
@@ -118,31 +118,36 @@ collections:
 
 Finds every item in an IMDb User's Watchlist.
 
-The expected input is an IMDb User ID (example: `ur12345678`). Multiple values are supported as a list or as a comma-separated string.
+| List Parameter | Description |
+|:---------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `user_id`      | Specify the User ID for the IMDb Watchlist. **This attribute is required.**<br>**Options:** The ID that starts with `ur` found in the URL of the watchlist. (ex. `ur12345678`) |
+| `limit`        | Specify how many items you want returned by the query.<br>**Options:** Any Integer `0` or greater, where `0` gets all items.<br>**Default:** `0` |
+| `sort_by`      | Choose from one of the many available sort options.<br>**Options:** `custom.asc`, `custom.desc`, `title.asc`, `title.desc`, `rating.asc`, `rating.desc`, `popularity.asc`, `popularity.desc`, `votes.asc`, `votes.desc`, `release.asc`, `release.desc`, `runtime.asc`, `runtime.desc`, `added.asc`, `added.desc`<br>**Default:** `custom.asc` |
+
+Multiple values are supported as a list only; a comma-separated string will not work.
 
 The `sync_mode: sync` and `collection_order: custom` Setting are recommended since the lists are continuously updated and in a specific order.
 
 ```yaml
 collections:
   My Watch Watchlist:
-    imdb_watchlist: ur64054558
-    collection_order: custom
-    sync_mode: sync
-```
-```yaml
-collections:
-  My Friends Watchlists:
-    imdb_watchlist: ur64054558, ur12345678
+    imdb_watchlist:
+      user_id: ur64054558
+      sort_by: rating.asc
     collection_order: custom
     sync_mode: sync
 ```
+
 ```yaml
 collections:
   My Friends Watchlists:
     imdb_watchlist:
-      - ur64054558
-      - ur12345678
-    collection_order: custom
+      - user_id: ur64054558
+        sort_by: rating.asc
+        limit: 100
+      - user_id: ur12345678
+        sort_by: rating.asc
+        limit: 100
     sync_mode: sync
 ```
diff --git a/modules/builder.py b/modules/builder.py
index 2b3d6780e..ddb51c82d 100644
--- a/modules/builder.py
+++ b/modules/builder.py
@@ -1477,12 +1477,9 @@ def _imdb(self, method_name, method_data):
                     self.builders.append((method_name, value))
                 else:
                     raise Failed(f"{self.Type} Error: imdb_id {value} must begin with tt")
-        elif method_name == "imdb_list":
-            try:
-                for imdb_dict in self.config.IMDb.validate_imdb_lists(self.Type, method_data):
-                    self.builders.append((method_name, imdb_dict))
-            except Failed as e:
-                logger.error(e)
+        elif method_name in ["imdb_list", "imdb_watchlist"]:
+            for imdb_dict in self.config.IMDb.validate_imdb(self.Type, method_name, method_data):
+                self.builders.append((method_name, imdb_dict))
         elif method_name == "imdb_chart":
             for value in util.get_list(method_data):
                 if value in imdb.movie_charts and not self.library.is_movie:
@@ -1493,9 +1490,6 @@
                     self.builders.append((method_name, value))
                 else:
                     raise Failed(f"{self.Type} Error: chart: {value} is invalid options are {[i for i in imdb.charts]}")
-        elif method_name == "imdb_watchlist":
-            for imdb_user in self.config.IMDb.validate_imdb_watchlists(self.Type, method_data, self.language):
-                self.builders.append((method_name, imdb_user))
         elif method_name == "imdb_award":
             for dict_data in util.parse(self.Type, method_name, method_data, datatype="listdict"):
                 dict_methods = {dm.lower(): dm for dm in dict_data}
diff --git a/modules/imdb.py b/modules/imdb.py
index d833fd4ed..94d49296e 100644
--- a/modules/imdb.py
+++ b/modules/imdb.py
@@ -1,6 +1,5 @@
-import csv, gzip, json, math, os, re, shutil, time
+import csv, gzip, json, math, os, re, shutil
 from modules import util
-from modules.request import parse_qs, urlparse
 from modules.util import Failed
 
 logger = util.logger
@@ -127,6 +126,7 @@
 git_base = "https://raw.githubusercontent.com/Kometa-Team/IMDb-Awards/master"
 search_hash_url = "https://raw.githubusercontent.com/Kometa-Team/IMDb-Hash/master/HASH"
 list_hash_url = "https://raw.githubusercontent.com/Kometa-Team/IMDb-Hash/master/LIST_HASH"
+watchlist_hash_url = "https://raw.githubusercontent.com/Kometa-Team/IMDb-Hash/master/WATCHLIST_HASH"
 graphql_url = "https://api.graphql.imdb.com/"
 list_url = f"{base_url}/list/ls"
@@ -142,6 +142,7 @@ def __init__(self, requests, cache, default_dir):
         self._events = {}
         self._search_hash = None
         self._list_hash = None
+        self._watchlist_hash = None
         self.event_url_validation = {}
 
     def _request(self, url, language=None, xpath=None, params=None):
@@ -166,6 +167,12 @@ def list_hash(self):
             self._list_hash = self.requests.get(list_hash_url).text.strip()
         return self._list_hash
 
+    @property
+    def watchlist_hash(self):
+        if self._watchlist_hash is None:
+            self._watchlist_hash = self.requests.get(watchlist_hash_url).text.strip()
+        return self._watchlist_hash
+
     @property
     def events_validation(self):
         if self._events_validation is None:
@@ -177,32 +184,44 @@ def get_event(self, event_id):
             self._events[event_id] = self.requests.get_yaml(f"{git_base}/events/{event_id}.yml").data
         return self._events[event_id]
 
-    def validate_imdb_lists(self, err_type, imdb_lists):
+    def validate_imdb(self, err_type, method, imdb_dicts):
         valid_lists = []
-        for imdb_dict in util.get_list(imdb_lists, split=False):
+        main = "list_id" if method == "imdb_list" else "user_id"
+        for imdb_dict in util.get_list(imdb_dicts, split=True if isinstance(imdb_dicts, str) else False):
             if not isinstance(imdb_dict, dict):
-                imdb_dict = {"list_id": imdb_dict}
-            if "url" in imdb_dict and "list_id" not in imdb_dict:
-                imdb_dict["list_id"] = imdb_dict["url"]
+                imdb_dict = {main: imdb_dict}
+            if "url" in imdb_dict and main not in imdb_dict:
+                imdb_dict[main] = imdb_dict["url"]
             dict_methods = {dm.lower(): dm for dm in imdb_dict}
-            if "list_id" not in dict_methods:
-                raise Failed(f"{err_type} Error: imdb_list list_id attribute not found")
-            elif imdb_dict[dict_methods["list_id"]] is None:
-                raise Failed(f"{err_type} Error: imdb_list list_id attribute is blank")
+            if main not in dict_methods:
+                raise Failed(f"{err_type} Error: {method} {main} attribute not found")
+            elif imdb_dict[dict_methods[main]] is None:
+                raise Failed(f"{err_type} Error: {method} {main} attribute is blank")
             else:
-                imdb_url = imdb_dict[dict_methods["list_id"]].strip()
-                if imdb_url.startswith(f"{base_url}/search/"):
-                    raise Failed("IMDb Error: URLs with https://www.imdb.com/search/ no longer works with imdb_list use imdb_search.")
-                if imdb_url.startswith(f"{base_url}/filmosearch/"):
-                    raise Failed("IMDb Error: URLs with https://www.imdb.com/filmosearch/ no longer works with imdb_list use imdb_search.")
-                search = re.search(r"(ls\d+)", imdb_url)
-                if not search:
-                    raise Failed("IMDb Error: imdb_list list_id must begin with ls (ex. ls005526372)")
-                new_dict = {"list_id": search.group(1)}
+                main_data = imdb_dict[dict_methods[main]].strip()
+                if method == "imdb_list":
+                    if main_data.startswith(f"{base_url}/search/"):
+                        raise Failed(f"IMDb Error: URLs with https://www.imdb.com/search/ no longer works with {method} use imdb_search.")
+                    if main_data.startswith(f"{base_url}/filmosearch/"):
+                        raise Failed(f"IMDb Error: URLs with https://www.imdb.com/filmosearch/ no longer works with {method} use imdb_search.")
+                    search = re.search(r"(ls\d+)", main_data)
+                    if not search:
+                        raise Failed(f"IMDb Error: {method} {main} must begin with ls (ex. ls005526372)")
+                    new_dict = {main: search.group(1)}
+                else:
+                    user_id = None
+                    if main_data.startswith("ur"):
+                        try:
+                            user_id = int(main_data[2:])
+                        except ValueError:
+                            pass
+                    if not user_id:
+                        raise Failed(f"{err_type} Error: {method} {main}: {main_data} not in the format of 'ur########'")
+                    new_dict = {main: main_data}
                 if "limit" in dict_methods:
                     if imdb_dict[dict_methods["limit"]] is None:
-                        logger.warning(f"{err_type} Warning: imdb_list limit attribute is blank using 0 as default")
+                        logger.warning(f"{err_type} Warning: {method} limit attribute is blank using 0 as default")
                     else:
                         try:
                             value = int(str(imdb_dict[dict_methods["limit"]]))
@@ -211,31 +230,16 @@
                         except ValueError:
                             pass
                     if "limit" not in new_dict:
-                        logger.warning(f"{err_type} Warning: imdb_list limit attribute: {imdb_dict[dict_methods['limit']]} must be an integer 0 or greater using 0 as default")
+                        logger.warning(f"{err_type} Warning: {method} limit attribute: {imdb_dict[dict_methods['limit']]} must be an integer 0 or greater using 0 as default")
                 if "limit" not in new_dict:
                     new_dict["limit"] = 0
                 if "sort_by" in dict_methods:
-                    new_dict["sort_by"] = util.parse(err_type, dict_methods, imdb_dict, parent="imdb_list", default="custom.asc", options=list_sort_options)
+                    new_dict["sort_by"] = util.parse(err_type, dict_methods, imdb_dict, parent=method, default="custom.asc", options=list_sort_options)
                 valid_lists.append(new_dict)
         return valid_lists
 
-    def validate_imdb_watchlists(self, err_type, users, language):
-        valid_users = []
-        for user in util.get_list(users):
-            user_id = None
-            if user.startswith("ur"):
-                try:
-                    user_id = int(user[2:])
-                except ValueError:
-                    pass
-            if not user_id:
-                raise Failed(f"{err_type} Error: User {user} not in the format of 'ur########'")
-            if self._watchlist(user, language):
-                valid_users.append(user)
-        return valid_users
-
     def get_event_years(self, event_id):
         if event_id in self.events_validation:
             return True, self.events_validation[event_id]["years"]
@@ -263,16 +267,16 @@ def get_award_names(self, event_id, event_year):
                 break
         return award_names, category_names
 
-    def _watchlist(self, user, language):
-        imdb_url = f"{base_url}/user/{user}/watchlist"
-        for text in self._request(imdb_url, language=language, xpath="//div[@class='article']/script/text()")[0].split("\n"):
-            if text.strip().startswith("IMDbReactInitialState.push"):
-                jsonline = text.strip()
-                return [f for f in json.loads(jsonline[jsonline.find('{'):-2])["starbars"]]
-        raise Failed(f"IMDb Error: Failed to parse URL: {imdb_url}")
+    def _json_operation(self, list_type):
+        if list_type == "search":
+            return "AdvancedTitleSearch", self.search_hash
+        elif list_type == "list":
+            return "TitleListMainPage", self.list_hash
+        else:
+            return "WatchListPageRefiner", self.watchlist_hash
 
-    def _graphql_json(self, data, search=True):
-        page_limit = 250 if search else 100
+    def _graphql_json(self, data, list_type):
+        page_limit = 250 if list_type == "search" else 100
         out = {
             "locale": "en-US",
             "first": data["limit"] if "limit" in data and 0 < data["limit"] < page_limit else page_limit,
@@ -302,10 +306,10 @@ def check_constraint(bases, mods, constraint, lower="", translation=None, range_
             if range_data:
                 out[constraint][range_name[i]] = range_data
 
-        sort = data["sort_by"] if "sort_by" in data else "popularity.asc" if search else "custom.asc"
+        sort = data["sort_by"] if "sort_by" in data else "popularity.asc" if list_type == "search" else "custom.asc"
         sort_by, sort_order = sort.split(".")
 
-        if search:
+        if list_type == "search":
             out["titleTypeConstraint"] = {"anyTitleTypeIds": [title_type_options[t] for t in data["type"]] if "type" in data else []}
             out["sortBy"] = sort_by_options[sort_by]
             out["sortOrder"] = sort_order.upper()
@@ -373,24 +377,26 @@ def check_constraint(bases, mods, constraint, lower="", translation=None, range_
             if "adult" in data and data["adult"]:
                 out["explicitContentConstraint"] = {"explicitContentFilter": "INCLUDE_ADULT"}
         else:
-            out["lsConst"] = data["list_id"]
+            if list_type == "list":
+                out["lsConst"] = data["list_id"]
+            else:
+                out["urConst"] = data["user_id"]
             out["sort"] = {"by": list_sort_by_options[sort_by], "order": sort_order.upper()}
         logger.trace(out)
-        return {
-            "operationName": "AdvancedTitleSearch" if search else "TitleListMainPage",
-            "variables": out,
-            "extensions": {"persistedQuery": {"version": 1, "sha256Hash": self.search_hash if search else self.list_hash}}
-        }
+        op, sha = self._json_operation(list_type)
+        return {"operationName": op, "variables": out, "extensions": {"persistedQuery": {"version": 1, "sha256Hash": sha}}}
 
-    def _pagination(self, data, search=True):
-        json_obj = self._graphql_json(data, search=search)
-        item_count = 250 if search else 100
+    def _pagination(self, data, list_type):
+        is_list = list_type != "search"
+        json_obj = self._graphql_json(data, list_type)
+        item_count = 100 if is_list else 250
         imdb_ids = []
         logger.ghost("Parsing Page 1")
         response_json = self._graph_request(json_obj)
         try:
-            search_data = response_json["data"]["advancedTitleSearch"] if search else response_json["data"]["list"]["titleListItemSearch"]
+            step = "list" if list_type == "list" else "predefinedList"
+            search_data = response_json["data"][step]["titleListItemSearch"] if is_list else response_json["data"]["advancedTitleSearch"]
             total = search_data["total"]
             limit = data["limit"]
             if limit < 1 or total < limit:
@@ -400,16 +406,16 @@ def _pagination(self, data, search=True):
                 remainder = item_count
             num_of_pages = math.ceil(int(limit) / item_count)
             end_cursor = search_data["pageInfo"]["endCursor"]
-            imdb_ids.extend([n["node"]["title"]["id"] if search else n["listItem"]["id"] for n in search_data["edges"]])
+            imdb_ids.extend([n["listItem"]["id"] if is_list else n["node"]["title"]["id"] for n in search_data["edges"]])
             if num_of_pages > 1:
                 for i in range(2, num_of_pages + 1):
                     start_num = (i - 1) * item_count + 1
                     logger.ghost(f"Parsing Page {i}/{num_of_pages} {start_num}-{limit if i == num_of_pages else i * item_count}")
                     json_obj["variables"]["after"] = end_cursor
                     response_json = self._graph_request(json_obj)
-                    search_data = response_json["data"]["advancedTitleSearch"] if search else response_json["data"]["list"]["titleListItemSearch"]
+                    search_data = response_json["data"][step]["titleListItemSearch"] if is_list else response_json["data"]["advancedTitleSearch"]
                     end_cursor = search_data["pageInfo"]["endCursor"]
-                    ids_found = [n["node"]["title"]["id"] if search else n["listItem"]["id"] for n in search_data["edges"]]
+                    ids_found = [n["listItem"]["id"] if is_list else n["node"]["title"]["id"] for n in search_data["edges"]]
                     if i == num_of_pages:
                         ids_found = ids_found[:remainder]
                     imdb_ids.extend(ids_found)
@@ -511,19 +517,16 @@ def get_imdb_ids(self, method, data, language):
         if method == "imdb_id":
            logger.info(f"Processing IMDb ID: {data}")
            return [(data, "imdb")]
-        elif method == "imdb_list":
-            logger.info(f"Processing IMDb List: {data['list_id']}")
+        elif method in ["imdb_list", "imdb_watchlist"]:
+            logger.info(f"Processing IMDb {'List' if method == 'imdb_list' else 'Watchlist'}: {data['list_id' if method == 'imdb_list' else 'user_id']}")
             if data["limit"] > 0:
                 logger.info(f" Limit: {data['limit']}")
             if "sort_by" in data:
                 logger.info(f" Sort By: {data['sort_by']}")
-            return [(i, "imdb") for i in self._pagination(data, search=False)]
+            return [(i, "imdb") for i in self._pagination(data, "list" if method == "imdb_list" else "watchlist")]
         elif method == "imdb_chart":
             logger.info(f"Processing IMDb Chart: {charts[data]}")
             return [(_i, "imdb") for _i in self._ids_from_chart(data, language)]
-        elif method == "imdb_watchlist":
-            logger.info(f"Processing IMDb Watchlist: {data}")
-            return [(_i, "imdb") for _i in self._watchlist(data, language)]
         elif method == "imdb_award":
             if data["event_year"] not in ["all", "latest"] and len(data["event_year"]) == 1:
                 event_slug = f"{data['event_year'][0]}/1" if "-" not in data["event_year"][0] else data["event_year"][0].replace("-", "/")
@@ -538,7 +541,7 @@ def get_imdb_ids(self, method, data, language):
             logger.info(f"Processing IMDb Search:")
             for k, v in data.items():
                 logger.info(f" {k}: {v}")
-            return [(_i, "imdb") for _i in self._pagination(data)]
+            return [(_i, "imdb") for _i in self._pagination(data, "search")]
         else:
             raise Failed(f"IMDb Error: Method {method} not supported")
diff --git a/requirements.txt b/requirements.txt
index cb0e34ba0..658efff2b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,5 +12,5 @@ requests==2.32.3
 tenacity==8.4.2
 ruamel.yaml==0.18.6
 schedule==1.2.2
-setuptools==70.1.0
+setuptools==70.1.1
 tmdbapis==1.2.16
\ No newline at end of file
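Reviewer note: the new `imdb_watchlist` path builds the same kind of persisted GraphQL query as `imdb_list`, just with the `WatchListPageRefiner` operation, the hash fetched from the `WATCHLIST_HASH` file, and `urConst` in place of `lsConst`. A minimal sketch of that payload shape is below; the helper name `watchlist_payload`, the hash placeholder, and the sort values are illustrative assumptions, not code from this diff.

```python
# Illustrative sketch only (not part of the diff). The payload shape follows the new
# _json_operation()/_graphql_json() code above; WATCHLIST_HASH and the sort values
# below are placeholders, and watchlist_payload() is a hypothetical helper name.
WATCHLIST_HASH = "<sha256 text fetched from the WATCHLIST_HASH file>"

def watchlist_payload(user_id, limit=0, sort_by="LIST_ORDER", sort_order="ASC"):
    """Rough shape of the persisted-query body sent to https://api.graphql.imdb.com/."""
    page_limit = 100  # non-search requests page 100 items at a time (see _pagination)
    return {
        "operationName": "WatchListPageRefiner",
        "variables": {
            "locale": "en-US",
            "first": limit if 0 < limit < page_limit else page_limit,
            "urConst": user_id,  # e.g. "ur64054558"
            "sort": {"by": sort_by, "order": sort_order},  # real code maps sort_by via list_sort_by_options
        },
        "extensions": {"persistedQuery": {"version": 1, "sha256Hash": WATCHLIST_HASH}},
    }

# Example: first 100 watchlist items in the user's custom order (values illustrative).
payload = watchlist_payload("ur64054558", limit=100)
```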