Merge pull request #514 from meisnate12/develop
v1.14.0
meisnate12 authored Dec 26, 2021
2 parents c23ce43 + 29cc04f commit 0de6e2f
Showing 24 changed files with 1,902 additions and 996 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -12,7 +12,7 @@ The original concept for Plex Meta Manager is [Plex Auto Collections](https://gi

The script can update many metadata fields for movies, shows, collections, seasons, and episodes, and can act as a backup if your Plex DB goes down. It can even update metadata the Plex UI can't, like season names. If the time is put into the metadata configuration file, you can recreate your library and all of its metadata changes with the click of a button.

The script works with most Metadata agents including the new Plex Movie Agent, New Plex TV Agent, [Hama Anime Agent](https://github.com/ZeroQI/Hama.bundle), and [MyAnimeList Anime Agent](https://github.com/Fribb/MyAnimeList.bundle).
The script works with most Metadata agents including the New Plex Movie Agent, New Plex TV Agent, [Hama Anime Agent](https://github.com/ZeroQI/Hama.bundle), [MyAnimeList Anime Agent](https://github.com/Fribb/MyAnimeList.bundle), and [XBMC NFO Movie and TV Agents](https://github.com/gboudreau/XBMCnfoMoviesImporter.bundle).

## Getting Started

2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
1.13.3
1.14.0
20 changes: 14 additions & 6 deletions config/config.yml.template
@@ -13,25 +13,33 @@ libraries: # Library mappings must have a c
metadata_path:
- file: config/Anime.yml # You have to create this file the other is online
- git: meisnate12/AnimeCharts
playlist_files:
- file: config/playlists.yml
settings: # Can be individually specified per library as well
cache: true
cache_expiration: 60
asset_directory: config/assets
asset_folders: true
asset_depth: 0
create_asset_folders: false
dimensional_asset_rename: false
show_missing_season_assets: false
sync_mode: append
collection_minimum: 1
delete_below_minimum: true
delete_not_scheduled: false
run_again_delay: 2
missing_only_released: false
only_filter_missing: false
show_unmanaged: true
show_filtered: false
show_options: false
show_missing: true
show_missing_assets: true
save_missing: true
run_again_delay: 2
missing_only_released: false
only_filter_missing: false
collection_minimum: 1
delete_below_minimum: true
delete_not_scheduled: false
tvdb_language: eng
ignore_ids:
ignore_imdb_ids:
webhooks: # Can be individually specified per library as well
error:
run_start:
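The `settings` block above is global, but as its comment notes, most of these keys can also be set per library. A minimal sketch of how a per-library override of the global settings could be resolved, using PyYAML; the `resolve_settings` helper, the `Anime` library name, and the merge rule are illustrative assumptions, not PMM's actual implementation:

```python
import yaml

def resolve_settings(config, library_name):
    # Hypothetical merge rule: start from the global settings block, then
    # overlay any matching keys defined directly under the library.
    merged = dict(config.get("settings", {}) or {})
    library = (config.get("libraries", {}) or {}).get(library_name) or {}
    for key, value in library.items():
        if key in merged:
            merged[key] = value
    return merged

with open("config/config.yml") as f:   # path assumed from the template name
    config = yaml.safe_load(f)

print(resolve_settings(config, "Anime").get("cache_expiration", 60))
```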
13 changes: 10 additions & 3 deletions modules/anilist.py
@@ -50,15 +50,22 @@
class AniList:
def __init__(self, config):
self.config = config
self.options = {
self._options = None

@property
def options(self):
if self._options:
return self._options
self._options = {
"Tag": {}, "Tag Category": {},
"Genre": {g.lower().replace(" ", "-"): g for g in self._request(genre_query, {})["data"]["GenreCollection"]},
"Country": {c: c.upper() for c in country_codes},
"Season": media_season, "Format": media_format, "Status": media_status, "Source": media_source,
}
for media_tag in self._request(tag_query, {})["data"]["MediaTagCollection"]:
self.options["Tag"][media_tag["name"].lower().replace(" ", "-")] = media_tag["name"]
self.options["Tag Category"][media_tag["category"].lower().replace(" ", "-")] = media_tag["category"]
self._options["Tag"][media_tag["name"].lower().replace(" ", "-")] = media_tag["name"]
self._options["Tag Category"][media_tag["category"].lower().replace(" ", "-")] = media_tag["category"]
return self._options

def _request(self, query, variables, level=1):
if self.config.trace_mode:
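The change above turns `options` into a lazily built property, so the AniList genre and tag queries only run the first time the mapping is needed instead of at startup. A self-contained sketch of the same caching pattern, with a stand-in fetch function instead of a real API call:

```python
class LazyOptions:
    def __init__(self, fetch):
        self._fetch = fetch      # expensive call, e.g. an API request
        self._options = None     # cache, filled on first access

    @property
    def options(self):
        if self._options is None:
            self._options = self._fetch()   # runs only once
        return self._options

# The fetch function is not called until .options is first read.
lazy = LazyOptions(lambda: {"Genre": {"action": "Action"}})
print(lazy.options["Genre"]["action"])
```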
1,297 changes: 643 additions & 654 deletions modules/builder.py

Large diffs are not rendered by default.

5 changes: 4 additions & 1 deletion modules/cache.py
@@ -202,7 +202,10 @@ def _query_map(self, map_name, _id, from_id, to_id, media_type=None, return_type
if row and row[to_id]:
datetime_object = datetime.strptime(row["expiration_date"], "%Y-%m-%d")
time_between_insertion = datetime.now() - datetime_object
id_to_return = row[to_id] if to_id == "imdb_id" else int(row[to_id])
try:
id_to_return = int(row[to_id])
except ValueError:
id_to_return = row[to_id]
expired = time_between_insertion.days > self.expiration
out_type = row["media_type"] if return_type else None
if return_type:
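The cache lookup above now tries an integer conversion and falls back to the raw value, instead of special-casing `imdb_id`, so any non-numeric ID survives the round trip. The coercion in isolation (illustrative sketch):

```python
def coerce_id(value):
    # TMDb/TVDb IDs are numeric; IMDb IDs look like "tt0133093".
    try:
        return int(value)
    except ValueError:
        return value

print(coerce_id("603"))        # -> 603
print(coerce_id("tt0133093"))  # -> 'tt0133093'
```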
172 changes: 140 additions & 32 deletions modules/config.py

Large diffs are not rendered by default.

72 changes: 56 additions & 16 deletions modules/convert.py
@@ -10,22 +10,52 @@
class Convert:
def __init__(self, config):
self.config = config
self.anidb_ids = {}
self.mal_to_anidb = {}
self.anilist_to_anidb = {}
self.anidb_to_imdb = {}
self.anidb_to_tvdb = {}
for anime_id in self.config.get_json(anime_lists_url):
if "anidb_id" in anime_id:
self.anidb_ids[anime_id["anidb_id"]] = anime_id
if "mal_id" in anime_id:
self.mal_to_anidb[int(anime_id["mal_id"])] = int(anime_id["anidb_id"])
if "anilist_id" in anime_id:
self.anilist_to_anidb[int(anime_id["anilist_id"])] = int(anime_id["anidb_id"])
if "imdb_id" in anime_id and str(anime_id["imdb_id"]).startswith("tt"):
self.anidb_to_imdb[int(anime_id["anidb_id"])] = util.get_list(anime_id["imdb_id"])
if "thetvdb_id" in anime_id:
self.anidb_to_tvdb[int(anime_id["anidb_id"])] = int(anime_id["thetvdb_id"])
self._loaded = False
self._anidb_ids = {}
self._mal_to_anidb = {}
self._anilist_to_anidb = {}
self._anidb_to_imdb = {}
self._anidb_to_tvdb = {}

@property
def anidb_ids(self):
self._load_anime_conversion()
return self._anidb_ids

@property
def mal_to_anidb(self):
self._load_anime_conversion()
return self._mal_to_anidb

@property
def anilist_to_anidb(self):
self._load_anime_conversion()
return self._anilist_to_anidb

@property
def anidb_to_imdb(self):
self._load_anime_conversion()
return self._anidb_to_imdb

@property
def anidb_to_tvdb(self):
self._load_anime_conversion()
return self._anidb_to_tvdb

def _load_anime_conversion(self):
if not self._loaded:
for anime_id in self.config.get_json(anime_lists_url):
if "anidb_id" in anime_id:
self._anidb_ids[anime_id["anidb_id"]] = anime_id
if "mal_id" in anime_id:
self._mal_to_anidb[int(anime_id["mal_id"])] = int(anime_id["anidb_id"])
if "anilist_id" in anime_id:
self._anilist_to_anidb[int(anime_id["anilist_id"])] = int(anime_id["anidb_id"])
if "imdb_id" in anime_id and str(anime_id["imdb_id"]).startswith("tt"):
self._anidb_to_imdb[int(anime_id["anidb_id"])] = util.get_list(anime_id["imdb_id"])
if "thetvdb_id" in anime_id:
self._anidb_to_tvdb[int(anime_id["anidb_id"])] = int(anime_id["thetvdb_id"])
self._loaded = True

def anidb_to_ids(self, anidb_ids, library):
ids = []
@@ -224,6 +254,16 @@ def get_id(self, item, library):
elif item_type == "imdb": imdb_id.append(check_id)
elif item_type == "thetvdb": tvdb_id.append(int(check_id))
elif item_type == "themoviedb": tmdb_id.append(int(check_id))
elif item_type in ["xbmcnfo", "xbmcnfotv"]:
if len(check_id) > 10:
raise Failed(f"XMBC NFO Local ID: {check_id}")
try:
if item_type == "xbmcnfo":
tmdb_id.append(int(check_id))
else:
tvdb_id.append(int(check_id))
except ValueError:
imdb_id.append(check_id)
elif item_type == "hama":
if check_id.startswith("tvdb"):
tvdb_id.append(int(re.search("-(.*)", check_id).group(1)))
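Besides deferring the anime ID maps until first use (the same lazy-property pattern as in `anilist.py`), the diff adds handling for the XBMC NFO agents: numeric IDs are routed to TMDb for movies and TVDb for TV, and anything non-numeric is treated as an IMDb ID. A standalone sketch of that routing, with `route_xbmcnfo_id` as a hypothetical helper and a plain `ValueError` in place of PMM's `Failed`:

```python
def route_xbmcnfo_id(item_type, check_id, tmdb_id, tvdb_id, imdb_id):
    if item_type in ("xbmcnfo", "xbmcnfotv"):
        if len(check_id) > 10:
            raise ValueError(f"XBMC NFO Local ID: {check_id}")
        try:
            if item_type == "xbmcnfo":
                tmdb_id.append(int(check_id))   # movie agent -> TMDb
            else:
                tvdb_id.append(int(check_id))   # TV agent -> TVDb
        except ValueError:
            imdb_id.append(check_id)            # non-numeric -> IMDb

tmdb, tvdb, imdb = [], [], []
route_xbmcnfo_id("xbmcnfo", "603", tmdb, tvdb, imdb)
route_xbmcnfo_id("xbmcnfotv", "tt0944947", tmdb, tvdb, imdb)
print(tmdb, tvdb, imdb)  # [603] [] ['tt0944947']
```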
9 changes: 4 additions & 5 deletions modules/flixpatrol.py
@@ -50,10 +50,9 @@ def _tmdb(self, flixpatrol_url, language):
if len(ids) > 0 and ids[0]:
if "https://www.themoviedb.org" in ids[0]:
return util.regex_first_int(ids[0].split("https://www.themoviedb.org")[1], "TMDB Movie ID")
raise Failed(f"FlixPatrol Error: TMDb Movie ID not found in {ids[0]}")
raise Failed(f"FlixPatrol Error: TMDb Movie ID not found at {flixpatrol_url}")

def _parse_list(self, list_url, language, is_movie):
def _parse_list(self, list_url, language, is_movie, limit=0):
flixpatrol_urls = []
if list_url.startswith(urls["top10"]):
platform = list_url[len(urls["top10"]):].split("/")[0]
@@ -73,15 +72,15 @@ def _parse_list(self, list_url, language, is_movie):
list_url, language,
f"//a[@class='flex group' and .//span[.='{'Movie' if is_movie else 'TV Show'}']]/@href"
)
return flixpatrol_urls
return flixpatrol_urls if limit == 0 else flixpatrol_urls[:limit]

def validate_flixpatrol_lists(self, flixpatrol_lists, language, is_movie):
valid_lists = []
for flixpatrol_list in util.get_list(flixpatrol_lists, split=False):
list_url = flixpatrol_list.strip()
if not list_url.startswith(tuple([v for k, v in urls.items()])):
fails = "\n".join([f"{v} (For {k.replace('_', ' ').title()})" for k, v in urls.items()])
raise Failed(f"FlixPatrol Error: {list_url} must begin with either:{fails}")
raise Failed(f"FlixPatrol Error: {list_url} must begin with either:\n{fails}")
elif len(self._parse_list(list_url, language, is_movie)) > 0:
valid_lists.append(list_url)
else:
Expand Down Expand Up @@ -133,7 +132,7 @@ def get_flixpatrol_ids(self, method, data, language, is_movie):
logger.info(f"Processing FlixPatrol URL: {data}")
url = self.get_url(method, data, is_movie)

items = self._parse_list(url, language, is_movie)
items = self._parse_list(url, language, is_movie, limit=data["limit"] if isinstance(data, dict) else 0)
media_type = "movie" if is_movie else "show"
total_items = len(items)
if total_items > 0:
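`_parse_list` now accepts a `limit`, where `0` means return everything and any positive value truncates the scraped list, and `get_flixpatrol_ids` passes the list's `limit` through when the data is a dict. The slicing convention on its own (illustrative sketch):

```python
def apply_limit(items, limit=0):
    # limit == 0 keeps the full list; otherwise keep only the first `limit` items.
    return items if limit == 0 else items[:limit]

urls = ["a", "b", "c", "d"]
print(apply_limit(urls))     # ['a', 'b', 'c', 'd']
print(apply_limit(urls, 2))  # ['a', 'b']
```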
62 changes: 59 additions & 3 deletions modules/imdb.py
@@ -5,7 +5,20 @@

logger = logging.getLogger("Plex Meta Manager")

builders = ["imdb_list", "imdb_id"]
builders = ["imdb_list", "imdb_id", "imdb_chart"]
movie_charts = ["box_office", "popular_movies", "top_movies", "top_english", "top_indian", "lowest_rated"]
show_charts = ["popular_shows", "top_shows"]
charts = {
"box_office": "Box Office",
"popular_movies": "Most Popular Movies",
"popular_shows": "Most Popular TV Shows",
"top_movies": "Top 250 Movies",
"top_shows": "Top 250 TV Shows",
"top_english": "Top Rated English Movies",
"top_indian": "Top Rated Indian Movies",
"lowest_rated": "Lowest Rated Movies"
}

base_url = "https://www.imdb.com"
urls = {
"lists": f"{base_url}/list/ls",
Expand All @@ -24,12 +37,31 @@ def validate_imdb_lists(self, imdb_lists, language):
if not isinstance(imdb_dict, dict):
imdb_dict = {"url": imdb_dict}
dict_methods = {dm.lower(): dm for dm in imdb_dict}
imdb_url = util.parse("url", imdb_dict, methods=dict_methods, parent="imdb_list").strip()
if "url" not in dict_methods:
raise Failed(f"Collection Error: imdb_list url attribute not found")
elif imdb_dict[dict_methods["url"]] is None:
raise Failed(f"Collection Error: imdb_list url attribute is blank")
else:
imdb_url = imdb_dict[dict_methods["url"]].strip()
if not imdb_url.startswith(tuple([v for k, v in urls.items()])):
fails = "\n".join([f"{v} (For {k.replace('_', ' ').title()})" for k, v in urls.items()])
raise Failed(f"IMDb Error: {imdb_url} must begin with either:{fails}")
self._total(imdb_url, language)
list_count = util.parse("limit", imdb_dict, datatype="int", methods=dict_methods, default=0, parent="imdb_list", minimum=0) if "limit" in dict_methods else 0
list_count = None
if "limit" in dict_methods:
if imdb_dict[dict_methods["limit"]] is None:
logger.warning(f"Collection Warning: imdb_list limit attribute is blank using 0 as default")
else:
try:
value = int(str(imdb_dict[dict_methods["limit"]]))
if 0 <= value:
list_count = value
except ValueError:
pass
if list_count is None:
logger.warning(f"Collection Warning: imdb_list limit attribute must be an integer 0 or greater using 0 as default")
if list_count is None:
list_count = 0
valid_lists.append({"url": imdb_url, "limit": list_count})
return valid_lists

@@ -96,6 +128,27 @@ def _ids_from_url(self, imdb_url, language, limit):
return imdb_ids
raise Failed(f"IMDb Error: No IMDb IDs Found at {imdb_url}")

def _ids_from_chart(self, chart):
if chart == "box_office":
url = "chart/boxoffice"
elif chart == "popular_movies":
url = "chart/moviemeter"
elif chart == "popular_shows":
url = "chart/tvmeter"
elif chart == "top_movies":
url = "chart/top"
elif chart == "top_shows":
url = "chart/toptv"
elif chart == "top_english":
url = "chart/top-english-movies"
elif chart == "top_indian":
url = "india/top-rated-indian-movies"
elif chart == "lowest_rated":
url = "chart/bottom"
else:
raise Failed(f"IMDb Error: chart: {chart} not ")
return self.config.get_html(f"https://www.imdb.com/{url}").xpath("//div[@class='wlb_ribbon']/@data-tconst")

def get_imdb_ids(self, method, data, language):
if method == "imdb_id":
logger.info(f"Processing IMDb ID: {data}")
@@ -104,5 +157,8 @@ def get_imdb_ids(self, method, data, language):
status = f"{data['limit']} Items at " if data['limit'] > 0 else ''
logger.info(f"Processing IMDb List: {status}{data['url']}")
return [(i, "imdb") for i in self._ids_from_url(data["url"], language, data["limit"])]
elif method == "imdb_chart":
logger.info(f"Processing IMDb Chart: {charts[data]}")
return [(_i, "imdb") for _i in self._ids_from_chart(data)]
else:
raise Failed(f"IMDb Error: Method {method} not supported")