Merge pull request #1952 from diamondpete/heavyonhotties
NewSite/Add Support for Heavy on Hotties
DirtyRacer1337 authored Aug 16, 2024
2 parents 0c90f50 + 61bce05 commit 56c5157
Showing 6 changed files with 163 additions and 3 deletions.
1 change: 1 addition & 0 deletions Contents/Code/PAactors.py
@@ -79,6 +79,7 @@ def processActors(self, metadata, siteNum):

if searchStudioIndex == 32 and actorName != 'QueenSnake':
actorName = '%s QueenSnake' % actorName
searchActorName = actorName.lower()

break

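For context, the added line keeps the lowercased lookup key in sync after the QueenSnake suffix is appended to the performer name. A standalone sketch of the effect, with made-up input values (the real ones come from the scraped scene page):

# Illustration only; searchStudioIndex 32 is QueenSnake per the surrounding code.
actorName = 'Mina'          # hypothetical performer name
searchStudioIndex = 32

if searchStudioIndex == 32 and actorName != 'QueenSnake':
    actorName = '%s QueenSnake' % actorName
    searchActorName = actorName.lower()  # the line added by this commit

print(actorName)        # Mina QueenSnake
print(searchActorName)  # mina queensnake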
18 changes: 18 additions & 0 deletions Contents/Code/PAdatabaseActors.py
@@ -186,6 +186,7 @@
'Lea Lexis': ['Lea Lexus', 'Lea Lush', 'Leah Lush', 'Lee Lexus', 'Lea Lixus'],
'Leana Lovings': ['Leanna Lovings'],
'Lexi Anne Garza': ['Lexi Aaane', 'Lexi Anne'],
'Leyla Peachbloom': ['Layla Peachbloom', 'Leila Peachbloom', 'Leyla Bloom'],
'Lia Lor': ['Brady Paige'],
'Light Fairy': ['Flora Fairy', 'Lightfairy'],
'Lilly Ford': ['Lilly Lit'],
@@ -6088,6 +6089,22 @@
'Talon': ['Talon Valenti'],
'Zeus': ['Jesus "Zeus"Munoz'],
},
70: { # Heavy on Hotties
'Alice Wonderbang': ['Sasha'],
'Ashley Stillar': ['Nicol'],
'Cindy White': ['Cindy'],
'Jana Maho': ['Angelia'],
'Josephine Jackson': ['Josephine'],
'Lilu Moon': ['Lilu4u'],
'Leony Dark': ['Leony'],
'Maggie Gold': ['Maggies'],
'Melissa Mandlikova': ['Pamela'],
'Natasha Sweet': ['Nadiya'],
'Olga Winter': ['Olga'],
'Suzie Sun': ['Suzy'],
'Teena Lipoldino': ['Teena'],
'Victoria Ferrera': ['Roksolana'],
},
}

ActorsStudioIndexes = {
@@ -6161,6 +6178,7 @@
67: ['Data18'],
68: ['Mofos'],
69: ['Adult Empire'],
70: ['Heavy on Hotties'],
}


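The new block follows the table's existing convention: canonical performer name on the left, the names the site displays on the right, keyed by the studio index (70). A minimal standalone sketch of how such a table can be consulted; the resolve_alias helper below is hypothetical, not the plugin's actual lookup:

# Hypothetical lookup over a small slice of the alias table shown above.
actorAliases = {
    70: {  # Heavy on Hotties
        'Alice Wonderbang': ['Sasha'],
        'Victoria Ferrera': ['Roksolana'],
    },
}

def resolve_alias(studioIndex, siteName):
    # Map a site-listed name back to its canonical form, if known.
    for canonical, aliases in actorAliases.get(studioIndex, {}).items():
        if siteName.lower() in [alias.lower() for alias in aliases]:
            return canonical
    return siteName

print(resolve_alias(70, 'Roksolana'))        # Victoria Ferrera
print(resolve_alias(70, 'Some Other Name'))  # unchanged, falls through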
6 changes: 6 additions & 0 deletions Contents/Code/PAsiteList.py
@@ -189,6 +189,7 @@
import sitePornbox
import siteJesseLoadsMonsterFacials
import networkGASM
import siteHeavyOnHotties

searchSites = {
0: ('BlackedRaw', 'https://www.blackedraw.com', '/graphql'),
@@ -1977,6 +1978,7 @@
1883: ('Voodooed', 'https://voodooed.com', '/?s='),
1884: ('Vored', 'https://vored.com', '/?s='),
1885: ('Latina MILF', 'https://letsdoeit.com', 'https://site-api.project1service.com'),
1886: ('Heavy on Hotties', 'https://www.heavyonhotties.com', '/'),
}

abbreviations = (
@@ -3314,4 +3316,8 @@ def getProviderFromSiteNum(siteNum):
elif 1866 <= siteNum <= 1882:
provider = networkGASM

# Heavy on Hotties
elif siteNum == 1886:
provider = siteHeavyOnHotties

return provider
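For reference, each searchSites entry is a (display name, base URL, search path) tuple, and the new elif above routes site number 1886 to the siteHeavyOnHotties module. A standalone sketch of how the new tuple can be unpacked; the URL-building step is illustrative, not the agent's exact code:

searchSites = {
    1886: ('Heavy on Hotties', 'https://www.heavyonhotties.com', '/'),
}

siteNum = 1886
siteName, baseURL, searchPath = searchSites[siteNum]
print(siteName)              # Heavy on Hotties
print(baseURL + searchPath)  # https://www.heavyonhotties.com/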
6 changes: 3 additions & 3 deletions Contents/Code/PAutils.py
@@ -395,7 +395,7 @@ def postParseTitle(output):
# Remove single period at end of title
output = re.sub(r'(?<=[^\.].)(?<=\w)(?:\.)$', '', output)
# Remove space between word and certain punctuation
- output = re.sub(r'\s+(?=[.,!:\'\)])', '', output)
+ output = re.sub(r'\s+(?=[.,!\'\)]|(:(?!\))))', '', output)
# Add space between word and opening quote
output = re.sub(r'(?<=\S)([\"]\S+)', lambda m: ' ' + m.group(1), output)
# Remove space between punctuation and word
@@ -467,12 +467,12 @@ def manualWordFix(word):
exceptions = (
'im', 'theyll', 'cant', 'ive', 'shes', 'theyre', 'tshirt', 'dont', 'wasnt', 'youre', 'ill', 'whats', 'didnt',
'isnt', 'senor', 'senorita', 'thats', 'gstring', 'milfs', 'oreilly', 'bangbros', 'bday', 'dms', 'bffs',
- 'ohmy', 'wont', 'whos', 'shouldnt', 'lasirena'
+ 'ohmy', 'wont', 'whos', 'shouldnt', 'lasirena', 'espanol'
)
corrections = (
'I\'m', 'They\'ll', 'Can\'t', 'I\'ve', 'She\'s', 'They\'re', 'T-Shirt', 'Don\'t', 'Wasn\'t', 'You\'re', 'I\'ll', 'What\'s', 'Didn\'t',
'Isn\'t', 'Señor', 'Señorita', 'That\'s', 'G-String', 'MILFs', 'O\'Reilly', 'BangBros', 'B-Day', 'DMs', 'BFFs',
- 'OhMy', 'Won\'t', 'Who\'s', 'Shouldn\'t', 'LaSirena'
+ 'OhMy', 'Won\'t', 'Who\'s', 'Shouldn\'t', 'LaSirena', 'español'
)
pattern = re.compile(r'\d|\W')
cleanWord = re.sub(pattern, '', word)
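The tightened lookahead in postParseTitle still strips stray spaces before most punctuation, but a colon is now only joined when it is not part of a ':)' emoticon; manualWordFix also gains the 'espanol' → 'español' correction. A standalone before/after check of the regex change (the sample title is made up):

import re

old_pattern = r'\s+(?=[.,!:\'\)])'
new_pattern = r'\s+(?=[.,!\'\)]|(:(?!\))))'

title = 'Fun In The Sun :) Part 2 : The Return'
print(re.sub(old_pattern, '', title))  # Fun In The Sun:) Part 2: The Return
print(re.sub(new_pattern, '', title))  # Fun In The Sun :) Part 2: The Return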
134 changes: 134 additions & 0 deletions Contents/Code/siteHeavyOnHotties.py
@@ -0,0 +1,134 @@
import PAsearchSites
import PAutils


def search(results, lang, siteNum, searchData):
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), slugify(searchData.title))
searchResults = [directURL]
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), '-'.join(searchData.title.lower().split(' ')[1:]))
searchResults.append(directURL)

titleNoActors = ' '.join(searchData.title.split(' ')[2:])
if titleNoActors.startswith('and '):
titleNoActors = ' '.join(titleNoActors.split(' ')[3:])
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), slugify(titleNoActors.replace('\'', '')))
searchResults.append(directURL)

googleResults = PAutils.getFromGoogleSearch(searchData.title, siteNum)
for sceneURL in googleResults:
if '/movies/' in sceneURL and '/page-' not in sceneURL and sceneURL not in searchResults:
searchResults.append(sceneURL)

for sceneURL in searchResults:
try:
req = PAutils.HTTPRequest(sceneURL)
scenePageElements = HTML.ElementFromString(req.text)
titleNoFormatting = scenePageElements.xpath('//h1')[0].text_content().split(':', 1)[-1].strip().strip('\"')
curID = PAutils.Encode(sceneURL)

date = scenePageElements.xpath('//span[@class="released title"]/strong')
if date:
releaseDate = parse(date[0].text_content().strip()).strftime('%Y-%m-%d')
else:
releaseDate = searchData.dateFormat() if searchData.date else ''

displayDate = releaseDate if date else ''

if searchData.date and displayDate:
score = 100 - Util.LevenshteinDistance(searchData.date, releaseDate)
else:
score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())

results.Append(MetadataSearchResult(id='%s|%d|%s' % (curID, siteNum, releaseDate), name='%s [%s] %s' % (PAutils.parseTitle(titleNoFormatting, siteNum), PAsearchSites.getSearchSiteName(siteNum), displayDate), score=score, lang=lang))
except:
pass

return results


def update(metadata, lang, siteNum, movieGenres, movieActors, art):
metadata_id = str(metadata.id).split('|')
sceneURL = PAutils.Decode(metadata_id[0])
sceneDate = metadata_id[2]
if not sceneURL.startswith('http'):
sceneURL = PAsearchSites.getSearchBaseURL(siteNum) + sceneURL
req = PAutils.HTTPRequest(sceneURL)
detailsPageElements = HTML.ElementFromString(req.text)

# Title
metadata.title = PAutils.parseTitle(detailsPageElements.xpath('//h1')[0].text_content().split(':', 1)[-1].split(' - ')[-1].strip().strip('\"'), siteNum)

# Summary
summary = detailsPageElements.xpath('//div[@class="video_text"]')
if summary:
metadata.summary = summary[0].text_content().strip()

# Studio
metadata.studio = "Heavy on Hotties"

# Tagline and Collection(s)
metadata.collections.add(metadata.studio)

# Release Date
date = detailsPageElements.xpath('//span[@class="released title"]/strong')
if date:
date_object = parse(date[0].text_content().strip())
metadata.originally_available_at = date_object
metadata.year = metadata.originally_available_at.year
elif sceneDate:
date_object = parse(sceneDate)
metadata.originally_available_at = date_object
metadata.year = metadata.originally_available_at.year

# Actor(s)
for actorLink in detailsPageElements.xpath('//span[@class="feature title"]//a[contains(@href, "models")]'):
actorName = actorLink.text_content().strip()
actorPhotoURL = ''

actorURL = actorLink.xpath('./@href')[0]
if not actorURL.startswith('http'):
actorURL = '%s%s' % (PAsearchSites.getSearchBaseURL(siteNum), actorURL)
req = PAutils.HTTPRequest(actorURL)
actorPageElements = HTML.ElementFromString(req.text)

try:
actorPhotoURL = actorPageElements.xpath('//div[./h1]/img/@src')[0]
if not actorPhotoURL.startswith('http'):
actorPhotoURL = 'https:%s' % actorPhotoURL
except:
pass

movieActors.addActor(actorName, actorPhotoURL)

# Posters/Background
xpaths = [
'//video/@poster',
]

for xpath in xpaths:
for img in detailsPageElements.xpath(xpath):
if not img.startswith('http'):
img = 'https:' + img

art.append(img)

Log('Artwork found: %d' % len(art))
for idx, posterUrl in enumerate(art, 1):
if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
# Download image file for analysis
try:
image = PAutils.HTTPRequest(posterUrl)
im = StringIO(image.content)
resized_image = Image.open(im)
width, height = resized_image.size
# Add the image proxy items to the collection
if width > 1:
# Item is a poster
metadata.posters[posterUrl] = Proxy.Media(image.content, sort_order=idx)
if width > 100:
# Item is an art item
metadata.art[posterUrl] = Proxy.Media(image.content, sort_order=idx)
except:
pass

return metadata
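For context, search() first guesses direct /movies/ URLs from the title before falling back to Google results, and update() re-scrapes the chosen page for the title, summary, release date, performers, and video poster. A standalone sketch of the slug candidates search() builds; slug() below is a simplified stand-in for the plugin's slugify helper:

def slug(text):
    # Simplified: lowercase, drop apostrophes, hyphenate words.
    return '-'.join(text.lower().replace('\'', '').split())

base = 'https://www.heavyonhotties.com'
title = 'Josephine Jackson Heavy Duty'   # made-up example title

candidates = [
    '%s/movies/%s' % (base, slug(title)),                              # full title
    '%s/movies/%s' % (base, '-'.join(title.lower().split(' ')[1:])),   # drop first word
    '%s/movies/%s' % (base, slug(' '.join(title.split(' ')[2:]))),     # drop performer name
]
for url in candidates:
    print(url)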
1 change: 1 addition & 0 deletions docs/sitelist.md
@@ -669,6 +669,7 @@ If you're having difficulty finding the SceneID, double-check [PAsiteList.py](..
- TGirl Japan Hardcore
- TGirls.porn
- TGirls.xxx
+ #### Heavy on Hotties | ✅
+ #### Hegre | ✓ - **Title only**
+ #### HentaiPros | ✅
+ #### HighTechVR Network | ❌
