NewSite/Add Support for Heavy on Hotties
diamondpete committed Aug 15, 2024
1 parent 0c90f50 commit 288593e
Showing 5 changed files with 147 additions and 0 deletions.
1 change: 1 addition & 0 deletions Contents/Code/PAactors.py
@@ -79,6 +79,7 @@ def processActors(self, metadata, siteNum):

if searchStudioIndex == 32 and actorName != 'QueenSnake':
actorName = '%s QueenSnake' % actorName
searchActorName = actorName.lower()

break

5 changes: 5 additions & 0 deletions Contents/Code/PAdatabaseActors.py
@@ -6088,6 +6088,10 @@
'Talon': ['Talon Valenti'],
'Zeus': ['Jesus "Zeus"Munoz'],
},
70: { # Heavy on Hotties
'Josephine Jackson': ['Josephine'],
'Natasha Sweet': ['Nadiya'],
},
}

ActorsStudioIndexes = {
@@ -6161,6 +6165,7 @@
67: ['Data18'],
68: ['Mofos'],
69: ['Adult Empire'],
70: ['Heavy on Hotties'],
}


6 changes: 6 additions & 0 deletions Contents/Code/PAsiteList.py
@@ -189,6 +189,7 @@
import sitePornbox
import siteJesseLoadsMonsterFacials
import networkGASM
import siteHeavyOnHotties

searchSites = {
0: ('BlackedRaw', 'https://www.blackedraw.com', '/graphql'),
@@ -1977,6 +1978,7 @@
1883: ('Voodooed', 'https://voodooed.com', '/?s='),
1884: ('Vored', 'https://vored.com', '/?s='),
1885: ('Latina MILF', 'https://letsdoeit.com', 'https://site-api.project1service.com'),
1886: ('Heavy on Hotties', 'https://www.heavyonhotties.com', '/'),
}

abbreviations = (
@@ -3314,4 +3316,8 @@ def getProviderFromSiteNum(siteNum):
elif 1866 <= siteNum <= 1882:
provider = networkGASM

# Heavy on Hotties
elif siteNum == 1886:
provider = siteHeavyOnHotties

return provider
134 changes: 134 additions & 0 deletions Contents/Code/siteHeavyOnHotties.py
@@ -0,0 +1,134 @@
import PAsearchSites
import PAutils


def search(results, lang, siteNum, searchData):
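    # Build candidate scene URLs directly under /movies/: first from the full slugged title,
    # then from the title with its first word dropped.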
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), slugify(searchData.title))
searchResults = [directURL]
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), '-'.join(searchData.title.lower().split(' ')[1:]))
searchResults.append(directURL)

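    # Third guess: drop the leading performer name(s) from the title before slugging it.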
titleNoActors = ' '.join(searchData.title.split(' ')[2:])
if titleNoActors.startswith('and '):
titleNoActors = ' '.join(titleNoActors.split(' ')[3:])
directURL = '%s/movies/%s' % (PAsearchSites.getSearchBaseURL(siteNum), slugify(titleNoActors.replace('\'', '')))
searchResults.append(directURL)

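    # Fall back to a Google site search for any additional /movies/ pages.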
googleResults = PAutils.getFromGoogleSearch(searchData.title, siteNum)
for sceneURL in googleResults:
if '/movies/' in sceneURL and '/page-' not in sceneURL and sceneURL not in searchResults:
searchResults.append(sceneURL)

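    # Fetch each candidate page; URLs that fail to load or parse are skipped silently.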
for sceneURL in searchResults:
try:
req = PAutils.HTTPRequest(sceneURL)
scenePageElements = HTML.ElementFromString(req.text)
titleNoFormatting = scenePageElements.xpath('//h1')[0].text_content().split(':')[-1].strip().strip('\"')
curID = PAutils.Encode(sceneURL)

date = scenePageElements.xpath('//span[@class="released title"]/strong')
if date:
releaseDate = parse(date[0].text_content().strip()).strftime('%Y-%m-%d')
else:
releaseDate = searchData.dateFormat() if searchData.date else ''

displayDate = releaseDate if date else ''

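            # Score by date when both a search date and a page date are available, otherwise by title similarity.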
if searchData.date and displayDate:
score = 100 - Util.LevenshteinDistance(searchData.date, releaseDate)
else:
score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())

results.Append(MetadataSearchResult(id='%s|%d|%s' % (curID, siteNum, releaseDate), name='%s [%s] %s' % (PAutils.parseTitle(titleNoFormatting, siteNum), PAsearchSites.getSearchSiteName(siteNum), displayDate), score=score, lang=lang))
except:
pass

return results


def update(metadata, lang, siteNum, movieGenres, movieActors, art):
metadata_id = str(metadata.id).split('|')
sceneURL = PAutils.Decode(metadata_id[0])
sceneDate = metadata_id[2]
if not sceneURL.startswith('http'):
sceneURL = PAsearchSites.getSearchBaseURL(siteNum) + sceneURL
req = PAutils.HTTPRequest(sceneURL)
detailsPageElements = HTML.ElementFromString(req.text)

# Title
metadata.title = PAutils.parseTitle(detailsPageElements.xpath('//h1')[0].text_content().split(':')[-1].split(' - ')[-1].strip().strip('\"'), siteNum)

# Summary
summary = detailsPageElements.xpath('//div[@class="video_text"]')
if summary:
metadata.summary = summary[0].text_content().strip()

# Studio
metadata.studio = "Heavy on Hotties"

# Tagline and Collection(s)
metadata.collections.add(metadata.studio)

# Release Date
date = detailsPageElements.xpath('//span[@class="released title"]/strong')
if date:
date_object = parse(date[0].text_content().strip())
metadata.originally_available_at = date_object
metadata.year = metadata.originally_available_at.year
elif sceneDate:
date_object = parse(sceneDate)
metadata.originally_available_at = date_object
metadata.year = metadata.originally_available_at.year

# Actor(s)
for actorLink in detailsPageElements.xpath('//span[@class="feature title"]//a[contains(@href, "models")]'):
actorName = actorLink.text_content().strip()
actorPhotoURL = ''

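        # Visit the performer's model page to look for a headshot.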
actorURL = actorLink.xpath('./@href')[0]
if not actorURL.startswith('http'):
actorURL = '%s%s' % (PAsearchSites.getSearchBaseURL(siteNum), actorURL)
req = PAutils.HTTPRequest(actorURL)
actorPageElements = HTML.ElementFromString(req.text)

try:
actorPhotoURL = actorPageElements.xpath('//div[./h1]/img/@src')[0]
if not actorPhotoURL.startswith('http'):
actorPhotoURL = 'https:%s' % actorPhotoURL
except:
pass

movieActors.addActor(actorName, actorPhotoURL)

# Posters/Background
xpaths = [
'//video/@poster',
]

for xpath in xpaths:
for img in detailsPageElements.xpath(xpath):
if not img.startswith('http'):
img = 'https:' + img

art.append(img)

Log('Artwork found: %d' % len(art))
for idx, posterUrl in enumerate(art, 1):
if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
# Download image file for analysis
try:
image = PAutils.HTTPRequest(posterUrl)
im = StringIO(image.content)
resized_image = Image.open(im)
width, height = resized_image.size
# Add the image proxy items to the collection
if width > 1:
# Item is a poster
metadata.posters[posterUrl] = Proxy.Media(image.content, sort_order=idx)
if width > 100:
# Item is an art item
metadata.art[posterUrl] = Proxy.Media(image.content, sort_order=idx)
except:
pass

return metadata
1 change: 1 addition & 0 deletions docs/sitelist.md
@@ -669,6 +669,7 @@ If you're having difficulty finding the SceneID, double-check [PAsiteList.py](..
- TGirl Japan Hardcore
- TGirls.porn
- TGirls.xxx
+ #### Heavy on Hotties | ✅
  #### Hegre | ✓ - **Title only**
  #### HentaiPros | ✅
  #### HighTechVR Network | ❌
