Commit

Merge branch 'dev' into main

zly-idleness committed Aug 29, 2024
2 parents cbbaa23 + 024f93e commit 20a4c80
Showing 20 changed files with 266 additions and 274 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/checksum.yml
@@ -13,7 +13,7 @@ jobs:

- name: Run RVC-Models-Downloader
run: |
wget https://github.com/fumiama/RVC-Models-Downloader/releases/download/v0.2.7/rvcmd_linux_amd64.deb
wget https://github.com/fumiama/RVC-Models-Downloader/releases/download/v0.2.8/rvcmd_linux_amd64.deb
sudo apt -y install ./rvcmd_linux_amd64.deb
rm -f ./rvcmd_linux_amd64.deb
rvcmd -notrs -w 1 -notui assets/chtts
12 changes: 11 additions & 1 deletion ChatTTS/config/config.py
@@ -5,9 +5,10 @@
class Path:
vocos_ckpt_path: str = "asset/Vocos.pt"
dvae_ckpt_path: str = "asset/DVAE_full.pt"
gpt_ckpt_path: str = "asset/GPT.pt"
gpt_ckpt_path: str = "asset/gpt"
decoder_ckpt_path: str = "asset/Decoder.pt"
tokenizer_path: str = "asset/tokenizer"
embed_path: str = "asset/Embed.safetensors"


@dataclass(repr=False, eq=False)
@@ -62,6 +63,14 @@ class GPT:
num_vq: int = 4


@dataclass(repr=False, eq=False)
class Embed:
hidden_size: int = 768
num_audio_tokens: int = 626
num_text_tokens: int = 21178
num_vq: int = 4


@dataclass(repr=False, eq=False)
class FeatureExtractorInitArgs:
sample_rate: int = 24000
@@ -118,6 +127,7 @@ class Config:
decoder: Decoder = Decoder()
dvae: DVAE = DVAE()
gpt: GPT = GPT()
embed: Embed = Embed()
vocos: Vocos = Vocos()
spk_stat: str = (
"愐穤巩噅廷戇笉屈癐媄垹垧帶爲漈塀殐慄亅倴庲舴猂瑈圐狴夥圓帍戛挠腉耐劤坽喳幾战謇聀崒栄呥倸庭燡欈杁襐褄乭埗幺爃弔摁斐捔兕佖廐舏竾豃磐姓趡佄幒爚欄豄讐皳訵仩帆投謌荃蝐叄圝伆幦抂茁呄掑斃讹傮庞爣蜀橁偐祄亥兡常爂欍扉丐浔佱僈強払伅扂蛐徴憍傞巀戺欀艂琐嗴啥値彷刂權穈扒卤俔贲庛初笂卄贐枴仭亁庛剎猢扃缐趤刁偵幪舏伌煁婐潤晍位弾舙茥穁葏蠣訑企庤刊笍橁溑僔云偁庯戚伍潉膐脴僵噔廃艅匊祂唐憴壝嗙席爥欁虁谐牴帽势弿牳蜁兀蛐傄喩丿帔刔圆衁廐罤庁促帙劢伈汄樐檄勵伴弝舑欍罅虐昴劭勅帜刼朊蕁虐蓴樑伫幨扑謪剀堐稴丵伱弐舮諸赁習俔容厱幫牶謃孄糐答嗝僊帜燲笄終瀒判久僤帘爴茇千孑冄凕佳引扐蜁歁缏裄剽儺恘爋朏眿廐呄塍嘇幻爱茠詁訐剴唭俐幾戊欀硁菐贄楕偒巡爀弎屄莐睳賙凶彎刅漄區唐溴剑劋庽舽猄煃跐夔惥伾庮舎伈罁垑坄怅业怯刁朇獁嶏覔坩俳巶爜朐潁崐萄俹凛常爺笌穀聐此夡倛帡刀匉終窏舣販侽怿扉伥贿憐忓謩姆幌犊漂慆癒却甝兎帼戏欅詂浐朔仹壭帰臷弎恇菐獤帡偖帘爞伅腂皐纤囅充幓戠伥灂丐訤戱倱弋爮嬌癁恐孄侥劬忶刓國詀桒古偩嘄庬戚茝赂监燤嘑勌幦舽持呂諐棤姑再底舡笍艃瀐孴倉傔弋爔猠乁濑塄偽嘧恂舛缇襃厐窴仡刱忕別漇穁岏缴廽价庌爊謈硄讑惤倁儂庭爋伇蝂嶐莔摝傠库刞茄歃戏薤伍伯廮创笠塄熐兴勽俄帅剉最腀砐敤卝侍弆戺朒虃旐蚄梕亖幔牻朣扅贐玔堝噅帡剌圅摀崐彤流僳庙爖嬇啁渐悤堁丛幆刧挜彃悐幤刹嚟恕芁看聀摐焔向乁帖爭欁癃糒圄弙佱廜戤謍婀咐昴焍亩廦艏拼謿芐癤怹兽幸舳朇畁喐稔毝丼弈懲挀譂勑哴啁伎常舭笯晁堑俄叩剔廟爍欦絁夒伤休傑廳戌蜅潆癐彴摑勯床刽欅艁砐忄搉从廡舊猥潂唐委仱僜廼爤朄呃弐礔滵垓幩爄挂筁乐籤刕凟幵爠弉癅乑吴勥伖帪舩茆婁碐幤叭乢巜艳猁桀桐啄唩俊幍舮猀艅焐螔琽亀帋爜缅噃咐斤喩予幩爛笆摀浐猴依侹幃刕園慄蛐栤澹仑座爼謉桃慐浔斕偻幛懰嬓衁愐氄悅仿应芔漄衃敐謤傁匩幹抃圉癄廐裄屵噉幍利謍聂搐蛔嚙坍怗舁圐畃膐栄刵东巆戤諾呃偑媤嗨跞忶爝眄祂朒嶔僭劉忾刐匋癄袐翴珅僷廲芄茈恈皐擄崑伄廉牍匃剃犏澤唑丄庺戃伃煀某杄偙亽帴切缌罄挐尴噙倰带舞漄橄塐糴俩僯帀般漀坂栐更両俇廱舌猁慂拐偤嶱卶应刪眉獁茐伔嘅偺帟舊漂恀栐暄喡乞庙舆匂敀潑恔劑侖延戦盽怶唯慳蝘蟃孫娎益袰玍屃痶翮笪儚裀倹椌玻翀詵筽舘惯堿某侰晈藏缮詗廦夸妎瑻瀒裔媀憞唃冶璭狻渠荑奬熹茅愺氰菣滠翦岓褌泣崲嚭欓湒聙宺爄蛅愸庍匃帆誔穮懌蓪玷澌氋抌訙屌臞廛玸听屺希疭孝凂紋新煎彃膲跱尪懁眆窴珏卓揨菸紭概囥显壌榄垫嘮嬭覤媸侵佮烒耸觌婀秋狃帹葯訤桜糨笾腢伀肶悍炂艤禖岅臺惘梷瞍友盁佨岧憳瓧嘴汬藊愌蘤嶠硴绤蜲襏括勾谂縨妥蓪澭竭萢藜纞糲煮愆瀯孯琓罂諺塿燗狟弙衯揻縷丱糅臄梱瀮杰巳猙亊符胠匃泀廏圃膂蒃籏礩岈簹缌劺燲褡孓膜拔蠿觮呋煣厌尷熜論弲牭紫寊誃紀橴賬傸箍弚窃侫簲慯烣渽祌壓媥噜夽夛諛玹疮禄冪謇媽衤盰缺繑薫兾萧嵱打滽箺嚯凣狢蠜崼覽烸簶盯籓摀苶峸懗泲涻凮愳緗剋笔懆廡瞿椏礤惐藥崍腈烄伹亯昣翬褍絋桫僨吨莌丛矄蜞娈憊苆塁蓏嚢嫼绻崱婋囱蠸篯晣芀繼索兓僖誹岯圪褰蠇唓妷胅巁渮砛傈蝷嵚冃購赁峍裋荂舾符熻岳墩寮粃凲袑彚太绲头摯繳狁俥籌冝諝註坎幫擤詒宒凕賐唶梎噔弼課屿覍囨焬櫱撪蝮蝬簸懰櫫涺嵍睻屪翔峞慘滟熲昱军烊舿尦舄糖奁溏凂彆蝲糴禍困皻灏牋睒诙嶱臀开蓈眎腼丢纻廏憤嫖暭袭崲肸螛妒榗紉谨窮袃瑠聍绊腆亿冲葐喋縔詖岑兾给堸赏旻桀蛨媆訂峦紷敯囬偐筨岸焸拭笵殒哜墒萍屓娓諙械臮望摰芑寭准僞谹氍旋憢菮屃划欣瘫谎蘻哐繁籥禦僿誵皯墓燀縿笞熦绗稹榎矻綞蓓帡戓沺区才畃洊詪糐裶盰窶耎偌劂誐庩惝滜沺哮呃煐譠崄槀猄肼蔐擋湌蠺篃恥諌瞦宍堫挪裕崑慩狲悠煋仛愞砈粵八棁害楐妋萔貨尵奂苰怫誎傫岆蕯屇脉夈仆茎刓繸芺壸碗曛汁戭炻獻凉媁兎狜爴怰賃纎袏娷禃蓥膹薪渻罸窿粫凾褄舺窮墫干苊繁冏僮訸夯绛蓪虛羽慲烏憷趎睊蠰莍塞成廎盁欏喓蜮譤崆楁囘矇薭伣艘虝帴奮苢渶虎暣翐蝃尾稈糶瀴罐嵚氮葯笫慐棌悶炯竻爅们媡姢嫺窷刮歫劈裩屬椕賑蜹薊刲義哯尗褦瓀稾礋揣窼舫尋姁椄侸嗫珺修纘媃腽蛛稹梭呛瀈蘟縀礉論夵售主梮蠉娅娭裀誼嶭観枳倊簈褃擞綿催瞃溶苊笛襹櫲盅六囫獩佃粨慯瓢眸旱荃婨蔞岋祗墼焻网牻琖詆峋秉胳媴袭澓賢経稟壩胫碯偏囫嶎纆窈槊賐撹璬莃缘誾宭愊眗喷监劋萘訯總槿棭戾墮犄恌縈簍樥蛔杁袭嫛憫倆篏墵賈羯茎觳蒜致娢慄勒覸蘍曲栂葭宆妋皽缽免盳猼蔂糥觧烳檸佯憓煶蔐筼种繷琲膌塄剰讎対腕棥渽忲俛浪譬秛惛壒嘸淫冻曄睻砃奫貯庴爅粓脮脡娎妖峵蘲討惋泊蠀㴆"
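Note: the Embed dataclass added above mirrors the dimensions that core.py passes to the standalone embedding module introduced in this commit, and embed_path points at the new safetensors asset. A minimal wiring sketch, not part of the commit, assuming the assets have already been downloaded under asset/:

from ChatTTS.config import Config
from ChatTTS.model import Embed

cfg = Config()
embed = Embed(
    cfg.embed.hidden_size,       # 768
    cfg.embed.num_audio_tokens,  # 626
    cfg.embed.num_text_tokens,   # 21178
    cfg.embed.num_vq,            # 4
)
embed.from_pretrained("asset/Embed.safetensors")  # i.e. Path.embed_path above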
29 changes: 21 additions & 8 deletions ChatTTS/core.py
@@ -13,7 +13,7 @@
from huggingface_hub import snapshot_download

from .config import Config
from .model import DVAE, GPT, gen_logits, Tokenizer, Speaker
from .model import DVAE, Embed, GPT, gen_logits, Tokenizer, Speaker
from .utils import (
check_all_assets,
download_all_assets,
@@ -46,7 +46,7 @@ def __init__(self, logger=logging.getLogger(__name__)):

def has_loaded(self, use_decoder=False):
not_finish = False
check_list = ["vocos", "gpt", "tokenizer"]
check_list = ["vocos", "gpt", "tokenizer", "embed"]

if use_decoder:
check_list.append("decoder")
@@ -97,7 +97,7 @@ def download_models(
try:
download_path = snapshot_download(
repo_id="2Noise/ChatTTS",
allow_patterns=["*.pt", "*.yaml", "*.json"],
allow_patterns=["*.pt", "*.yaml", "*.json", "*.safetensors"],
)
except:
download_path = None
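The widened allow_patterns filter is what makes snapshot_download also fetch the new Embed.safetensors asset from the Hub. For reference, a standalone sketch of the same call (repo id and patterns taken from the diff above; not part of the commit):

from huggingface_hub import snapshot_download

download_path = snapshot_download(
    repo_id="2Noise/ChatTTS",
    allow_patterns=["*.pt", "*.yaml", "*.json", "*.safetensors"],
)
print(download_path)  # local cache directory holding the downloaded assets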
@@ -150,7 +150,7 @@ def unload(self):
self.normalizer.destroy()
del self.normalizer
del self.sha256_map
del_list = ["vocos", "gpt", "decoder", "dvae", "tokenizer"]
del_list = ["vocos", "gpt", "decoder", "dvae", "tokenizer", "embed"]
for module in del_list:
if hasattr(self, module):
delattr(self, module)
@@ -228,6 +228,7 @@ def _load(
vocos_ckpt_path: str = None,
dvae_ckpt_path: str = None,
gpt_ckpt_path: str = None,
embed_path: str = None,
decoder_ckpt_path: str = None,
tokenizer_path: str = None,
device: Optional[torch.device] = None,
@@ -281,23 +282,35 @@ def _load(
self.dvae = dvae
self.logger.log(logging.INFO, "dvae loaded.")

embed = Embed(
self.config.embed.hidden_size,
self.config.embed.num_audio_tokens,
self.config.embed.num_text_tokens,
self.config.embed.num_vq,
)
embed.from_pretrained(embed_path)
self.embed = embed
self.logger.log(logging.INFO, "embed loaded.")

gpt = GPT(
gpt_config=asdict(self.config.gpt),
embed=self.embed,
use_flash_attn=use_flash_attn,
use_vllm=use_vllm,
device=device,
device_gpt=self.device_gpt,
logger=self.logger,
).eval()
assert gpt_ckpt_path, "gpt_ckpt_path should not be None"
gpt.from_pretrained(gpt_ckpt_path, experimental=experimental)
gpt.from_pretrained(gpt_ckpt_path, embed_path, experimental=experimental)
gpt.prepare(compile=compile and "cuda" in str(device))
self.gpt = gpt
self.logger.log(logging.INFO, "gpt loaded.")

self.speaker = Speaker(
self.config.gpt.hidden_size, self.config.spk_stat, device
)
self.logger.log(logging.INFO, "gpt loaded.")
self.logger.log(logging.INFO, "speaker loaded.")

decoder = (
DVAE(
@@ -528,7 +541,7 @@ def _infer_code(
),
]

emb = gpt(input_ids, text_mask)
emb = self.embed(input_ids, text_mask)

del text_mask

@@ -626,7 +639,7 @@ def _refine_text(
attentions=[],
)

emb = gpt(input_ids, text_mask)
emb = self.embed(input_ids, text_mask)

del text_mask

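Taken together, the core.py changes build the Embed module first, hand it to GPT (embed=self.embed, and gpt.from_pretrained now also receives embed_path), and route the token-embedding step in _infer_code and _refine_text through self.embed instead of the GPT model. The public API is unchanged; a typical usage sketch in the style of the project's README-level entry point still looks like this, with Embed.safetensors loaded internally during chat.load():

import ChatTTS

chat = ChatTTS.Chat()
chat.load(compile=False)   # also loads asset/Embed.safetensors into chat.embed
wavs = chat.infer(["A short ChatTTS smoke test after the embedding refactor."])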
1 change: 1 addition & 0 deletions ChatTTS/model/__init__.py
@@ -1,4 +1,5 @@
from .dvae import DVAE
from .embed import Embed
from .gpt import GPT
from .processors import gen_logits
from .speaker import Speaker
80 changes: 80 additions & 0 deletions ChatTTS/model/embed.py
@@ -0,0 +1,80 @@
from safetensors.torch import safe_open
import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import weight_norm


class Embed(nn.Module):
def __init__(
self, hidden_size: int, num_audio_tokens: int, num_text_tokens: int, num_vq=4
):
super().__init__()

self.num_vq = num_vq
self.num_audio_tokens = num_audio_tokens

self.model_dim = hidden_size
self.emb_code = nn.ModuleList(
[nn.Embedding(num_audio_tokens, self.model_dim) for _ in range(num_vq)],
)
self.emb_text = nn.Embedding(num_text_tokens, self.model_dim)

self.head_text = weight_norm(
nn.Linear(self.model_dim, num_text_tokens, bias=False),
name="weight",
)
self.head_code = nn.ModuleList(
[
weight_norm(
nn.Linear(self.model_dim, num_audio_tokens, bias=False),
name="weight",
)
for _ in range(self.num_vq)
],
)

@torch.inference_mode()
def from_pretrained(self, filename: str):
state_dict_tensors = {}
with safe_open(filename, framework="pt") as f:
for k in f.keys():
state_dict_tensors[k] = f.get_tensor(k)
self.load_state_dict(state_dict_tensors)

def __call__(
self, input_ids: torch.Tensor, text_mask: torch.Tensor
) -> torch.Tensor:
"""
get_emb
"""
return super().__call__(input_ids, text_mask)

@torch.inference_mode()
def forward(self, input_ids: torch.Tensor, text_mask: torch.Tensor) -> torch.Tensor:
"""
get_emb
"""
device = next(self.parameters()).device
emb_text: torch.Tensor = self.emb_text(
input_ids[text_mask].narrow(1, 0, 1).squeeze_(1).to(device)
)

text_mask_inv = text_mask.logical_not().to(device)
masked_input_ids: torch.Tensor = input_ids[text_mask_inv].to(device)

emb_code = [
self.emb_code[i](masked_input_ids[:, i]) for i in range(self.num_vq)
]
emb_code = torch.stack(emb_code, 2).sum(2)

emb = torch.zeros(
(input_ids.shape[:-1]) + (emb_text.shape[-1],),
device=emb_text.device,
dtype=emb_text.dtype,
)
emb[text_mask] = emb_text
emb[text_mask_inv] = emb_code.to(emb.dtype)

del emb_text, emb_code, text_mask_inv

return emb
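A small usage sketch of the module above (not part of the commit): the sizes match the Embed config defaults, but the weights here are randomly initialised for illustration; in the real pipeline from_pretrained() fills them from asset/Embed.safetensors. Text positions are embedded via emb_text using only the first VQ channel of input_ids, while audio positions sum the num_vq per-codebook embeddings.

import torch
from ChatTTS.model import Embed

embed = Embed(hidden_size=768, num_audio_tokens=626, num_text_tokens=21178, num_vq=4)

input_ids = torch.zeros(1, 6, 4, dtype=torch.long)                    # (batch, seq, num_vq)
text_mask = torch.tensor([[True, True, True, False, False, False]])   # 3 text, then 3 audio positions

emb = embed(input_ids, text_mask)
print(emb.shape)  # torch.Size([1, 6, 768]) -- text rows from emb_text, audio rows from summed emb_code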
(The remaining 15 changed files are not shown.)
