| """
|
| # api.py usage
|
|
|
`python api.py -dr "123.wav" -dt "一二三。" -dl "zh"`
|
|
|
## Command-line arguments:
|
|
|
`-s` - `SoVITS model path, can be set in config.py`

`-g` - `GPT model path, can be set in config.py`
|
|
|
Used when a request does not provide its own reference audio:

`-dr` - `default reference audio path`

`-dt` - `default reference audio text`

`-dl` - `default reference audio language, "中文","英文","日文","韩文","粤语","zh","en","ja","ko","yue"`
|
|
|
`-d` - `inference device, "cuda","cpu"`

`-a` - `bind address, default "0.0.0.0"`

`-p` - `bind port, default 9880, can be set in config.py`

`-fp` - `override config.py and use full precision`

`-hp` - `override config.py and use half precision`

`-sm` - `streaming response mode, off by default, "close","c", "normal","n", "keepalive","k"`

`-mt` - `returned audio encoding, ogg by default when streaming, wav otherwise, "wav", "ogg", "aac"`

`-st` - `returned audio sample type, default int16, "int16", "int32"`

`-cp` - `text splitting punctuation, empty by default, passed as a string such as ",.,。"`
|
|
|
`-hb` - `cnhubert path`

`-b` - `bert path`
|
|
|
## Endpoints:
|
|
|
### Inference
|
|
|
| endpoint: `/`
|
|
|
Using the reference audio specified by the command-line arguments:
|
| GET:
|
| `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh`
|
| POST:
|
| ```json
|
| {
|
| "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
|
| "text_language": "zh"
|
| }
|
| ```
|
|
|
Using the reference audio specified by the command-line arguments, with split punctuation:
|
| GET:
|
| `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&cut_punc=,。`
|
| POST:
|
| ```json
|
| {
|
| "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
|
| "text_language": "zh",
|
| "cut_punc": ",。",
|
| }
|
| ```
|
|
|
Manually specifying the reference audio for this request:
|
| GET:
|
| `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh`
|
| POST:
|
| ```json
|
| {
|
| "refer_wav_path": "123.wav",
|
| "prompt_text": "一二三。",
|
| "prompt_language": "zh",
|
| "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
|
| "text_language": "zh"
|
| }
|
| ```
|
|
|
| RESP:
|
Success: returns the wav audio stream directly, HTTP code 200

Failure: returns JSON with the error message, HTTP code 400
|
|
|
Manually specifying the reference audio and passing inference parameters:
|
| GET:
|
`http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&top_k=20&top_p=0.6&temperature=0.6&speed=1&inp_refs=456.wav&inp_refs=789.wav`
|
| POST:
|
| ```json
|
| {
|
| "refer_wav_path": "123.wav",
|
| "prompt_text": "一二三。",
|
| "prompt_language": "zh",
|
| "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
|
| "text_language": "zh",
|
| "top_k": 20,
|
| "top_p": 0.6,
|
| "temperature": 0.6,
|
| "speed": 1,
|
| "inp_refs": ["456.wav","789.wav"]
|
| }
|
| ```
|
|
|
| RESP:
|
Success: returns the wav audio stream directly, HTTP code 200

Failure: returns JSON with the error message, HTTP code 400
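
A minimal Python client sketch (assumes the server is reachable at 127.0.0.1:9880 with default settings; the `requests` package and the `output.wav` filename are illustrative, not part of this project):

```python
import requests

payload = {
    "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
    "text_language": "zh",
}
resp = requests.post("http://127.0.0.1:9880/", json=payload)
if resp.status_code == 200:
    # success: raw audio stream (wav by default when not streaming)
    with open("output.wav", "wb") as f:
        f.write(resp.content)
else:
    # failure: JSON with the error message
    print(resp.json())
```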
|
|
|
|
|
### Changing the default reference audio
|
|
|
| endpoint: `/change_refer`
|
|
|
The keys are the same as for the inference endpoint.
|
|
|
| GET:
|
| `http://127.0.0.1:9880/change_refer?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh`
|
| POST:
|
| ```json
|
| {
|
| "refer_wav_path": "123.wav",
|
| "prompt_text": "一二三。",
|
| "prompt_language": "zh"
|
| }
|
| ```
|
|
|
| RESP:
|
Success: JSON, HTTP code 200

Failure: JSON, HTTP code 400
|
|
|
|
|
### Command control
|
|
|
| endpoint: `/control`
|
|
|
command:

"restart": restart the service

"exit": terminate the process
|
|
|
| GET:
|
| `http://127.0.0.1:9880/control?command=restart`
|
| POST:
|
| ```json
|
| {
|
| "command": "restart"
|
| }
|
| ```
|
|
|
RESP: none
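
### Switching models

endpoint: `/set_model`

Keys are `gpt_model_path` and `sovits_model_path`; `xxx.ckpt` and `xxx.pth` below are placeholder paths.

GET:

`http://127.0.0.1:9880/set_model?gpt_model_path=xxx.ckpt&sovits_model_path=xxx.pth`

POST:

```json
{
    "gpt_model_path": "xxx.ckpt",
    "sovits_model_path": "xxx.pth"
}
```

RESP:

Success: JSON, HTTP code 200

Failure: JSON, HTTP code 400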
|
|
|
| """
|
|
|
| import argparse
|
| import os
|
| import re
|
| import sys
|
|
|
| now_dir = os.getcwd()
|
| sys.path.append(now_dir)
|
| sys.path.append("%s/GPT_SoVITS" % (now_dir))
|
|
|
| import signal
|
| from text.LangSegmenter import LangSegmenter
|
| from time import time as ttime
|
| import torch
|
| import torchaudio
|
| import librosa
|
| import soundfile as sf
|
| from fastapi import FastAPI, Request, Query
|
| from fastapi.responses import StreamingResponse, JSONResponse
|
| import uvicorn
|
| from transformers import AutoModelForMaskedLM, AutoTokenizer
|
| import numpy as np
|
| from feature_extractor import cnhubert
|
| from io import BytesIO
|
| from module.models import Generator, SynthesizerTrn, SynthesizerTrnV3
|
| from peft import LoraConfig, get_peft_model
|
| from AR.models.t2s_lightning_module import Text2SemanticLightningModule
|
| from text import cleaned_text_to_sequence
|
| from text.cleaner import clean_text
|
| from module.mel_processing import spectrogram_torch
|
| import config as global_config
|
import logging.config
|
| import subprocess
|
|
|
|
|
| class DefaultRefer:
|
| def __init__(self, path, text, language):
|
        self.path = path

        self.text = text

        self.language = language
|
|
|
| def is_ready(self) -> bool:
|
| return is_full(self.path, self.text, self.language)
|
|
|
|
|
| def is_empty(*items):
|
| for item in items:
|
| if item is not None and item != "":
|
| return False
|
| return True
|
|
|
|
|
| def is_full(*items):
|
| for item in items:
|
| if item is None or item == "":
|
| return False
|
| return True
|
|
|
|
|
| bigvgan_model = hifigan_model = sv_cn_model = None
|
|
|
|
|
| def clean_hifigan_model():
|
| global hifigan_model
|
| if hifigan_model:
|
| hifigan_model = hifigan_model.cpu()
|
| hifigan_model = None
|
| try:
|
| torch.cuda.empty_cache()
|
| except:
|
| pass
|
|
|
|
|
| def clean_bigvgan_model():
|
| global bigvgan_model
|
| if bigvgan_model:
|
| bigvgan_model = bigvgan_model.cpu()
|
| bigvgan_model = None
|
| try:
|
| torch.cuda.empty_cache()
|
| except:
|
| pass
|
|
|
|
|
| def clean_sv_cn_model():
|
| global sv_cn_model
|
| if sv_cn_model:
|
| sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu()
|
| sv_cn_model = None
|
| try:
|
| torch.cuda.empty_cache()
|
| except:
|
| pass
|
|
|
|
|
| def init_bigvgan():
|
| global bigvgan_model, hifigan_model, sv_cn_model
|
| from BigVGAN import bigvgan
|
|
|
| bigvgan_model = bigvgan.BigVGAN.from_pretrained(
|
| "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,),
|
| use_cuda_kernel=False,
|
| )
|
|
|
| bigvgan_model.remove_weight_norm()
|
| bigvgan_model = bigvgan_model.eval()
|
|
|
| if is_half == True:
|
| bigvgan_model = bigvgan_model.half().to(device)
|
| else:
|
| bigvgan_model = bigvgan_model.to(device)
|
|
|
|
|
| def init_hifigan():
|
| global hifigan_model, bigvgan_model, sv_cn_model
|
| hifigan_model = Generator(
|
| initial_channel=100,
|
| resblock="1",
|
| resblock_kernel_sizes=[3, 7, 11],
|
| resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
| upsample_rates=[10, 6, 2, 2, 2],
|
| upsample_initial_channel=512,
|
| upsample_kernel_sizes=[20, 12, 4, 4, 4],
|
| gin_channels=0,
|
| is_bias=True,
|
| )
|
| hifigan_model.eval()
|
| hifigan_model.remove_weight_norm()
|
| state_dict_g = torch.load(
|
| "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,),
|
| map_location="cpu",
|
| weights_only=False,
|
| )
|
| print("loading vocoder", hifigan_model.load_state_dict(state_dict_g))
|
| if is_half == True:
|
| hifigan_model = hifigan_model.half().to(device)
|
| else:
|
| hifigan_model = hifigan_model.to(device)
|
|
|
|
|
| from sv import SV
|
|
|
|
|
| def init_sv_cn():
|
| global hifigan_model, bigvgan_model, sv_cn_model
|
| sv_cn_model = SV(device, is_half)
|
|
|
|
|
| resample_transform_dict = {}
|
|
|
|
|
| def resample(audio_tensor, sr0, sr1, device):
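    # Cache one torchaudio Resample transform per (sr0, sr1, device) triple and
    # reuse it across calls.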
|
| global resample_transform_dict
|
| key = "%s-%s-%s" % (sr0, sr1, str(device))
|
| if key not in resample_transform_dict:
|
| resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device)
|
| return resample_transform_dict[key](audio_tensor)
|
|
|
|
|
| from module.mel_processing import mel_spectrogram_torch
|
|
|
| spec_min = -12
|
| spec_max = 2
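
# norm_spec/denorm_spec linearly map log-mel values between [spec_min, spec_max]
# and [-1, 1], the normalized range the v3/v4 CFM mel decoder operates in.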
|
|
|
|
|
| def norm_spec(x):
|
| return (x - spec_min) / (spec_max - spec_min) * 2 - 1
|
|
|
|
|
| def denorm_spec(x):
|
| return (x + 1) / 2 * (spec_max - spec_min) + spec_min
|
|
|
|
|
| mel_fn = lambda x: mel_spectrogram_torch(
|
| x,
|
| **{
|
| "n_fft": 1024,
|
| "win_size": 1024,
|
| "hop_size": 256,
|
| "num_mels": 100,
|
| "sampling_rate": 24000,
|
| "fmin": 0,
|
| "fmax": None,
|
| "center": False,
|
| },
|
| )
|
| mel_fn_v4 = lambda x: mel_spectrogram_torch(
|
| x,
|
| **{
|
| "n_fft": 1280,
|
| "win_size": 1280,
|
| "hop_size": 320,
|
| "num_mels": 100,
|
| "sampling_rate": 32000,
|
| "fmin": 0,
|
| "fmax": None,
|
| "center": False,
|
| },
|
| )
|
|
|
|
|
| sr_model = None
|
|
|
|
|
| def audio_sr(audio, sr):
|
| global sr_model
|
| if sr_model == None:
|
| from tools.audio_sr import AP_BWE
|
|
|
| try:
|
| sr_model = AP_BWE(device, DictToAttrRecursive)
|
| except FileNotFoundError:
|
            logger.info("Super-resolution model weights not found, so super-resolution is skipped. To enable it, download the files per the tutorial first.")
|
| return audio.cpu().detach().numpy(), sr
|
| return sr_model(audio, sr)
|
|
|
|
|
| class Speaker:
|
| def __init__(self, name, gpt, sovits, phones=None, bert=None, prompt=None):
|
| self.name = name
|
| self.sovits = sovits
|
| self.gpt = gpt
|
| self.phones = phones
|
| self.bert = bert
|
| self.prompt = prompt
|
|
|
|
|
| speaker_list = {}
|
|
|
|
|
| class Sovits:
|
| def __init__(self, vq_model, hps):
|
| self.vq_model = vq_model
|
| self.hps = hps
|
|
|
|
|
| from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new
|
|
|
|
|
| def get_sovits_weights(sovits_path):
|
| from config import pretrained_sovits_name
|
|
|
| path_sovits_v3 = pretrained_sovits_name["v3"]
|
| path_sovits_v4 = pretrained_sovits_name["v4"]
|
| is_exist_s2gv3 = os.path.exists(path_sovits_v3)
|
| is_exist_s2gv4 = os.path.exists(path_sovits_v4)
|
|
|
| version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path)
|
| is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4
|
| path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4
|
|
|
| if if_lora_v3 == True and is_exist == False:
|
        logger.info("SoVITS %s base model is missing; the corresponding LoRA weights cannot be loaded" % model_version)
|
|
|
| dict_s2 = load_sovits_new(sovits_path)
|
| hps = dict_s2["config"]
|
| hps = DictToAttrRecursive(hps)
|
| hps.model.semantic_frame_rate = "25hz"
|
| if "enc_p.text_embedding.weight" not in dict_s2["weight"]:
|
| hps.model.version = "v2"
|
| elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322:
|
| hps.model.version = "v1"
|
| else:
|
| hps.model.version = "v2"
|
|
|
| model_params_dict = vars(hps.model)
|
| if model_version not in {"v3", "v4"}:
|
| if "Pro" in model_version:
|
| hps.model.version = model_version
|
| if sv_cn_model == None:
|
| init_sv_cn()
|
|
|
| vq_model = SynthesizerTrn(
|
| hps.data.filter_length // 2 + 1,
|
| hps.train.segment_size // hps.data.hop_length,
|
| n_speakers=hps.data.n_speakers,
|
| **model_params_dict,
|
| )
|
| else:
|
| hps.model.version = model_version
|
| vq_model = SynthesizerTrnV3(
|
| hps.data.filter_length // 2 + 1,
|
| hps.train.segment_size // hps.data.hop_length,
|
| n_speakers=hps.data.n_speakers,
|
| **model_params_dict,
|
| )
|
| if model_version == "v3":
|
| init_bigvgan()
|
| if model_version == "v4":
|
| init_hifigan()
|
|
|
| model_version = hps.model.version
|
    logger.info(f"Model version: {model_version}")
|
| if "pretrained" not in sovits_path:
|
| try:
|
| del vq_model.enc_q
|
| except:
|
| pass
|
| if is_half == True:
|
| vq_model = vq_model.half().to(device)
|
| else:
|
| vq_model = vq_model.to(device)
|
| vq_model.eval()
|
| if if_lora_v3 == False:
|
| vq_model.load_state_dict(dict_s2["weight"], strict=False)
|
| else:
|
| path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4
|
| vq_model.load_state_dict(load_sovits_new(path_sovits)["weight"], strict=False)
|
| lora_rank = dict_s2["lora_rank"]
|
| lora_config = LoraConfig(
|
| target_modules=["to_k", "to_q", "to_v", "to_out.0"],
|
| r=lora_rank,
|
| lora_alpha=lora_rank,
|
| init_lora_weights=True,
|
| )
|
| vq_model.cfm = get_peft_model(vq_model.cfm, lora_config)
|
| vq_model.load_state_dict(dict_s2["weight"], strict=False)
|
| vq_model.cfm = vq_model.cfm.merge_and_unload()
|
|
|
| vq_model.eval()
|
|
|
| sovits = Sovits(vq_model, hps)
|
| return sovits
|
|
|
|
|
| class Gpt:
|
| def __init__(self, max_sec, t2s_model):
|
| self.max_sec = max_sec
|
| self.t2s_model = t2s_model
|
|
|
|
|
| global hz
|
| hz = 50
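
# 50 semantic tokens per second; hz * max_sec caps GPT generation length
# (early_stop_num in infer_panel).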
|
|
|
|
|
| def get_gpt_weights(gpt_path):
|
| dict_s1 = torch.load(gpt_path, map_location="cpu", weights_only=False)
|
| config = dict_s1["config"]
|
| max_sec = config["data"]["max_sec"]
|
| t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
|
| t2s_model.load_state_dict(dict_s1["weight"])
|
| if is_half == True:
|
| t2s_model = t2s_model.half()
|
| t2s_model = t2s_model.to(device)
|
    t2s_model.eval()

| gpt = Gpt(max_sec, t2s_model)
|
| return gpt
|
|
|
|
|
| def change_gpt_sovits_weights(gpt_path, sovits_path):
|
| try:
|
| gpt = get_gpt_weights(gpt_path)
|
| sovits = get_sovits_weights(sovits_path)
|
| except Exception as e:
|
| return JSONResponse({"code": 400, "message": str(e)}, status_code=400)
|
|
|
| speaker_list["default"] = Speaker(name="default", gpt=gpt, sovits=sovits)
|
| return JSONResponse({"code": 0, "message": "Success"}, status_code=200)
|
|
|
|
|
| def get_bert_feature(text, word2ph):
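    # BERT yields one feature vector per character; each is repeated word2ph[i]
    # times to produce phone-level features (only used for Chinese text).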
|
| with torch.no_grad():
|
| inputs = tokenizer(text, return_tensors="pt")
|
| for i in inputs:
|
| inputs[i] = inputs[i].to(device)
|
| res = bert_model(**inputs, output_hidden_states=True)
|
| res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
|
| assert len(word2ph) == len(text)
|
| phone_level_feature = []
|
| for i in range(len(word2ph)):
|
| repeat_feature = res[i].repeat(word2ph[i], 1)
|
| phone_level_feature.append(repeat_feature)
|
| phone_level_feature = torch.cat(phone_level_feature, dim=0)
|
|
|
| return phone_level_feature.T
|
|
|
|
|
| def clean_text_inf(text, language, version):
|
| language = language.replace("all_", "")
|
| phones, word2ph, norm_text = clean_text(text, language, version)
|
| phones = cleaned_text_to_sequence(phones, version)
|
| return phones, word2ph, norm_text
|
|
|
|
|
| def get_bert_inf(phones, word2ph, norm_text, language):
|
| language = language.replace("all_", "")
|
| if language == "zh":
|
| bert = get_bert_feature(norm_text, word2ph).to(device)
|
| else:
|
| bert = torch.zeros(
|
| (1024, len(phones)),
|
| dtype=torch.float16 if is_half == True else torch.float32,
|
| ).to(device)
|
|
|
| return bert
|
|
|
|
|
| from text import chinese
|
|
|
|
|
| def get_phones_and_bert(text, language, version, final=False):
|
| text = re.sub(r' {2,}', ' ', text)
|
| textlist = []
|
| langlist = []
|
| if language == "all_zh":
|
| for tmp in LangSegmenter.getTexts(text,"zh"):
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| elif language == "all_yue":
|
| for tmp in LangSegmenter.getTexts(text,"zh"):
|
| if tmp["lang"] == "zh":
|
| tmp["lang"] = "yue"
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| elif language == "all_ja":
|
| for tmp in LangSegmenter.getTexts(text,"ja"):
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| elif language == "all_ko":
|
| for tmp in LangSegmenter.getTexts(text,"ko"):
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| elif language == "en":
|
| langlist.append("en")
|
| textlist.append(text)
|
| elif language == "auto":
|
| for tmp in LangSegmenter.getTexts(text):
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| elif language == "auto_yue":
|
| for tmp in LangSegmenter.getTexts(text):
|
| if tmp["lang"] == "zh":
|
| tmp["lang"] = "yue"
|
| langlist.append(tmp["lang"])
|
| textlist.append(tmp["text"])
|
| else:
|
| for tmp in LangSegmenter.getTexts(text):
|
| if langlist:
|
| if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
|
| textlist[-1] += tmp["text"]
|
| continue
|
| if tmp["lang"] == "en":
|
| langlist.append(tmp["lang"])
|
| else:
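                # non-English segments keep the caller-requested language tag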
|
|
|
| langlist.append(language)
|
| textlist.append(tmp["text"])
|
| phones_list = []
|
| bert_list = []
|
| norm_text_list = []
|
| for i in range(len(textlist)):
|
| lang = langlist[i]
|
| phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
|
| bert = get_bert_inf(phones, word2ph, norm_text, lang)
|
| phones_list.append(phones)
|
| norm_text_list.append(norm_text)
|
| bert_list.append(bert)
|
| bert = torch.cat(bert_list, dim=1)
|
| phones = sum(phones_list, [])
|
| norm_text = "".join(norm_text_list)
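
    # Inputs shorter than 6 phonemes are retried once with a leading "." so the
    # model has enough context.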
|
|
|
| if not final and len(phones) < 6:
|
| return get_phones_and_bert("." + text, language, version, final=True)
|
|
|
| return phones, bert.to(torch.float16 if is_half == True else torch.float32), norm_text
|
|
|
|
|
| class DictToAttrRecursive(dict):
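    """A dict whose nested keys are also accessible as attributes, e.g. hps.data.sampling_rate."""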
|
| def __init__(self, input_dict):
|
| super().__init__(input_dict)
|
| for key, value in input_dict.items():
|
| if isinstance(value, dict):
|
| value = DictToAttrRecursive(value)
|
| self[key] = value
|
| setattr(self, key, value)
|
|
|
| def __getattr__(self, item):
|
| try:
|
| return self[item]
|
| except KeyError:
|
| raise AttributeError(f"Attribute {item} not found")
|
|
|
| def __setattr__(self, key, value):
|
| if isinstance(value, dict):
|
| value = DictToAttrRecursive(value)
|
| super(DictToAttrRecursive, self).__setitem__(key, value)
|
| super().__setattr__(key, value)
|
|
|
| def __delattr__(self, item):
|
| try:
|
| del self[item]
|
| except KeyError:
|
| raise AttributeError(f"Attribute {item} not found")
|
|
|
|
|
| def get_spepc(hps, filename, dtype, device, is_v2pro=False):
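    # Load the reference audio, downmix stereo to mono, resample to the model's
    # rate, scale down clipped audio, and return its linear spectrogram (plus
    # 16 kHz audio for the v2Pro speaker embedding when is_v2pro is set).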
|
| sr1 = int(hps.data.sampling_rate)
|
| audio, sr0 = torchaudio.load(filename)
|
| if sr0 != sr1:
|
| audio = audio.to(device)
|
| if audio.shape[0] == 2:
|
| audio = audio.mean(0).unsqueeze(0)
|
| audio = resample(audio, sr0, sr1, device)
|
| else:
|
| audio = audio.to(device)
|
| if audio.shape[0] == 2:
|
| audio = audio.mean(0).unsqueeze(0)
|
|
|
| maxx = audio.abs().max()
|
| if maxx > 1:
|
| audio /= min(2, maxx)
|
| spec = spectrogram_torch(
|
| audio,
|
| hps.data.filter_length,
|
| hps.data.sampling_rate,
|
| hps.data.hop_length,
|
| hps.data.win_length,
|
| center=False,
|
| )
|
| spec = spec.to(dtype)
|
| if is_v2pro == True:
|
| audio = resample(audio, sr1, 16000, device).to(dtype)
|
| return spec, audio
|
|
|
|
|
| def pack_audio(audio_bytes, data, rate):
|
| if media_type == "ogg":
|
| audio_bytes = pack_ogg(audio_bytes, data, rate)
|
| elif media_type == "aac":
|
| audio_bytes = pack_aac(audio_bytes, data, rate)
|
| else:
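        # wav needs a header with the final length, so raw PCM is buffered here
        # and wrapped by pack_wav() once synthesis finishes.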
|
|
|
| audio_bytes = pack_raw(audio_bytes, data, rate)
|
|
|
| return audio_bytes
|
|
|
|
|
def pack_ogg(audio_bytes, data, rate):

    # Writing OGG through soundfile can overflow the default thread stack, so
    # the write is done in a dedicated thread with an enlarged stack size.
|
| def handle_pack_ogg():
|
| with sf.SoundFile(audio_bytes, mode="w", samplerate=rate, channels=1, format="ogg") as audio_file:
|
| audio_file.write(data)
|
|
|
    import threading
|
| stack_size = 4096 * 4096
|
| try:
|
| threading.stack_size(stack_size)
|
| pack_ogg_thread = threading.Thread(target=handle_pack_ogg)
|
| pack_ogg_thread.start()
|
| pack_ogg_thread.join()
|
| except RuntimeError as e:
|
|
|
| print("RuntimeError: {}".format(e))
|
| print("Changing the thread stack size is unsupported.")
|
| except ValueError as e:
|
|
|
| print("ValueError: {}".format(e))
|
| print("The specified stack size is invalid.")
|
|
|
| return audio_bytes
|
|
|
|
|
| def pack_raw(audio_bytes, data, rate):
|
| audio_bytes.write(data.tobytes())
|
|
|
| return audio_bytes
|
|
|
|
|
| def pack_wav(audio_bytes, rate):
|
| if is_int32:
|
| data = np.frombuffer(audio_bytes.getvalue(), dtype=np.int32)
|
| wav_bytes = BytesIO()
|
| sf.write(wav_bytes, data, rate, format="WAV", subtype="PCM_32")
|
| else:
|
| data = np.frombuffer(audio_bytes.getvalue(), dtype=np.int16)
|
| wav_bytes = BytesIO()
|
| sf.write(wav_bytes, data, rate, format="WAV")
|
| return wav_bytes
|
|
|
|
|
| def pack_aac(audio_bytes, data, rate):
|
| if is_int32:
|
| pcm = "s32le"
|
| bit_rate = "256k"
|
| else:
|
| pcm = "s16le"
|
| bit_rate = "128k"
|
| process = subprocess.Popen(
|
| [
|
| "ffmpeg",
|
| "-f",
|
| pcm,
|
| "-ar",
|
| str(rate),
|
| "-ac",
|
| "1",
|
| "-i",
|
| "pipe:0",
|
| "-c:a",
|
| "aac",
|
| "-b:a",
|
| bit_rate,
|
| "-vn",
|
| "-f",
|
| "adts",
|
| "pipe:1",
|
| ],
|
| stdin=subprocess.PIPE,
|
| stdout=subprocess.PIPE,
|
| stderr=subprocess.PIPE,
|
| )
|
| out, _ = process.communicate(input=data.tobytes())
|
| audio_bytes.write(out)
|
|
|
| return audio_bytes
|
|
|
|
|
| def read_clean_buffer(audio_bytes):
|
| audio_chunk = audio_bytes.getvalue()
|
| audio_bytes.truncate(0)
|
| audio_bytes.seek(0)
|
|
|
| return audio_bytes, audio_chunk
|
|
|
|
|
| def cut_text(text, punc):
|
| punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}]
|
| if len(punc_list) > 0:
|
| punds = r"[" + "".join(punc_list) + r"]"
|
| text = text.strip("\n")
|
| items = re.split(f"({punds})", text)
|
| mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])]
|
|
|
| if len(items) % 2 == 1:
|
| mergeitems.append(items[-1])
|
| text = "\n".join(mergeitems)
|
|
|
| while "\n\n" in text:
|
| text = text.replace("\n\n", "\n")
|
|
|
| return text
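
# For illustration (hypothetical call): cut_text("一,二。三", ",。") returns
# "一,\n二。\n三"; get_tts_wav later synthesizes each line separately.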
|
|
|
|
|
| def only_punc(text):
|
    return not any(t.isalnum() for t in text)
|
|
|
|
|
| splits = {
|
| ",",
|
| "。",
|
| "?",
|
| "!",
|
| ",",
|
| ".",
|
| "?",
|
| "!",
|
| "~",
|
| ":",
|
| ":",
|
| "—",
|
| "…",
|
| }
|
|
|
|
|
| def get_tts_wav(
|
| ref_wav_path,
|
| prompt_text,
|
| prompt_language,
|
| text,
|
| text_language,
|
| top_k=15,
|
| top_p=0.6,
|
| temperature=0.6,
|
| speed=1,
|
| inp_refs=None,
|
| sample_steps=32,
|
| if_sr=False,
|
| spk="default",
|
| ):
|
| infer_sovits = speaker_list[spk].sovits
|
| vq_model = infer_sovits.vq_model
|
| hps = infer_sovits.hps
|
| version = vq_model.version
|
|
|
| infer_gpt = speaker_list[spk].gpt
|
| t2s_model = infer_gpt.t2s_model
|
| max_sec = infer_gpt.max_sec
|
|
|
| if version == "v3":
|
| if sample_steps not in [4, 8, 16, 32, 64, 128]:
|
| sample_steps = 32
|
| elif version == "v4":
|
| if sample_steps not in [4, 8, 16, 32]:
|
| sample_steps = 8
|
|
|
| if if_sr and version != "v3":
|
| if_sr = False
|
|
|
| t0 = ttime()
|
| prompt_text = prompt_text.strip("\n")
|
| if prompt_text[-1] not in splits:
|
| prompt_text += "。" if prompt_language != "en" else "."
|
| prompt_language, text = prompt_language, text.strip("\n")
|
| dtype = torch.float16 if is_half == True else torch.float32
|
| zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32)
|
| with torch.no_grad():
|
| wav16k, sr = librosa.load(ref_wav_path, sr=16000)
|
| wav16k = torch.from_numpy(wav16k)
|
| zero_wav_torch = torch.from_numpy(zero_wav)
|
| if is_half == True:
|
| wav16k = wav16k.half().to(device)
|
| zero_wav_torch = zero_wav_torch.half().to(device)
|
| else:
|
| wav16k = wav16k.to(device)
|
| zero_wav_torch = zero_wav_torch.to(device)
|
| wav16k = torch.cat([wav16k, zero_wav_torch])
|
| ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2)
|
| codes = vq_model.extract_latent(ssl_content)
|
| prompt_semantic = codes[0, 0]
|
| prompt = prompt_semantic.unsqueeze(0).to(device)
|
|
|
| is_v2pro = version in {"v2Pro", "v2ProPlus"}
|
| if version not in {"v3", "v4"}:
|
| refers = []
|
| if is_v2pro:
|
| sv_emb = []
|
| if sv_cn_model == None:
|
| init_sv_cn()
|
| if inp_refs:
|
| for path in inp_refs:
|
| try:
|
| refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro)
|
| refers.append(refer)
|
| if is_v2pro:
|
| sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor))
|
| except Exception as e:
|
| logger.error(e)
|
| if len(refers) == 0:
|
| refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro)
|
| refers = [refers]
|
| if is_v2pro:
|
| sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)]
|
| else:
|
| refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device)
|
|
|
| t1 = ttime()
|
|
|
| prompt_language = dict_language[prompt_language.lower()]
|
| text_language = dict_language[text_language.lower()]
|
| phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version)
|
| texts = text.split("\n")
|
| audio_bytes = BytesIO()
|
|
|
| for text in texts:
|
|
|
| if only_punc(text):
|
| continue
|
|
|
| audio_opt = []
|
| if text[-1] not in splits:
|
| text += "。" if text_language != "en" else "."
|
| phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version)
|
| bert = torch.cat([bert1, bert2], 1)
|
|
|
| all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
|
| bert = bert.to(device).unsqueeze(0)
|
| all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
|
| t2 = ttime()
|
| with torch.no_grad():
|
| pred_semantic, idx = t2s_model.model.infer_panel(
|
| all_phoneme_ids,
|
| all_phoneme_len,
|
| prompt,
|
| bert,
|
|
|
| top_k=top_k,
|
| top_p=top_p,
|
| temperature=temperature,
|
| early_stop_num=hz * max_sec,
|
| )
|
| pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
|
| t3 = ttime()
|
|
|
| if version not in {"v3", "v4"}:
|
| if is_v2pro:
|
| audio = (
|
| vq_model.decode(
|
| pred_semantic,
|
| torch.LongTensor(phones2).to(device).unsqueeze(0),
|
| refers,
|
| speed=speed,
|
| sv_emb=sv_emb,
|
| )
|
| .detach()
|
| .cpu()
|
| .numpy()[0, 0]
|
| )
|
| else:
|
| audio = (
|
| vq_model.decode(
|
| pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed
|
| )
|
| .detach()
|
| .cpu()
|
| .numpy()[0, 0]
|
| )
|
| else:
|
| phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0)
|
| phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0)
|
|
|
| fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
|
| ref_audio, sr = torchaudio.load(ref_wav_path)
|
| ref_audio = ref_audio.to(device).float()
|
| if ref_audio.shape[0] == 2:
|
| ref_audio = ref_audio.mean(0).unsqueeze(0)
|
|
|
| tgt_sr = 24000 if version == "v3" else 32000
|
| if sr != tgt_sr:
|
| ref_audio = resample(ref_audio, sr, tgt_sr, device)
|
| mel2 = mel_fn(ref_audio) if version == "v3" else mel_fn_v4(ref_audio)
|
| mel2 = norm_spec(mel2)
|
| T_min = min(mel2.shape[2], fea_ref.shape[2])
|
| mel2 = mel2[:, :, :T_min]
|
| fea_ref = fea_ref[:, :, :T_min]
|
| Tref = 468 if version == "v3" else 500
|
| Tchunk = 934 if version == "v3" else 1000
|
| if T_min > Tref:
|
| mel2 = mel2[:, :, -Tref:]
|
| fea_ref = fea_ref[:, :, -Tref:]
|
| T_min = Tref
|
| chunk_len = Tchunk - T_min
|
| mel2 = mel2.to(dtype)
|
| fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge, speed)
|
| cfm_resss = []
|
| idx = 0
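            # Decode the mel in chunks; each pass conditions the CFM on the last
            # T_min frames of the previous chunk as rolling context.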
|
| while 1:
|
| fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len]
|
| if fea_todo_chunk.shape[-1] == 0:
|
| break
|
| idx += chunk_len
|
| fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
|
| cfm_res = vq_model.cfm.inference(
|
| fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0
|
| )
|
| cfm_res = cfm_res[:, :, mel2.shape[2] :]
|
| mel2 = cfm_res[:, :, -T_min:]
|
| fea_ref = fea_todo_chunk[:, :, -T_min:]
|
| cfm_resss.append(cfm_res)
|
| cfm_res = torch.cat(cfm_resss, 2)
|
| cfm_res = denorm_spec(cfm_res)
|
| if version == "v3":
|
| if bigvgan_model == None:
|
| init_bigvgan()
|
| else:
|
| if hifigan_model == None:
|
| init_hifigan()
|
| vocoder_model = bigvgan_model if version == "v3" else hifigan_model
|
| with torch.inference_mode():
|
| wav_gen = vocoder_model(cfm_res)
|
| audio = wav_gen[0][0].cpu().detach().numpy()
|
|
|
| max_audio = np.abs(audio).max()
|
| if max_audio > 1:
|
| audio /= max_audio
|
| audio_opt.append(audio)
|
| audio_opt.append(zero_wav)
|
| audio_opt = np.concatenate(audio_opt, 0)
|
| t4 = ttime()
|
|
|
| if version in {"v1", "v2", "v2Pro", "v2ProPlus"}:
|
| sr = 32000
|
| elif version == "v3":
|
| sr = 24000
|
| else:
|
| sr = 48000
|
|
|
| if if_sr and sr == 24000:
|
| audio_opt = torch.from_numpy(audio_opt).float().to(device)
|
| audio_opt, sr = audio_sr(audio_opt.unsqueeze(0), sr)
|
| max_audio = np.abs(audio_opt).max()
|
| if max_audio > 1:
|
| audio_opt /= max_audio
|
| sr = 48000
|
|
|
| if is_int32:
|
| audio_bytes = pack_audio(audio_bytes, (audio_opt * 2147483647).astype(np.int32), sr)
|
| else:
|
| audio_bytes = pack_audio(audio_bytes, (audio_opt * 32768).astype(np.int16), sr)
|
|
|
| if stream_mode == "normal":
|
| audio_bytes, audio_chunk = read_clean_buffer(audio_bytes)
|
| yield audio_chunk
|
|
|
| if not stream_mode == "normal":
|
| if media_type == "wav":
|
| if version in {"v1", "v2", "v2Pro", "v2ProPlus"}:
|
| sr = 32000
|
| elif version == "v3":
|
| sr = 48000 if if_sr else 24000
|
| else:
|
| sr = 48000
|
| audio_bytes = pack_wav(audio_bytes, sr)
|
| yield audio_bytes.getvalue()
|
|
|
|
|
| def handle_control(command):
|
| if command == "restart":
|
| os.execl(g_config.python_exec, g_config.python_exec, *sys.argv)
|
| elif command == "exit":
|
| os.kill(os.getpid(), signal.SIGTERM)
|
| exit(0)
|
|
|
|
|
| def handle_change(path, text, language):
|
| if is_empty(path, text, language):
|
        return JSONResponse(

            {"code": 400, "message": 'Missing any of the following parameters: "path", "text", "language"'}, status_code=400

        )
|
|
|
    if path is not None and path != "":

        default_refer.path = path

    if text is not None and text != "":

        default_refer.text = text

    if language is not None and language != "":

        default_refer.language = language
|
|
|
    logger.info(f"Current default reference audio path: {default_refer.path}")

    logger.info(f"Current default reference audio text: {default_refer.text}")

    logger.info(f"Current default reference audio language: {default_refer.language}")
|
| logger.info(f"is_ready: {default_refer.is_ready()}")
|
|
|
| return JSONResponse({"code": 0, "message": "Success"}, status_code=200)
|
|
|
|
|
| def handle(
|
| refer_wav_path,
|
| prompt_text,
|
| prompt_language,
|
| text,
|
| text_language,
|
| cut_punc,
|
| top_k,
|
| top_p,
|
| temperature,
|
| speed,
|
| inp_refs,
|
| sample_steps,
|
| if_sr,
|
| ):
|
| if (
|
| refer_wav_path == ""
|
| or refer_wav_path is None
|
| or prompt_text == ""
|
| or prompt_text is None
|
| or prompt_language == ""
|
| or prompt_language is None
|
| ):
|
| refer_wav_path, prompt_text, prompt_language = (
|
| default_refer.path,
|
| default_refer.text,
|
| default_refer.language,
|
| )
|
| if not default_refer.is_ready():
|
            return JSONResponse({"code": 400, "message": "No reference audio specified and no default reference is configured"}, status_code=400)
|
|
|
| if cut_punc == None:
|
| text = cut_text(text, default_cut_punc)
|
| else:
|
| text = cut_text(text, cut_punc)
|
|
|
| return StreamingResponse(
|
| get_tts_wav(
|
| refer_wav_path,
|
| prompt_text,
|
| prompt_language,
|
| text,
|
| text_language,
|
| top_k,
|
| top_p,
|
| temperature,
|
| speed,
|
| inp_refs,
|
| sample_steps,
|
| if_sr,
|
| ),
|
| media_type="audio/" + media_type,
|
| )
|
|
|
|
|
|
|
|
|
|
|
| dict_language = {
|
| "中文": "all_zh",
|
| "粤语": "all_yue",
|
| "英文": "en",
|
| "日文": "all_ja",
|
| "韩文": "all_ko",
|
| "中英混合": "zh",
|
| "粤英混合": "yue",
|
| "日英混合": "ja",
|
| "韩英混合": "ko",
|
| "多语种混合": "auto",
|
| "多语种混合(粤语)": "auto_yue",
|
| "all_zh": "all_zh",
|
| "all_yue": "all_yue",
|
| "en": "en",
|
| "all_ja": "all_ja",
|
| "all_ko": "all_ko",
|
| "zh": "zh",
|
| "yue": "yue",
|
| "ja": "ja",
|
| "ko": "ko",
|
| "auto": "auto",
|
| "auto_yue": "auto_yue",
|
| }
|
|
|
|
|
| logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG)
|
| logger = logging.getLogger("uvicorn")
|
|
|
|
|
| g_config = global_config.Config()
|
|
|
|
|
| parser = argparse.ArgumentParser(description="GPT-SoVITS api")
|
|
|
parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS model path")

parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT model path")

parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="default reference audio path")

parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="default reference audio text")

parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="default reference audio language")
|
| parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu")
|
| parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
|
| parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
|
parser.add_argument(

    "-fp", "--full_precision", action="store_true", default=False, help="override config.is_half to False, use full precision"

)
|
parser.add_argument(

    "-hp", "--half_precision", action="store_true", default=False, help="override config.is_half to True, use half precision"

)
|
|
|
|
|
parser.add_argument("-sm", "--stream_mode", type=str, default="close", help="streaming response mode, close / normal / keepalive")

parser.add_argument("-mt", "--media_type", type=str, default="wav", help="audio encoding, wav / ogg / aac")

parser.add_argument("-st", "--sub_type", type=str, default="int16", help="audio sample type, int16 / int32")

parser.add_argument("-cp", "--cut_punc", type=str, default="", help="text splitting punctuation, chosen from ,.;?!、,。?!;:…")
|
|
|
parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="override config.cnhubert_path")

parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="override config.bert_path")
|
|
|
| args = parser.parse_args()
|
| sovits_path = args.sovits_path
|
| gpt_path = args.gpt_path
|
| device = args.device
|
| port = args.port
|
| host = args.bind_addr
|
| cnhubert_base_path = args.hubert_path
|
| bert_path = args.bert_path
|
| default_cut_punc = args.cut_punc
|
|
|
|
|
| default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language)
|
|
|
|
|
| if sovits_path == "":
|
| sovits_path = g_config.pretrained_sovits_path
|
    logger.warning(f"SoVITS model path not specified; current value after fallback: {sovits_path}")
|
| if gpt_path == "":
|
| gpt_path = g_config.pretrained_gpt_path
|
    logger.warning(f"GPT model path not specified; current value after fallback: {gpt_path}")
|
|
|
|
|
| if default_refer.path == "" or default_refer.text == "" or default_refer.language == "":
|
| default_refer.path, default_refer.text, default_refer.language = "", "", ""
|
    logger.info("No default reference audio specified")

else:

    logger.info(f"Default reference audio path: {default_refer.path}")

    logger.info(f"Default reference audio text: {default_refer.text}")

    logger.info(f"Default reference audio language: {default_refer.language}")
|
|
|
|
|
| is_half = g_config.is_half
|
| if args.full_precision:
|
| is_half = False
|
| if args.half_precision:
|
| is_half = True
|
| if args.full_precision and args.half_precision:
|
| is_half = g_config.is_half
|
logger.info(f"Half precision: {is_half}")
|
|
|
|
|
| if args.stream_mode.lower() in ["normal", "n"]:
|
| stream_mode = "normal"
|
    logger.info("Streaming mode enabled")
|
| else:
|
| stream_mode = "close"
|
|
|
|
|
| if args.media_type.lower() in ["aac", "ogg"]:
|
| media_type = args.media_type.lower()
|
| elif stream_mode == "close":
|
| media_type = "wav"
|
| else:
|
| media_type = "ogg"
|
logger.info(f"Media type: {media_type}")
|
|
|
|
|
if args.sub_type.lower() == "int32":

    is_int32 = True

    logger.info("Sample type: int32")

else:

    is_int32 = False

    logger.info("Sample type: int16")
|
|
|
|
|
| cnhubert.cnhubert_base_path = cnhubert_base_path
|
| tokenizer = AutoTokenizer.from_pretrained(bert_path)
|
| bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
|
| ssl_model = cnhubert.get_model()
|
| if is_half:
|
| bert_model = bert_model.half().to(device)
|
| ssl_model = ssl_model.half().to(device)
|
| else:
|
| bert_model = bert_model.to(device)
|
| ssl_model = ssl_model.to(device)
|
| change_gpt_sovits_weights(gpt_path=gpt_path, sovits_path=sovits_path)
|
|
|
|
|
|
|
|
|
|
|
| app = FastAPI()
|
|
|
|
|
| @app.post("/set_model")
|
| async def set_model(request: Request):
|
| json_post_raw = await request.json()
|
| return change_gpt_sovits_weights(
|
| gpt_path=json_post_raw.get("gpt_model_path"), sovits_path=json_post_raw.get("sovits_model_path")
|
| )
|
|
|
|
|
| @app.get("/set_model")
|
| async def set_model(
|
| gpt_model_path: str = None,
|
| sovits_model_path: str = None,
|
| ):
|
| return change_gpt_sovits_weights(gpt_path=gpt_model_path, sovits_path=sovits_model_path)
|
|
|
|
|
| @app.post("/control")
|
| async def control(request: Request):
|
| json_post_raw = await request.json()
|
| return handle_control(json_post_raw.get("command"))
|
|
|
|
|
| @app.get("/control")
|
| async def control(command: str = None):
|
| return handle_control(command)
|
|
|
|
|
| @app.post("/change_refer")
|
| async def change_refer(request: Request):
|
| json_post_raw = await request.json()
|
| return handle_change(
|
| json_post_raw.get("refer_wav_path"), json_post_raw.get("prompt_text"), json_post_raw.get("prompt_language")
|
| )
|
|
|
|
|
| @app.get("/change_refer")
|
| async def change_refer(refer_wav_path: str = None, prompt_text: str = None, prompt_language: str = None):
|
| return handle_change(refer_wav_path, prompt_text, prompt_language)
|
|
|
|
|
| @app.post("/")
|
| async def tts_endpoint(request: Request):
|
| json_post_raw = await request.json()
|
| return handle(
|
| json_post_raw.get("refer_wav_path"),
|
| json_post_raw.get("prompt_text"),
|
| json_post_raw.get("prompt_language"),
|
| json_post_raw.get("text"),
|
| json_post_raw.get("text_language"),
|
| json_post_raw.get("cut_punc"),
|
| json_post_raw.get("top_k", 15),
|
| json_post_raw.get("top_p", 1.0),
|
| json_post_raw.get("temperature", 1.0),
|
| json_post_raw.get("speed", 1.0),
|
| json_post_raw.get("inp_refs", []),
|
| json_post_raw.get("sample_steps", 32),
|
| json_post_raw.get("if_sr", False),
|
| )
|
|
|
|
|
| @app.get("/")
|
| async def tts_endpoint(
|
| refer_wav_path: str = None,
|
| prompt_text: str = None,
|
| prompt_language: str = None,
|
| text: str = None,
|
| text_language: str = None,
|
| cut_punc: str = None,
|
| top_k: int = 15,
|
| top_p: float = 1.0,
|
| temperature: float = 1.0,
|
| speed: float = 1.0,
|
| inp_refs: list = Query(default=[]),
|
| sample_steps: int = 32,
|
| if_sr: bool = False,
|
| ):
|
| return handle(
|
| refer_wav_path,
|
| prompt_text,
|
| prompt_language,
|
| text,
|
| text_language,
|
| cut_punc,
|
| top_k,
|
| top_p,
|
| temperature,
|
| speed,
|
| inp_refs,
|
| sample_steps,
|
| if_sr,
|
| )
|
|
|
|
|
| if __name__ == "__main__":
|
| uvicorn.run(app, host=host, port=port, workers=1)
|
|
|