diff --git a/VC_inference.py b/VC_inference.py
index 419a890..d76f883 100644
--- a/VC_inference.py
+++ b/VC_inference.py
@@ -12,7 +12,7 @@ import webbrowser
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 def create_vc_fn(model, hps, speaker_ids):
-    def vc_fn(original_speaker, target_speaker, record_audio, upload_audio, denoise):
+    def vc_fn(original_speaker, target_speaker, record_audio, upload_audio):
         input_audio = record_audio if record_audio is not None else upload_audio
         if input_audio is None:
             return "You need to record or upload an audio", None
@@ -28,13 +28,6 @@ def create_vc_fn(model, hps, speaker_ids):
         with no_grad():
             y = torch.FloatTensor(audio)
             y = y / max(-y.min(), y.max()) / 0.99
-            if denoise:
-                torchaudio.save("infer.wav", y.cpu(), 22050, channels_first=True)
-                os.system(f"demucs --two-stems=vocals infer.wav")
-                y, sr = torchaudio.load(f"./separated/htdemucs/infer/vocals.wav", frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
-                y = y.mean(dim=0).unsqueeze(0)
-                if sr != 22050:
-                    y = torchaudio.transforms.Resample(orig_freq=sr, new_freq=22050)(y)
             y = y.to(device)
             spec = spectrogram_torch(y, hps.data.filter_length,
                 hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
@@ -80,12 +73,11 @@ if __name__ == "__main__":
                 upload_audio = gr.Audio(label="or upload audio here", source="upload")
                 source_speaker = gr.Dropdown(choices=speakers, value="User", label="source speaker")
                 target_speaker = gr.Dropdown(choices=speakers, value=speakers[0], label="target speaker")
-                denoise_checkbox = gr.Checkbox(label="denoise using demucs", value=False)
             with gr.Column():
                 message_box = gr.Textbox(label="Message")
                 converted_audio = gr.Audio(label='converted audio')
             btn = gr.Button("Convert!")
-            btn.click(vc_fn, inputs=[source_speaker, target_speaker, record_audio, upload_audio, denoise_checkbox],
+            btn.click(vc_fn, inputs=[source_speaker, target_speaker, record_audio, upload_audio],
                 outputs=[message_box, converted_audio])
     webbrowser.open("http://127.0.0.1:7860")
     app.launch(share=args.share)