From 7778d4c3fbc59a694e5db85b46e2c5b0fb11ef18 Mon Sep 17 00:00:00 2001 From: Anjok07 <68268275+Anjok07@users.noreply.github.com> Date: Tue, 23 Aug 2022 15:04:39 -0500 Subject: [PATCH] Add files via upload --- UVR.py | 444 ++++++++---- demucs/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 165 bytes demucs/__pycache__/apply.cpython-39.pyc | Bin 0 -> 8105 bytes demucs/__pycache__/demucs.cpython-39.pyc | Bin 0 -> 14082 bytes demucs/__pycache__/hdemucs.cpython-39.pyc | Bin 0 -> 20265 bytes demucs/__pycache__/model.cpython-39.pyc | Bin 0 -> 6216 bytes demucs/__pycache__/model_v2.cpython-39.pyc | Bin 0 -> 5931 bytes demucs/__pycache__/pretrained.cpython-39.pyc | Bin 0 -> 4874 bytes demucs/__pycache__/repo.cpython-39.pyc | Bin 0 -> 6091 bytes demucs/__pycache__/spec.cpython-39.pyc | Bin 0 -> 1096 bytes demucs/__pycache__/states.cpython-39.pyc | Bin 0 -> 4443 bytes demucs/__pycache__/tasnet_v2.cpython-39.pyc | Bin 0 -> 12195 bytes demucs/__pycache__/utils.cpython-39.pyc | Bin 0 -> 15060 bytes demucs/apply.py | 64 +- demucs/utils.py | 70 +- inference_MDX.py | 429 ++++++++--- inference_demucs.py | 466 +++++++++--- inference_v5.py | 139 +++- inference_v5_ensemble.py | 704 ++++++++++++++----- 19 files changed, 1735 insertions(+), 581 deletions(-) create mode 100644 demucs/__pycache__/__init__.cpython-39.pyc create mode 100644 demucs/__pycache__/apply.cpython-39.pyc create mode 100644 demucs/__pycache__/demucs.cpython-39.pyc create mode 100644 demucs/__pycache__/hdemucs.cpython-39.pyc create mode 100644 demucs/__pycache__/model.cpython-39.pyc create mode 100644 demucs/__pycache__/model_v2.cpython-39.pyc create mode 100644 demucs/__pycache__/pretrained.cpython-39.pyc create mode 100644 demucs/__pycache__/repo.cpython-39.pyc create mode 100644 demucs/__pycache__/spec.cpython-39.pyc create mode 100644 demucs/__pycache__/states.cpython-39.pyc create mode 100644 demucs/__pycache__/tasnet_v2.cpython-39.pyc create mode 100644 demucs/__pycache__/utils.cpython-39.pyc diff --git a/UVR.py 
b/UVR.py index 1b23ec0..ecc083a 100644 --- a/UVR.py +++ b/UVR.py @@ -266,6 +266,7 @@ DEFAULT_DATA = { 'inst_only_b': False, 'lastDir': None, 'margin': 44100, + 'margin_d': 44100, 'mdx_ensem': 'MDX-Net: UVR-MDX-NET Main', 'mdx_ensem_b': 'No Model', 'mdx_only_ensem_a': 'MDX-Net: UVR-MDX-NET Main', @@ -285,6 +286,8 @@ DEFAULT_DATA = { 'ModelParams': 'Auto', 'mp3bit': '320k', 'n_fft_scale': 6144, + 'no_chunk': False, + 'no_chunk_d': False, 'noise_pro_select': 'Auto Select', 'noise_reduc': True, 'noisereduc_s': '3', @@ -298,7 +301,7 @@ DEFAULT_DATA = { 'save': True, 'saveFormat': 'Wav', 'selectdownload': 'VR Arc', - 'segment': 'None', + 'segment': 'Default', 'settest': False, 'shifts': 2, 'shifts_b': 2, @@ -432,26 +435,51 @@ class ThreadSafeConsole(tk.Text): """ Text Widget which is thread safe for tkinter """ + def __init__(self, master, **options): tk.Text.__init__(self, master, **options) self.queue = queue.Queue() self.update_me() + def write(self, line): self.queue.put(line) + + def percentage(self, line): + line = f"percentage_value_{line}" + self.queue.put(line) + + def remove(self, line): + line = f"remove_line_{line}" + self.queue.put(line) def clear(self): self.queue.put(None) def update_me(self): self.configure(state=tk.NORMAL) + try: while 1: line = self.queue.get_nowait() + if line is None: self.delete(1.0, tk.END) else: - self.insert(tk.END, str(line)) + if "percentage_value_" in str(line): + line = str(line) + line = line.replace("percentage_value_", "") + string_len = len(str(line)) + self.delete(f"end-{string_len + 1}c", tk.END) + self.insert(tk.END, f"\n{line}") + elif "remove_line_" in str(line): + line = str(line) + line = line.replace("remove_line_", "") + string_len = len(str(line)) + self.delete(f"end-{string_len}c", tk.END) + else: + self.insert(tk.END, str(line)) + self.see(tk.END) self.update_idletasks() except queue.Empty: @@ -711,6 +739,10 @@ class MainWindow(TkinterDnD.Tk): self.margin_var = tk.StringVar(value=data['margin']) except: 
self.margin_var = tk.StringVar(value=data_alt['margin']) + try: + self.margin_d_var = tk.StringVar(value=data['margin_d']) + except: + self.margin_d_var = tk.StringVar(value=data_alt['margin_d']) try: self.mdx_only_ensem_a_var = tk.StringVar(value=data['mdx_only_ensem_a']) except: @@ -783,6 +815,14 @@ class MainWindow(TkinterDnD.Tk): self.n_fft_scale_var = tk.StringVar(value=data['n_fft_scale']) except: self.n_fft_scale_var = tk.StringVar(value=data_alt['n_fft_scale']) + try: + self.no_chunk_var = tk.BooleanVar(value=data['no_chunk']) + except: + self.no_chunk_var = tk.BooleanVar(value=data_alt['no_chunk']) + try: + self.no_chunk_d_var = tk.BooleanVar(value=data['no_chunk_d']) + except: + self.no_chunk_d_var = tk.BooleanVar(value=data_alt['no_chunk_d']) try: self.noise_pro_select_var = tk.StringVar(value=data['noise_pro_select']) except: @@ -1014,6 +1054,9 @@ class MainWindow(TkinterDnD.Tk): self.stop_Button = ttk.Button(master=self, image=self.stop_img, command=self.stop_inf) + self.mdx_stop_Button = ttk.Button(master=self, + image=self.stop_img, + command=self.stop_inf) self.settings_Button = ttk.Button(master=self, image=self.help_img, command=self.settings) @@ -1253,7 +1296,7 @@ class MainWindow(TkinterDnD.Tk): background='#0e0e0f', font=self.font, foreground='#13a4c9') self.options_segment_Optionmenu = ttk.OptionMenu(self.options_Frame, self.segment_var, - None, 'None', '1', '5', '10', '15', '20', + None, 'Default', '1', '5', '10', '15', '20', '25', '30', '35', '40', '45', '50', '55', '60', '65', '70', '75', '80', '85', '90', '95', '100') @@ -1583,6 +1626,15 @@ class MainWindow(TkinterDnD.Tk): self.chunks_var.trace_add('write', lambda *args: self.update_states()) + self.chunks_d_var.trace_add('write', + lambda *args: self.update_states()) + + self.margin_d_var.trace_add('write', + lambda *args: self.update_states()) + + self.no_chunk_d_var.trace_add('write', + lambda *args: self.update_states()) + self.autocompensate_var.trace_add('write', lambda *args: 
self.update_states()) @@ -1669,6 +1721,10 @@ class MainWindow(TkinterDnD.Tk): Start the conversion for all the given mp3 and wav files """ + global stop_inf + + stop_inf = self.stop_inf_mdx + # -Get all variables- export_path = self.exportPath_var.get() input_paths = self.inputPaths @@ -1769,6 +1825,7 @@ class MainWindow(TkinterDnD.Tk): 'inst_only_b': self.inst_only_b_var.get(), 'instrumentalModel': instrumentalModel_path, 'margin': self.margin_var.get(), + 'margin_d': self.margin_d_var.get(), 'mdx_ensem': self.mdxensemchoose_var.get(), 'mdx_ensem_b': self.mdxensemchoose_b_var.get(), 'mdx_only_ensem_a': self.mdx_only_ensem_a_var.get(), @@ -1783,6 +1840,8 @@ class MainWindow(TkinterDnD.Tk): 'ModelParams': self.ModelParams_var.get(), 'mp3bit': self.mp3bit_var.get(), 'n_fft_scale': self.n_fft_scale_var.get(), + 'no_chunk': self.no_chunk_var.get(), + 'no_chunk_d': self.no_chunk_d_var.get(), 'noise_pro_select': self.noise_pro_select_var.get(), 'noise_reduc': self.noisereduc_var.get(), 'noisereduc_s': noisereduc_s, @@ -1829,6 +1888,7 @@ class MainWindow(TkinterDnD.Tk): 'wavtype': self.wavtype_var.get(), 'window': self, 'window_size': window_size, + 'stop_thread': stop_inf, }, daemon=True ) @@ -1839,16 +1899,7 @@ class MainWindow(TkinterDnD.Tk): confirm = tk.messagebox.askyesno(title='Confirmation', message='You are about to stop all active processes.\n\nAre you sure you wish to continue?') - - # if self.aiModel_var.get() == 'VR Architecture': - # inference = inference_v5 - # elif self.aiModel_var.get() == 'Ensemble Mode': - # inference = inference_v5_ensemble - # elif self.aiModel_var.get() == 'MDX-Net': - # inference = inference_MDX - # elif self.aiModel_var.get() == 'Demucs v3': - # inference = inference_demucs - + if confirm: inf.kill() button_widget = self.conversion_Button @@ -1864,6 +1915,18 @@ class MainWindow(TkinterDnD.Tk): else: pass + def stop_inf_mdx(self): + inf.kill() + button_widget = self.conversion_Button + button_widget.configure(state=tk.NORMAL) + 
#text = self.command_Text + #text.write('\n\nProcess stopped by user.') + torch.cuda.empty_cache() + importlib.reload(inference_v5) + importlib.reload(inference_v5_ensemble) + importlib.reload(inference_MDX) + importlib.reload(inference_demucs) + # Models def update_inputPaths(self): """Update the music file entry""" @@ -1980,6 +2043,14 @@ class MainWindow(TkinterDnD.Tk): j = ["UVR_MDXNET_Main"] for char in j: file_name_1 = file_name_1.replace(char, "UVR-MDX-NET Main") + + k = ["UVR_MDXNET_Inst_1"] + for char in k: + file_name_1 = file_name_1.replace(char, "UVR-MDX-NET Inst 1") + + l = ["UVR_MDXNET_Inst_2"] + for char in l: + file_name_1 = file_name_1.replace(char, "UVR-MDX-NET Inst 2") self.options_mdxnetModel_Optionmenu['menu'].add_radiobutton(label=file_name_1, command=tk._setit(self.mdxnetModel_var, file_name_1)) @@ -2948,6 +3019,19 @@ class MainWindow(TkinterDnD.Tk): self.downloadmodelOptions_mdx.configure(state=tk.DISABLED) except: pass + + if self.no_chunk_d_var.get() == False: + try: + self.chunk_d_entry.configure(state=tk.DISABLED) + self.margin_d_entry.configure(state=tk.DISABLED) + except: + pass + elif self.no_chunk_d_var.get() == True: + try: + self.chunk_d_entry.configure(state=tk.NORMAL) + self.margin_d_entry.configure(state=tk.NORMAL) + except: + pass if self.demucs_only_var.get() == True: self.demucsmodel_var.set(True) @@ -3114,6 +3198,7 @@ class MainWindow(TkinterDnD.Tk): self.inst_only_var.set(False) self.inst_only_b_var.set(False) self.margin_var.set(44100) + self.margin_d_var.set(44100) self.mdxensemchoose_var.set('MDX-Net: UVR-MDX-NET Main') self.mdxensemchoose_b_var.set('No Model') self.mdx_only_ensem_a_var.set('MDX-Net: UVR-MDX-NET Main') @@ -3129,6 +3214,8 @@ class MainWindow(TkinterDnD.Tk): self.ModelParams_var.set('Auto') self.mp3bit_var.set('320k') self.n_fft_scale_var.set(6144) + self.no_chunk_var.set(False) + self.no_chunk_d_var.set(True) self.noise_pro_select_var.set('Auto Select') self.noisereduc_var.set(True) 
self.noisereduc_s_var.set(3) @@ -3169,41 +3256,44 @@ class MainWindow(TkinterDnD.Tk): self.vr_basic_USER_model_param_5.set('Auto') self.wavtype_var.set('PCM_16') self.winSize_var.set('512') - - - + def advanced_vr_options(self): """ Open Advanced VR Options """ - top=Toplevel(self) + vr_opt=Toplevel(root) window_height = 630 window_width = 500 - top.title("Advanced VR Options") - - top.resizable(False, False) # This code helps to disable windows from resizing - - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = vr_opt.winfo_screenwidth() + screen_height = vr_opt.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + vr_opt.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) - top.attributes("-topmost", True) + vr_opt.resizable(False, False) # This code helps to disable windows from resizing + + x = root.winfo_x() + y = root.winfo_y() + vr_opt.geometry("+%d+%d" %(x+57,y+110)) + vr_opt.wm_transient(root) + + vr_opt.title("Advanced VR Options") + + #vr_opt.attributes("-topmost", True) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + vr_opt.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + vr_opt.destroy() self.settings() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(vr_opt) tab1 = ttk.Frame(tabControl) tab2 = ttk.Frame(tabControl) @@ -3269,7 +3359,7 @@ class MainWindow(TkinterDnD.Tk): l0.grid(row=12,column=0,padx=0,pady=5) def close_win_self(): - top.destroy() + vr_opt.destroy() l0=ttk.Button(frame0,text='Close Window', command=close_win_self) l0.grid(row=13,column=0,padx=0,pady=5) @@ -3307,131 +3397,143 @@ class MainWindow(TkinterDnD.Tk): l0=ttk.Checkbutton(frame0, text='Split Mode', variable=self.split_mode_var) l0.grid(row=9,column=0,padx=0,pady=5) - 
self.update_states() + #self.update_states() def advanced_demucs_options(self): """ Open Advanced Demucs Options """ - top= Toplevel(self) + demuc_opt= Toplevel(root) window_height = 750 window_width = 500 - top.title("Advanced Demucs Options") + demuc_opt.title("Advanced Demucs Options") - top.resizable(False, False) # This code helps to disable windows from resizing + demuc_opt.resizable(False, False) # This code helps to disable windows from resizing - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = demuc_opt.winfo_screenwidth() + screen_height = demuc_opt.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + demuc_opt.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) - top.attributes("-topmost", True) + #demuc_opt.attributes("-topmost", True) + + x = root.winfo_x() + y = root.winfo_y() + demuc_opt.geometry("+%d+%d" %(x+57,y+45)) + demuc_opt.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + demuc_opt.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + demuc_opt.destroy() self.settings() - tabControl = ttk.Notebook(top) - - tab1 = ttk.Frame(tabControl) - - tabControl.add(tab1, text ='Advanced Settings') - + tabControl = ttk.Notebook(demuc_opt) + tabControl.pack(expand = 1, fill ="both") - tab1.grid_rowconfigure(0, weight=1) - tab1.grid_columnconfigure(0, weight=1) + tabControl.grid_rowconfigure(0, weight=1) + tabControl.grid_columnconfigure(0, weight=1) - frame0=Frame(tab1, highlightbackground='red',highlightthicknes=0) + frame0=Frame(tabControl, highlightbackground='red',highlightthicknes=0) frame0.grid(row=0,column=0,padx=0,pady=30) - l0=tk.Label(frame0,text="Advanced Demucs Options",font=("Century Gothic", "13", "underline"), justify="center", 
fg="#13a4c9") + l0=tk.Label(frame0,text="Advanced Demucs Options",font=("Century Gothic", "13", "underline"), justify="center", fg="#13a4c9", width=50) l0.grid(row=0,column=0,padx=0,pady=10) - l0=tk.Label(frame0, text='Chunks (Set Manually)', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Shifts\n(Higher values use more resources and increase processing times)', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=1,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.chunks_d_var, justify='center') + l0=ttk.Entry(frame0, textvariable=self.shifts_b_var, justify='center') l0.grid(row=2,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Chunk Margin', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Overlap', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=3,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.margin_var, justify='center') + l0=ttk.Entry(frame0, textvariable=self.overlap_b_var, justify='center') l0.grid(row=4,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Shifts\n(Higher values use more resources and increase processing times)', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Segment', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=5,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.shifts_b_var, justify='center') + l0=ttk.Entry(frame0, textvariable=self.segment_var, justify='center') l0.grid(row=6,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Overlap', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Chunks (Set Manually)', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=7,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.overlap_b_var, justify='center') - l0.grid(row=8,column=0,padx=0,pady=0) + self.chunk_d_entry=ttk.Entry(frame0, textvariable=self.chunks_d_var, justify='center') + 
self.chunk_d_entry.grid(row=8,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Segment', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Chunk Margin', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=9,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.segment_var, justify='center') - l0.grid(row=10,column=0,padx=0,pady=0) + self.margin_d_entry=ttk.Entry(frame0, textvariable=self.margin_d_var, justify='center') + self.margin_d_entry.grid(row=10,column=0,padx=0,pady=0) + + l0=ttk.Checkbutton(frame0, text='Enable Chunks', variable=self.no_chunk_d_var) + l0.grid(row=11,column=0,padx=0,pady=10) l0=ttk.Checkbutton(frame0, text='Save Stems to Model & Track Name Directory', variable=self.audfile_var) - l0.grid(row=11,column=0,padx=0,pady=5) - - l0=ttk.Button(frame0,text='Open Demucs Model Folder', command=self.open_Modelfolder_de) l0.grid(row=12,column=0,padx=0,pady=0) - l0=ttk.Button(frame0,text='Back to Main Menu', command=close_win) + l0=ttk.Button(frame0,text='Open Demucs Model Folder', command=self.open_Modelfolder_de) l0.grid(row=13,column=0,padx=0,pady=10) + l0=ttk.Button(frame0,text='Back to Main Menu', command=close_win) + l0.grid(row=14,column=0,padx=0,pady=0) + def close_win_self(): - top.destroy() + demuc_opt.destroy() l0=ttk.Button(frame0,text='Close Window', command=close_win_self) - l0.grid(row=14,column=0,padx=0,pady=0) + l0.grid(row=15,column=0,padx=0,pady=10) + + l0=ttk.Label(frame0,text='\n') + l0.grid(row=16,column=0,padx=0,pady=50) + + self.update_states() def advanced_mdx_options(self): """ Open Advanced MDX Options """ - top= Toplevel(self) + mdx_net_opt= Toplevel(root) window_height = 740 window_width = 550 - top.title("Advanced MDX-Net Options") + mdx_net_opt.title("Advanced MDX-Net Options") - top.resizable(False, False) # This code helps to disable windows from resizing + mdx_net_opt.resizable(False, False) # This code helps to disable windows from resizing - screen_width = 
top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = mdx_net_opt.winfo_screenwidth() + screen_height = mdx_net_opt.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + mdx_net_opt.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) - top.attributes("-topmost", True) + x = root.winfo_x() + y = root.winfo_y() + mdx_net_opt.geometry("+%d+%d" %(x+35,y+45)) + mdx_net_opt.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + mdx_net_opt.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + mdx_net_opt.destroy() self.settings() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(mdx_net_opt) tab1 = ttk.Frame(tabControl) tab2 = ttk.Frame(tabControl) @@ -3501,7 +3603,7 @@ class MainWindow(TkinterDnD.Tk): l0.grid(row=14,column=0,padx=0,pady=0) def close_win_self(): - top.destroy() + mdx_net_opt.destroy() l0=ttk.Button(frame0,text='Close Window', command=close_win_self) l0.grid(row=15,column=0,padx=0,pady=10) @@ -3526,21 +3628,30 @@ class MainWindow(TkinterDnD.Tk): l0=ttk.OptionMenu(frame0, self.mixing_var, None, 'Default', 'Min_Mag', 'Max_Mag', 'Invert_p') l0.grid(row=4,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Shifts\n(Higher values use more resources and increase processing times)', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text='Segments\n(Higher values use more resources and increase processing times)', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=5,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.shifts_var, justify='center') + l0=ttk.Entry(frame0, textvariable=self.segment_var, justify='center') l0.grid(row=6,column=0,padx=0,pady=0) - l0=tk.Label(frame0, text='Overlap', font=("Century Gothic", "9"), 
foreground='#13a4c9') + l0=tk.Label(frame0, text='Shifts\n(Higher values use more resources and increase processing times)', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=7,column=0,padx=0,pady=10) - l0=ttk.Entry(frame0, textvariable=self.overlap_var, justify='center') + l0=ttk.Entry(frame0, textvariable=self.shifts_var, justify='center') l0.grid(row=8,column=0,padx=0,pady=0) - l0=ttk.Checkbutton(frame0, text='Split Mode', variable=self.split_mode_var) + l0=tk.Label(frame0, text='Overlap', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=9,column=0,padx=0,pady=10) + l0=ttk.Entry(frame0, textvariable=self.overlap_var, justify='center') + l0.grid(row=10,column=0,padx=0,pady=0) + + l0=ttk.Checkbutton(frame0, text='Split Mode', variable=self.split_mode_var) + l0.grid(row=11,column=0,padx=0,pady=10) + + l0=ttk.Checkbutton(frame0, text='Enable Chunks', variable=self.no_chunk_var) + l0.grid(row=12,column=0,padx=0,pady=0) + self.update_states() frame0=Frame(tab3, highlightbackground='red',highlightthicknes=0) @@ -3589,7 +3700,7 @@ class MainWindow(TkinterDnD.Tk): def clear_cache(): - cachedir = "lib_v5/filelists/hashes/mdx_model_cache" + cachedir = "lib_v5/filelists/model_cache/mdx_model_cache" for basename in os.listdir(cachedir): if basename.endswith('.json'): @@ -3610,33 +3721,41 @@ class MainWindow(TkinterDnD.Tk): """ Open Ensemble Custom """ - top= Toplevel(self) + custom_ens_opt= Toplevel(root) window_height = 680 window_width = 900 - top.title("Customize Ensemble") + custom_ens_opt.title("Customize Ensemble") - top.resizable(False, False) # This code helps to disable windows from resizing + custom_ens_opt.resizable(False, False) # This code helps to disable windows from resizing - top.attributes("-topmost", True) + x = root.winfo_x() + y = root.winfo_y() + custom_ens_opt.geometry("+%d+%d" %(x+57,y+100)) + custom_ens_opt.wm_transient(root) - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + 
screen_width = custom_ens_opt.winfo_screenwidth() + screen_height = custom_ens_opt.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + custom_ens_opt.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + + x = root.winfo_x() + y = root.winfo_y() + custom_ens_opt.geometry("+%d+%d" %(x-140,y+70)) + custom_ens_opt.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + custom_ens_opt.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + custom_ens_opt.destroy() self.settings() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(custom_ens_opt) tab1 = ttk.Frame(tabControl) tab2 = ttk.Frame(tabControl) @@ -3791,7 +3910,7 @@ class MainWindow(TkinterDnD.Tk): l0.grid(row=11,column=2,padx=0,pady=0) def close_win_self(): - top.destroy() + custom_ens_opt.destroy() l0=ttk.Button(frame0,text='Close Window', command=close_win_self) l0.grid(row=13,column=1,padx=20,pady=0) @@ -3959,7 +4078,7 @@ class MainWindow(TkinterDnD.Tk): """ Open Help Guide """ - top= Toplevel(self) + help_guide_opt = Toplevel(self) if GetSystemMetrics(1) >= 900: window_height = 810 window_width = 1080 @@ -3969,28 +4088,32 @@ class MainWindow(TkinterDnD.Tk): else: window_height = 670 window_width = 930 - top.title("UVR Help Guide") + help_guide_opt.title("UVR Help Guide") - top.resizable(False, False) # This code helps to disable windows from resizing + help_guide_opt.resizable(False, False) # This code helps to disable windows from resizing - top.attributes("-topmost", True) - - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = help_guide_opt.winfo_screenwidth() + screen_height = help_guide_opt.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = 
int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + help_guide_opt.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + + if GetSystemMetrics(1) >= 900: + x = root.winfo_x() + y = root.winfo_y() + help_guide_opt.geometry("+%d+%d" %(x-220,y+5)) + help_guide_opt.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + help_guide_opt.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + help_guide_opt.destroy() self.settings() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(help_guide_opt) tab1 = ttk.Frame(tabControl) tab2 = ttk.Frame(tabControl) @@ -4289,34 +4412,37 @@ class MainWindow(TkinterDnD.Tk): update_button_var = tk.StringVar(value='Check for Updates') update_set_var = tk.StringVar(value='UVR Version Current') - top= Toplevel(self) + settings_menu = Toplevel(self) window_height = 780 window_width = 500 - top.title("Settings Guide") + settings_menu.title("Settings Guide") - top.resizable(False, False) # This code helps to disable windows from resizing + settings_menu.resizable(False, False) # This code helps to disable windows from resizing - top.attributes("-topmost", True) - - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = settings_menu.winfo_screenwidth() + screen_height = settings_menu.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + settings_menu.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + + x = root.winfo_x() + y = root.winfo_y() + settings_menu.geometry("+%d+%d" %(x+57,y+15)) + settings_menu.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + 
settings_menu.iconbitmap('img\\UVR-Icon-v2.ico') def askyesorno(): """ Ask to Update """ - top_dialoge= Toplevel() + top_dialoge = Toplevel() window_height = 250 window_width = 370 @@ -4329,7 +4455,7 @@ class MainWindow(TkinterDnD.Tk): top_dialoge.attributes("-topmost", True) - top.attributes("-topmost", False) + settings_menu.attributes("-topmost", False) screen_width = top_dialoge.winfo_screenwidth() screen_height = top_dialoge.winfo_screenheight() @@ -4350,12 +4476,12 @@ class MainWindow(TkinterDnD.Tk): tabControl.grid_columnconfigure(0, weight=1) def no(): - top.attributes("-topmost", True) + settings_menu.attributes("-topmost", True) top_dialoge.destroy() def yes(): download_update() - top.attributes("-topmost", True) + settings_menu.attributes("-topmost", True) top_dialoge.destroy() frame0=Frame(tabControl,highlightbackground='red',highlightthicknes=0) @@ -4385,7 +4511,7 @@ class MainWindow(TkinterDnD.Tk): top_code.destroy() except: pass - top.destroy() + settings_menu.destroy() def close_win_custom_ensemble(): change_event() @@ -4415,10 +4541,10 @@ class MainWindow(TkinterDnD.Tk): change_event() def restart(): - top.destroy() + settings_menu.destroy() self.restart() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(settings_menu) tab1 = ttk.Frame(tabControl) tab2 = ttk.Frame(tabControl) @@ -4533,7 +4659,7 @@ class MainWindow(TkinterDnD.Tk): rlg.start() def open_bmac_m(): - top.attributes("-topmost", False) + settings_menu.attributes("-topmost", False) callback("https://www.buymeacoffee.com/uvr5") l0=ttk.Button(frame0,text=update_button_var.get(), command=start_check_updates) @@ -4598,7 +4724,7 @@ class MainWindow(TkinterDnD.Tk): global top_code - top_code= Toplevel() + top_code = Toplevel(settings_menu) window_height = 480 window_width = 320 @@ -4607,9 +4733,9 @@ class MainWindow(TkinterDnD.Tk): top_code.resizable(False, False) # This code helps to disable windows from resizing - top_code.attributes("-topmost", True) + # 
top_code.attributes("-topmost", True) - top.attributes("-topmost", False) + # settings_menu.attributes("-topmost", False) screen_width = top_code.winfo_screenwidth() screen_height = top_code.winfo_screenheight() @@ -4619,6 +4745,11 @@ class MainWindow(TkinterDnD.Tk): top_code.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + x = settings_menu.winfo_x() + y = settings_menu.winfo_y() + top_code.geometry("+%d+%d" %(x+90,y+135)) + top_code.wm_transient(settings_menu) + # change title bar icon top_code.iconbitmap('img\\UVR-Icon-v2.ico') @@ -4656,7 +4787,7 @@ class MainWindow(TkinterDnD.Tk): callback("https://www.buymeacoffee.com/uvr5") def quit(): - top.attributes("-topmost", True) + settings_menu.attributes("-topmost", True) top_code.destroy() l0=tk.Label(frame0, text=f'User Download Codes', font=("Century Gothic", "11", "underline"), foreground='#13a4c9') @@ -4719,10 +4850,6 @@ class MainWindow(TkinterDnD.Tk): top_code.resizable(False, False) # This code helps to disable windows from resizing - top_code.attributes("-topmost", True) - - top.attributes("-topmost", False) - screen_width = top_code.winfo_screenwidth() screen_height = top_code.winfo_screenheight() @@ -4731,6 +4858,11 @@ class MainWindow(TkinterDnD.Tk): top_code.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + x = settings_menu.winfo_x() + y = settings_menu.winfo_y() + top_code.geometry("+%d+%d" %(x+43,y+220)) + top_code.wm_transient(settings_menu) + # change title bar icon top_code.iconbitmap('img\\UVR-Icon-v2.ico') @@ -4752,7 +4884,7 @@ class MainWindow(TkinterDnD.Tk): top_code.destroy() def quit(): - top.attributes("-topmost", True) + settings_menu.attributes("-topmost", True) top_code.destroy() l0=tk.Label(frame0, text=f'Invalid Download Code', font=("Century Gothic", "11", "underline"), foreground='#13a4c9') @@ -5692,7 +5824,7 @@ class MainWindow(TkinterDnD.Tk): links = lib_v5.filelist.get_download_links(links, 
downloads='app_patch') url_link = f"{links}{pack_name}.exe" #print(url_link) - top.attributes("-topmost", False) + settings_menu.attributes("-topmost", False) try: if os.path.isfile(f"{cwd_path}/{pack_name}.exe"): self.download_progress_var.set('File already exists') @@ -5814,7 +5946,7 @@ class MainWindow(TkinterDnD.Tk): wget.download(url_7, download_links_file_temp, bar=download_progress_bar) move_lists_from_temp() self.download_progress_bar_var.set('Download list\'s refreshed!') - top.destroy() + settings_menu.destroy() self.settings(choose=True) except Exception as e: short_error = f'{e}' @@ -5851,7 +5983,7 @@ class MainWindow(TkinterDnD.Tk): wget.download(url_7, download_links_file_temp, bar=download_progress_bar) move_lists_from_temp() self.download_progress_bar_var.set('VIP: Download list\'s refreshed!') - top.destroy() + settings_menu.destroy() self.settings(choose=True) except Exception as e: short_error = f'{e}' @@ -5892,7 +6024,7 @@ class MainWindow(TkinterDnD.Tk): wget.download(url_7, download_links_file_temp, bar=download_progress_bar) move_lists_from_temp() self.download_progress_bar_var.set('Developer: Download list\'s refreshed!') - top.destroy() + settings_menu.destroy() self.settings(choose=True) except Exception as e: short_error = f'{e}' @@ -5983,7 +6115,7 @@ class MainWindow(TkinterDnD.Tk): self.download_progress_var.set('') self.download_stop_var.set(space_small) - top.protocol("WM_DELETE_WINDOW", change_event) + settings_menu.protocol("WM_DELETE_WINDOW", change_event) self.update_states() @@ -5991,7 +6123,8 @@ class MainWindow(TkinterDnD.Tk): """ Open Error Log """ - top= Toplevel(self) + error_log_screen= Toplevel(root) + if GetSystemMetrics(1) >= 900: window_height = 810 window_width = 1080 @@ -6002,31 +6135,42 @@ class MainWindow(TkinterDnD.Tk): window_height = 670 window_width = 930 - top.title("UVR Help Guide") + error_log_screen.title("UVR Help Guide") - top.resizable(False, False) # This code helps to disable windows from resizing + 
error_log_screen.resizable(False, False) # This code helps to disable windows from resizing - top.attributes("-topmost", True) + #error_log_screen.attributes("-topmost", True) - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = error_log_screen.winfo_screenwidth() + screen_height = error_log_screen.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + error_log_screen.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + + if GetSystemMetrics(1) >= 900: + x = root.winfo_x() + y = root.winfo_y() + error_log_screen.geometry("+%d+%d" %(x-220,y+5)) + error_log_screen.wm_transient(root) + + # x = root.winfo_x() + # y = root.winfo_y() + # error_log_screen.geometry("+%d+%d" %(x+43,y+220)) + # error_log_screen.wm_transient(root) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + error_log_screen.iconbitmap('img\\UVR-Icon-v2.ico') def close_win(): - top.destroy() + error_log_screen.destroy() self.settings() def close_win_self(): - top.destroy() + error_log_screen.destroy() - tabControl = ttk.Notebook(top) + tabControl = ttk.Notebook(error_log_screen) tab1 = ttk.Frame(tabControl) @@ -6059,7 +6203,6 @@ class MainWindow(TkinterDnD.Tk): l0=ttk.Button(frame0,text='Close Window', command=close_win_self) l0.grid(row=6,column=0,padx=20,pady=0) - def copy_clip(self): copy_t = open("errorlog.txt", "r").read() pyperclip.copy(copy_t) @@ -6157,6 +6300,7 @@ class MainWindow(TkinterDnD.Tk): 'inst_only_b': self.inst_only_b_var.get(), 'lastDir': self.lastDir, 'margin': self.margin_var.get(), + 'margin_d': self.margin_d_var.get(), 'mdx_ensem': self.mdxensemchoose_var.get(), 'mdx_ensem_b': self.mdxensemchoose_b_var.get(), 'mdx_only_ensem_a': self.mdx_only_ensem_a_var.get(), @@ -6176,6 +6320,8 @@ class 
MainWindow(TkinterDnD.Tk): 'ModelParams': self.ModelParams_var.get(), 'mp3bit': self.mp3bit_var.get(), 'n_fft_scale': self.n_fft_scale_var.get(), + 'no_chunk': self.no_chunk_var.get(), + 'no_chunk_d': self.no_chunk_d_var.get(), 'noise_pro_select': self.noise_pro_select_var.get(), 'noise_reduc': self.noisereduc_var.get(), 'noisereduc_s': noisereduc_s, diff --git a/demucs/__pycache__/__init__.cpython-39.pyc b/demucs/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b52817ecb7dd8230d1b895b915b5b074756a7d8 GIT binary patch literal 165 zcmYe~<>g`kf=}9~la2uC#~=>8ZJ?c_lHviJ5uvZuvPWsYM_uU6+`Y)ZEhK h;+XjO%)HE!_;|g7%3B;Zx%nxjIjMFaJ3a$30{~FEb)Wwo4QIaK(FR{f$W}M45yN1_JHjXz*6UB|4q?tOPMm$4mL~@2Y zb7WD7J~^>Mc;AO!`A^baUdpeTxcZh=1aVV(jk&^ACHg7&4@0!e@08Iq#n zZPB6T%(<&3NPc0zC^+_M`JoOT6)`X44v|3 z%XCcjwVV{bRx91kIGJ|V$+mM&ZqO(16wsDx<=aK4sOr+KiT0#3HE5r9rqvv$oEes3 z+16}(&Y447hUHqP+w;!6%I8}P?UGY!FFK35=B0L)yg6^aZY*lfGSi(iKh{`*6(4J? z=oNQX`lhprnh7?En#sQ5oMlsN`myHNtmMt?oWpybo&L;V^K9X9%DKSIyIOg1|C9Wy zA@ghz+HR-QdSttH8|9X&u-pE_iv5PL_xx7N-gX1l^6c7nH)#4n1C?Ui4j*{D<#r+) zjd?#1UWbP*SNLIIZ+T+R^MYhRv~IhRz2{*be8skBpO3cvy08WJYt8eO{8wPNR@UR0 zd)wS|*_|+Kz4p+nbw$WG%4VFo!^4^vMRDr)x?b0dtvi0lLw6$xkml>%phk(J=m`2Zd+@8BcR`D1^Z>p1EUr zsYMOnw8~RCv#8Ne)gjM}&&*G>IuwRkk0+cgOR+SBk^4lmG$#+%XXC<;+{Qcgw~5^m z?J=!5AH|tH&u?rCWFoK82E{dNj3}as$|m0E-;fBcuN~?~nyv{$>V2K*O)FY@Q~Rkd zkzdsQNR#?4?V}&{ENShSKht*1rV(9J?beQkvW4=SgK`Sx6v~b&o6P9y{E{@KRZZ_+ zqBf~jsgH#(jWzK#MdmOo^&=hmoHQ^-<9{{UJ6djOeZ6M{FM>X!uP0PujlWZC7@%#b zmqKld+Gx(cj@DmM>+Y|lfm!p+0LN19=Z2=`?ZNXdbj^#v9w?u^fFp6D+0j8rq~ zcs0N7*PwNZot4qZ55uk1@)rW)E$qPfowGua7Td#-1N}wl9WEqYfhyH~Qk1~y@A^sJ9k9}4W5jZD zK^rwnhY$2t8SN~-(HSHSO==&__25vL{#b+3Yh)yUYV2B2bmNJ!WbAio?DTMKmc|bH z>;H;cJYV1ZLi@sqjmp(nuksnxsN^?4e)Zb@_acu+_g@Ki!sh*3Uepv}=l*-|zgvCV z4cvxDUgrMW5K8rS*kT^1rYpDZGv!0?EAyyy9>w|Vq+hkL-MMiCv;QY8y8ptM8>>{S z5Ui+HE7w~gj3>H*%y7t9mfZ3{%A9!eg&wEsK958~^*pNh0uow}7vfaZ zg(&0fkeJwPc>!Om0sIf#wpXpj 
z`6@YA_%`H=)#`57Z4Fus(TOeMbDRRty;huk2Tqg@awr73F;0x*Yedk*fUh;&BEZ{` ze)EgeMR9ooIT{d=)UpQ7#BcICxcSZWWln81yeA$qte5ddPa_$f@HusA>yGJJ$+_Lv zaoNK~4JUY7okjV$aL)^(kiSaTp@GGZAJZw!N0TebY$7A|9mr-;!!e8_vYCqJljBcX zO)WYYm6FE-nL5T}KrHBN{1dgO^X4SQ&a?KsW62Y$^fr&!%`prXt`Kck~@x5_;GxE*H4K zTnFcrF7p@Aqin?%Ih2If7g3{V9q24q6~~I9D5bg&@kZ)4XB0u}{;6@=6r)aQ_To_@``grTtNj%ih#Dh}FJ-=|7b8m3k|IqOE4+uIM%{)F84;akiC==3V(?2)yc9D&HF~Gy1T*1- zR^eIn-b9cW#-T1Jj*ypAs^wid$*iWu-;xtMCPS#e-x8LbgvCw;Q1ZUkn}UZzz#ykd ziDh0EWKm)?)E(;0(w|NCW<(lH&Gd}}oqx_OH;XsNtlliM8oJ2C7z%xTPV3F7e6g>) z6X=0;-6=7R)*{|heF_{tzi;$T_vXcnn3bmw=Z+wZK2g4a(rKa_LD8g`d`WXB#T3in zJ0<4jX{px@Uz0O`q5oX(l~msa)ptS8_bFJqfq55uOR|K6!2H;di^(@h>sAVZo?Q9? zGzH#iPA(?pmsI&c-<^}QvhPO;XlBe5 z$l1G4z~NqGfn}Ph1D!%AmZftZa$3A^_Lh5RdMh$_0J*T7oMHLv2nKhSz#*g ztYW6K=zEqGmw;X@XwaX+rzX`({HV61F^f&8daGeTljcDaxGc{$jRPZktX$f>oIkXW zv}H{eX0_gYL+f3T=SU8{i{c!d??rhQYdwDhJyyQ}je=H)3Fz2md6^aQT@zVGdXg16r6)O& zm6x&0vOM)?8k-V1q|@>;(o^E1EcbQfX8t_edm1`Di=CK!o6c&|ayG|K%e7@q{*=~}RXehaGK!Khz}Ek4oj68bHvet$jcx6ICbx1Tf*pOR0j{a!k}bfop3kWc7Kkx?2taOE;VMu@1!^YjKeE(1?xW_K(~9(pP=vgBR#RXUIkiSInw^5 z1kKW91v@BxfHf?HbA1zXoq0|>NJXz-#Z{AjAL13yV$GF4APa@Dopb*SCyI(XUr`X7 ziW?Jvt>HfaLI#*kbome~IEfP|lnn7Pur-9bh(T}ISJx9jE&;__l!(Ane&c0gto*j9$6G1T5C+HuAwPB|dM!tC9A>3C8U8tg69P6E~ zsMz-g!7}$c9-=5_Zzd#eCS(u}NFc)Sq3{DZ4f7Q~2dD;^iHm?8K_?qb=GE)CV1N_A zSK4lK5XTR=L|gNAyZ!^0`)(jk&JDW#HqN~V7<2d5yR`dmhd6t+^1R&%!L#TXLx?&p zKp1OVKo%2D-s4?F__Sq&``rP`iGY8LhfPR@g?j;7?C%r5!hl6K&Qk8yMDzp^;~P|1 zDP|c56PW+NX9#+*K?Gk4iXfh2)*4tC5F3x_B+emPC&&TuRDv=jh*pbkYLQyy(Ii4yEJ zV6nCBN>W<4aR~=J#1)>DFq9+#9EPpigAv&5-nI|O=Q}8qr;yzQMIkhyD->irjy91x z50=^_Y-WI09dD4>^uaddw3#fXdNCim#1YHdbq)&{p9GT!;J<<;!1>Xlgci11qrK9! 
zkRB)>&I@bX_5pm8qF`ol381xGYbFD!a|Fh~5NOO8)d{KltG8+JPwGgkK*)N}{v-*j zs3PjwxV>eEZE<0&X+vFmW3$%jmM8h!xUz9Vq2ofg!=Nzgx)-MtunS)@RiNWad~s#~ z%g588G=MQ9qONwrJpx6Ba_2vw*{lxCf#0Fo-=XrPA1DUaysCHv6{g$r{*~{}VqdqP zvs-}a0l9^QKmrZj^0wUve#k4g`Fj{t*3|<1I!#TN&aW6vwVWWD_8Y@x>^p;N>oxl~ z`m_6{E%ALSE+o)Xm1NfV1_7R!m3Uta?;Ak zP=(zl{N@)h$~he(fc1eqC6(30t#AUnV!i1UD6Fd{AYYu?az$-B&W^A{0!*AdAoW3y zgwJXCNH|2;qrzRtJ^_lN4AJAjj}+Jvr&O>9T~n3L6g+>cI;I>s{F8%0m1<2Vr`nY2 z7Y9;Nj=;&OIjT6M<6@FcblAxu{;w+U;6Q&=6i!?}P4s3|y~muwAVcGo#J&@j@*Q-4 zh&TEgiFP`z8{ei=6t(ApRL&rRPn(ARzyAIseX3ZrboxzGB#YlXjZ%mweW&${gwi3# zozd5k=FwUNj+E8UD13Ap=u+NV(o5f_jTv(q2a&O8*D2tD?@6%mu3wmtbqbm_9$C$z>L_>iY q+3T=#OmKs+G=;EQRuOC^h?@kc^2BU?8n7XRzdpH^&8U0yH~#}pC}f}j literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/demucs.cpython-39.pyc b/demucs/__pycache__/demucs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8e73d3e7d7a85981a839eaa78dc7bb8f2a8f426 GIT binary patch literal 14082 zcmbVTTWlQHd7jJe>~fb}QY1~uloXF`+0hmyY5GofGP~bLgn#4{^ChddKA}v}ZXo{i*@?@Yu(dT(6(k6f_^rb~#iWE)y zegB!gQF0n|sW~&}%(=}u|M@T9|I3@2Dj4`3{p#PZEnYH=A2T!h%OLX%uJDIQe8V@J zMn&JviY0lwV#_^K$>44^Guzop)?^*K>1^jJc|2$QY;$6}P$|f>(=2XJRwlQnDpRKM zqT%QK{4K-J2ieWio?V&tC;Y-KqcYdV-_XT{tQZH(vmrp9PwvS zGMkpnqh!vXN6CCza-VNrG0OMd!FL!dWizo_Eu@8dw^fT8?UvWPW8w{pCTm_N>Z+jH zXf>j;l@x11D+*LKQjP7z>9*?are9+(qd%?_f8nnn35<&A8(1F`YcugBqi*_H-?=q` z6_RhQWWQu&jEaN#6q4+7m#?h8wEO*ni{En%X51T%Xu}P??ZDk=_JodZF8B zb-I!3H@26g{zvNWYunzi zA*Ocq+16%z>*}Q-+=|+rtFL_GN@(_Fo+Zg|Rn5Sj1- z62qJ{Gx(baedh68!1JP2vPyU=p)6z0NO|6T6!}xuqo^~3(meilr!+06eU|N%^OSQu zvn@;)XSPtSZnynzlh2dY>b0)dOyA_HRli-UR^_m$^PJvPdKzm%vxyhO$uHuq;uokq z60T)Pc9u6Y*-~E0#tsrW5I0B;(ueVYZ54L6Zygvti|p(;1m~R51C!eXa; z+(zVX1kFz9u0c#7QwvlC4g%)ww}Vjg>~}Cn{C@QG3Rsq-dra&w@)O4oA~5(Q<9m^p zWY!v92(Fr`wOiMd2^G`>6|~UfGFpQSIkAytlRcjV39`J(HqWD8DEft6lC3P5>RFVH zEgzSTWGu_~0X%>M7zYN<@#&rwnXwgFn>MZtt}HI6p4qch9;Y1(yJzA2iY$1`Tq)<1 zoZkKr96oHc!pP%vg?&30F`;vQGR^xcPDR*hhXD%Y#reZ8&igXcvYj}(OZ7CShE?jW 
z9scA}|C4paUqd~i7gGD8sXm5$4cFMOFD^q;;+VRS4>Cd4x8JlxZ2*hBHC1tZ$Ism| zDmhUb@?;W8;WFjR6|VK}x8+$AgtP1>vj8VPBWQB=vg#mMuu0DnGbgK*I zg1I|4+>?9DTn^2zyvT-B3dj)pQi*bbdd?L?J%?9iQ)W}P^hDl^qC=D?Z0s}aiv1eq z?(E3;eLiW57g)FPxu^+7(}iydCZi&t><*`d9V zRV=IF8v7N~T*y(z$a{+9EhL_QGb1EUZJ#8rwf!N9my+xyo>i|1(h)6nCAcx9zcn1; z+D1T)SSzLAOl>GhZjB1XQO*BOV+JJ*_j;(1TWp0mz^`s=W!N6eA<=x#2d( z!c4(S!j4x9&dApVOY2K+yUv#GYyDH=o;{1%ZghE z)fd!zP*~0gx#EnaEt0GsbfOKR-`Z5VC%jQLAYUdehR{E#@#6&wa({&12TZ{rPxaw8zi+Q{Xx!@859=+Ph}02nGJm1zZLj&>Q(r*)|HNhNx*`+?;&tgllN%L2g$G^r zu1Qcomr2k(<|M!#j05V|){4x%KbMICJ*iKl>Y>S~S5fvQmQ&*ix}Uy6P%hc}&)%Db(Z; zMkB@ay_Bj3DuuK#jQIkrKL>vWGjGHCGV|7?&D60bVIR(zlh!QW%>e1h|92+mKm~Jv zeH4Iuj1$RS_6AAfBFCYYLmdp(#tcj~$Sv3uCT;O+kYYnkj0@dY2}Kj@qTn!rP?!G> zu7OHeg4L>L($~g{Y9PJUCr}lVTMC%GuqyPks$NEBr0=0QO5KZVC_oPu!~vpa8b4cH zR(!)P8v9X~=I?GTYyujX{P~$w3nM-`RmrCKq$7q#4o9S*n&`nZahipcT-a1m0Gp!O zz7bOG-|(&@+h?)vniqN9>xpfcy`fhL& z2niK32rtp_BDKSom_Fpzb&AWdjsEL28lXq+q@OYPEY0!sR*|oznwFdj9CNk{<#gPI zgVNBmxd%T%GNNUH(tv-O8vi8S)vY}fI4JcjOVChTaL!DWB_0O5hnfx0&qMkAq5MQ_ zt2iw~KdDA!R10xNy*((;49bhtWi}@PJxuxblLRHEq@+aDFZ@0=K%dB8akGRPf}dS5 z2K}J_qp(@hxlN1xm0cMESL>gh|L%+bynE~EJ3j+>Rh}7Pc%)W!or!Q2N(#-ya+IRj zsw9iOP&Pa@dz~n!d-O{mlvGXfdk$A|A8;=eeCNPE4&}WE#eKhYcx>ql zqa3F}fc`O$96-^j1N(z|+-Z#a#lg7Kca2*@uQP{xg~?#;ABm?45TobmxHOzg5wqK$ zJ%EXTv8VZks4+7bdj_ra*oFN$j608YF9?tXXV7ii&x=}bZ7u-*&AtS8M~FV}CK8JAiNjTb*2i z0^#_<^+qj#l@{%QK|KJP*_~e7s6}UxgpWP7wh@I|!}?Xc5>;ilRF6WuuBWdXZ{l3N zWvXLoTX|WF*tQn#me|7?v|zp-mX9l!9m$Ch)WZP?0(e;c28(}_$qtevbG;GVP`i9| z0%#K5Z8@ocjoZL&wmm??R;Qsp%La0KV^+}i&knb4=Ed2J37Ah;PofAE85|&m_?e47m(&f)K0|r;kghWLt2dQM>-iz?UxR~RjSj2rRSN_lM(Y-+aaZpFNG1N6~5~o(G3|D1O+_-*(nbX+sF)<+NuzJ}zp4 z3H8F&&wy1H)84^V_m5%z_s0)@MVw`r)2a9YIM*`h<7Rxa_eiT0ErKHw7_tD>JR$(g z-^8cTY60Wx-tD*ml>@vFoOS|^-NPfD!n|#n_m9x>-t#^seWJhk5w1o29`uxzFY3?8 zQVa1Z^!*6t!WFXcwWlz*nf()xX8ej%_zD6&cJE|#64C$)3}7nz+IR6Ka8AB%snzH` z7$LVu3VNXB_@tW$;-h%ejNI6WkNU;i=)o_Li+is@aUWnIcY{7*yQ^vd9#1!6t$i9Q z?W^Twm(K)V^h=ixs&LQ;(Fw2XH`>)cVC0^L8jNOe@vETi0w&sSx7uS34p*%=6owcv 
z{QE%B_&8$B_q!hgLMf1X7Hn|{sE)T>SM3{!P&h5fG3K@n9@*$X=RXa8*#U+?ow_^% zG7V<920eYli^itUZzlXDNUDaKusnnt`j~Fot)5VMtns^igmg0iV*33szyWx%Ml$5< zhFAt?p&<4eFmFy%QsKK`*1P9!o*P?A7!AXTjyET!lg1m|y{vqhf^BEuEcp;{ESF_V z?j`gm2v6XTeHv$k_40$dhxG-n27`-k8T@O(3{pX-3G~=!9ez3Vg867Dl zhmoe~^zT})KGCKqJ=X<>q;~d=0snBG*4?MvbJ_^i->XxJm>IJR`!f^QT8kdowYCxi zl2)n2-i#B12Ji%;90%~+ez?)0`pxcu=GbZ-dActFQIb;-_Zc92q{lv;azVy>5(EY| zx{82p>a*mDeGL>Br?%S>_>@*WYQUGlufT}@crRRt{z|4N$x}gko~*$af59GTzeX2hoKe2r7AQ*_9HG*FVB-__EKq=kX?GPrBfVWp}L| zZQQH76!F%K0;@TIxJOC5kB@d6PYlj)QW(-7y3=GKo(R=o!(7UY6WY+!5i3OoKvwmsR+}7QgULFRhndhn zAt<$Y9n?0miQNoZ>IJ^oVseFv*n#|FiJK`us)4*WS?~rE`bjDdQsvQS!6~31qDF=ncThq%;EaaWDshf zBmE(}WFQ_HhdN->1q#%fxr=$ zJ(&ax8#WYV^BWvcNQ;=jzv2pi7m0DX0A%7`e+BqU3RZ^5g+(7rM*J(h6Q4ko9^P4a zMs(i)I9Zmkh4b@*<)9osn4dZJEb5>PhzH9~NLjy@u3G>CLL{|>nhsJ2t#_vnowzY1 zl`sIo^NYB`&m(~y2L`|ZpeNy;L137%3CKJR6SEN#$`S-|B6WyS`qoj_18&JMF*tON zjkGxiYY#PaZ9xwu0Q1^q8D$qi$LP;T+#Ul_BcQB%49&E?PvWc&nC!~|n5(uX7!4f2 z<^rE>_y~Mzt;0M5d!#a_Jq}=>nk^!PWXu8rV+Ds6o!%8Mhv<{&qYf((Iu?~5@rd-j zxMy7Pe~{~g^$upEb&NAwHBXtm7Qv5=De{j%ATgQ~7RckNw}hN|1yL|EvA*2Kq`Yp_ zri=k8!?vT7le%BsZ3QrDLr=l(fH4Go5Ow)Y6nLrV9!!ft11ttd3Hl3@7Rl^}GGgdV zE>FlY(wswB;uk;wh(in-&Ed&TY@&$d8gR!PWd%nsd}0D{Z1^`1VRgx*>`qlzx`mw# z=h4cTKrkvucM+ss0&EBJ1kj(7=4gvC@}w`Byh_OP!{`eU(}f->_ z0Ra1mf(Epli8IKVX)cQ#V&KypBiTT@h;s)N4+z_8<$VVc*cKzOL5eowWo$YE#Peby z_Byu+=v&{98FdSQGXRUtocdan-=84tjHqM8as#db{IyxYF>A-7Ae5(3OwliNyY|yQcQ{rrP zn~iQ*L2RIwZ}2C1HT4~~KE*`j&L6Rz-D!6e;Ymf-gSdX^x$2LZABdvzq$WChyKz$q zVEyM(mFSH69;+@hxs9Zf4dEsA)t|5cSf>6D0f)#_pyW45m278#S(3$;56KCHrzLf<*4$HD>83-{puOmbv!S|PIc5aWyz_>lxbqaP#(KZxD{E<2t-gq!&E5_4(SUV4Jb`6UE&N(_1}f{yes zs|Z*40iPJynM@;u(Z~OSfx=w%8>p7oo0F9y^|!3}7p!P{iMf{8t>6ZN*PH&~0EE9o zogZ=!C_N3VI0fE3{$_CPE~G1ac(7xdb^Q*SsIM}i$W7z-e~)>fgxY_zx~iWC1-hkV zhlxZ6tg5fE)$cR;I+C)v3cmORv+rXfE&xhM9l1$z*WsJ|hIfzQ{B67?0w2<6nm4VC z#1L4AQs#9TBMkJjB_;nf^P9$r{L%b-6?-Z#C}GVGJXo+l5-aMcze2eL)us_X@?Q>? 
zS}jGQQ{P9z&BRWHiqlWB2*hlL`n2Wb?Mo8jqIJP%+2~_TE;D(BNs|eOA2~uNn5E*A qVE-nt=|;@ literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/hdemucs.cpython-39.pyc b/demucs/__pycache__/hdemucs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78b3fcbc4ef9875b8d9c414322b57514e6d3d858 GIT binary patch literal 20265 zcmbt+Ym6LMc3xF=^>cc99vluSlA=lyMRInj;X_(ICR!~ea=DTs%|19$K6=!DcBwoM>32-?`P*-9rkSN0WWK>b@W6p7Y&v&K-Aryky{W?$)=Lt2YeekN7hDvGH;d z-{AKVct+LmOwVeWZL4aTe9p8o)eL^^mfg-)vq@a8nnPT+m22m#d5O!l3hiRGgy+0h zXpObY)pC2hIxg?UR;4{rosj2JYqC96ooY{4r%mH?hBxMw?-*X$&#xW0ovF?sX56bF zrjo=QM9hRYiI~YGW)?A1-ZWySlbAz@IpED8W+sU_>}9SRa|d@`EG?}z1E=2gd<31) zZFT}DT=kt`!>@;`yQ18-<90mfrR`RcIqKYx~;n=^ILKD@}+L)W?WHzP+VLlsr*{A(+rU@pY$azG?S+~n;iaRlb7Hk9(UL*^#YUVRkJS8Ef`heysq?wcfUR*(e)kcZ(i!dYMDUp0!(w zvU}$4*dF$QXZ#h*GaA+d#!-Vg)3TECoT1-sW33Qn9yBbYhF`Q*_6$t0y{Je8-!bD1 z`mWf~xp53(bHi70p(YDot0`{1xR@?NTug@?XTz?lug1ChW;@tygWUY)%4!(fo*TMx z&Q)$_#gDD98{5GR6{<?)mv^5)N0=`e)96A zg=?<|p!sVrbk@4-*Dm|Pdf45#_WGB9#yyG3oKk*F4!A z*H-oK{KmGLLG?%R4cMq*mCTB1n={rY%xV1FIlPbH=KGUS=S1~Ce75ktgm1uMV3v(F z6QMPOdHs&LIG0hgh?l|WQJd;GBOXbENBilg@DQ*%!`hkM-@cyEx+F6TKBOalrF9M` zGIou~+++LZjM1|q>#CtnAYsnZy^v|oNC#r`R-EZ>hWk1a=dpjbT;+Wd#R4w4VNRMm zhxT_T$%Tr?k>$$V7|EpLw*6WyF4Yp=f#-6qc4O0RB`Nt@&Fj_?ooRN$INNA--B1=H zE^MeSh%MaKGsUTh$CH(>`>htJZ!lvHQk6_w8Bd&FS;T^AXN`|?ruETe*)mHR#nd{! zBcHM4EqERe1J>kOUdFS*j`%OvH2B*X6Bb3rR0QiPdnIq|j#;&ZF=erD#^d5kpY7CN zb+>(Ww}ATYa>iu;Tv?JWRVP*SKb(H z$Br1iatGb(l|ySS)0>F$!YH_fvyqK37lB>W@=+0CAu1y*MtSu$l+CSJy-Z}wuN{@W zEZ$j53AId+>3|U+{x(=fZ49K7SMPcGt436btlJ_$;fJ$O(Ponb@N-Saf0>F72QWM5tu&6I_0!Dhx3j| zzsBK0T<8_K`2>38be&NA3aeRaMhOr7%J1+@|^q;HI=uQodi z4p{Xv+JKO3b~ZLc#JZi1--57Yp8lu4iv2L08LuCoyVF(e+KTFKZUn<+bc{$Ld32a> zv>m^_5pFLyGRm;4Wss{UKuevqO-MBvYE$6}Lh0r_-hm77pT$PH&F(Mz*pR-{_SFgn zAb!zTZjHaGz? 
zTIX||t;KuhgF@H8g_%ztoS?a*Ut10#c$e$>{t7MxZSQ-qaW2iIB&Wa3#& zcTFV6=m=8V3(vLO_Oj=P)dlJbp2vgEeosfp z?GE4@kkhgw-+&C->r0(}XNu{%t4|>xx->Sld35L4FZDEnm-z4ug9{8OVTGSA-lgc9 zn^J5~WQHDT?h**{ycgRf*?4T-SJ>D!E&_mrtU;WW4Hw&NG|ph@V<~h2g-FeDmI>$M ze6l?H{X(1-8dG^TJE>)+O?DDzg;A)B?8plYo@4MLgU>K{g~25RaXz7FU~#qtdR3R1 z$U(+=nS=)FTPMCz4paUD1DOm8^z--ztlgL?nGkPg8Q+p+o4JvHHq!Vn2jr4t4hfH& z6^m~>)Az3JVm-rJlMfGE947l5+`*I+#su?`NpR|!>z2avnsv?YWqNim+sh#(1Ic1V zxxGw}aKi{znZpD8?Byw&*6dls%dA_$Dj+NJ1GFNppK=&LbH4CA{zYBC@d{GcEXFh-n|=Gx~La7OSD@p4`0R73R$kPuXez9XmgNM(9a|}MgfcsZkRA>2gKlFMI3H%1!fZV`k)B3RTDaE)tz9XOh zh7DNz0Jm24F&gOgPN8a(NO7Pxt69OQIgn8yF1`lhXuhG`4fj{Dxnf20Ha5o)ag3Q7+x2OlgIrIiu%3++GMpiyHKe3-U^_m3Y!fy4J`n>6JYQmp^;Ry zZ!clg!5L>6s1ntp+Z){=XfC((HtM)dY^Y{?qa_DZml_vmtGn6q5UO>@MGH9H%+G6} zFv69&rKmpO!Uw;DU~pOhW;be9nAx@Wz^iN75F9ql-)3w9e7LC2y<&hnnro$CA$b}L zF3S`6Y_NosvRdMMI35bl_sm1ky<9flTI^*)oCk1Hcklk z?$n-voU^@bI2}QgbK6qiin6;0sK~hSEtJac&P4W}i8s&VF)6q6J2G<)KKDX&K|2fm z^}|BcTcFD`PMaur8R{7*Z~)%ZUFSf-^U`HspSnf?mMl~aYM^EppuCR^<(LUR=#jWd zH(RSs(loPi;n$%j(`qA<`ZCEl?XK2^SQ>|5^B7m+N$@2ZnJdq;R&6`ZjLUffSy5r1|+Ot ziC#V=f`B~t%oR{!ui!xrt{A;in1RFwYPfA(Hr}z`diR>u8|&d@yOxC<-H6I-ph%pH z$D=X9DA%K0uj1LgiOy=64+}UCkIjIlLCF=IWs8T69_6eBtniI!G8%sx@5nXDdJY>1 zi${TVxC?MDE=CicsbhJ@WghelwMhAYjrzu);k(p}I6L?+z$SQ0a7iV~t((FBl(<|H z2OX$Z(lJB}6)Nob+^~U8rgYsH*@dtKTrmZOgcSj8G;AQHDLqD{eqoTx^dz;kTgI7x z>NfC<$$q3`La&QAE~KTu#yRRt-ZMYtwejAxmyOCe=ijHFf}f^|Fpf!x3vNo|h3B8DoueoO* zHl7A6gd%gK4A)KOkO*%k8K2NaR@$|vub;gpDs4kOi zoHIf>l-#U^Dz=xKZlIQ!e2B>zut)V}Jk8AxK`c91Z*G8XL0E>X>P_ap#^5amUquk- zG{Xhj3n4gT8z<%f-5`5a8_dNX#MxG}-30$@kZlI4#)Rt(ZZg#j)Jgdr*7FHJB53=^ z_y%hT3;?!CoM~)rL^RmGf!c9*$uYO;<1(DCUJbN-{8 zMc8*5-;?%%;2ad9rcALmg~`hNoU>d5!=_y&~gXrWL(q8P{T z$cGA5eu9e#20$-SP|{+;f3{XpLPC|jW zjui{Jv(ea!rSnCTx(?BA9*{P`KhVZ=;laoh4QDygaL#BA=gu1`khsbN72w_g#t<@s zvK-GB4d}dc)%Tt2Y5H}Y*o8#}xR;x$Gk_)cXkv&5$Rh4l38^O#EsB?+GsvTiqq^l! 
z`M~24G(deaNu~Jzw+2W(%j&80(Ta@$%3$07i}CkU^$6nw6Cy?GttC8+j6#E>zsRQ- z8N9&YB7;i|nhXd%6t(;D#3g85mH?XqpmP7br#^dc>;+G4u>cR4`l$IiKE1+#+hOj2 zW`@${tBjR?T;!9q{&CtL^%}D*GLY^|-$cFjdB%MKLA4;NA24I(Fm6=)^WP8^utlP< zkJtY6bwOGS#J5S|W?D;Rn+|wh4!n*>-5j++_pEcBGuHo<@IdCk`3zRU2C%IRZ1aIK z2+vTE46A~8RImX@p!5)RP&W8JHz~jepls9{h8>Ojv#`9tANwy_9czcLaKx1tZDVUL6Kt5?1h8jYFIlzD~VbawX8E^?ip`wo- zRI<#^O%HS{0tmG59_$98E)n`a!{P%%euJ-HVIX8K1TRD>wESfx469Yt4s-r2gMqxK zsMi8pB)`j7+0TPbeF||$K{3ITcrc)hEJO6>%mTj0pgGY3mim;Oc^vO$5Guv>aqBR| zaRKqrt)#@62wk;2+K|InVPALf^e6Zx+7lj9><=qv731spj(k{xpMaAv5USOTZ+j1m z0Hr@h5$*?B@1Qpe5t{QJ@eX;1p-aho4|qoqQ}B*@$M9S99`ugmx8yzKIrxQ@&^D@N ztl*<@9s{5ohWlkKpfLZd`y4k`wd01)##67~(tLf1YPX@xlH}k(H=>Z+tkV^Pl0?^V z#_?~}{f%LJQN!QTCWHWr3|~|otU`Lz*NbdB?yV;D7MrjVw3v#FP+oL9(6*dUwI|zW z`c}6NwHs|{P0$-0nrV-tM!VbEq+m*{N>@_LoId1=i$#D2s57ej0qj>DENfeY8KM+a zCAzL4^fx$8x3jJrz+8+Pml7*q;`Y*?8z@&?7;_Xa?c7$F&kAXO^ciPQc`?XkaUJbkG(V)Wf&%Tu`5+|d@wMA`-eGFzlk+1l{YfZhz+Xt%yj>%Y4Kx)9?fPB1I0lHXkh$w@~NGaNK7t6dM(VmX?et;A#u z0_biD>3DtvOX4KkOb8`#I!!ncY|lF{b+>$I{zwiSsq)=*ZPL?P!Toa$@~1_Ry=v3G zip7;<4Rnmya6YZHBstNFa<-v5`%7bwYPCqaFrxyQ>6n!vhkLan3?z^PfbyTOJ@ja*H=n8`>- zHIu0t(`$O2Q_yC%u`V~020%kpu5x^^%wyp(Ue93$Y@Gl$+jg_%s^N|afYs}u`3O~> zSPK^%4C}QmGa(Z+vREk*VTJTQhY_+%yEm-6DThg2k2I6(8`G06JAR|ltYhcEO8}i7 zW>uP+Km;-rRyd#V!bX=w#OZtU<)FDfploav=k#hA!qR{CtgCJ{Z_ans%GnLCarVNw zbI;75d-BPr&&}f%{$*1A>z!5;j%8wfZq`#y&1n-h+v(D1?qa91HOA@*qD9bcHFq?F z9PA!yys@}?r3D?|JqJ0qc-O!IP5{D{rMpB7KYsU@`T4!i+$D_Lcij8&h!(nkz5{TA56U5g z`Dhj)&QJ)8auO^>kKlPMdK6(<{8YvxxMLon?iv6LA^b!TP6FH+wJAA6PRq&g08Ve; zjK+g+z5;8aSp3G-yC_kBHSk^0U(f6w6dm|%*nc{NQ|b59x5Mev1CiA`5uNBg21sYY zm*p|fe%8R7z44sapDfEhG+0)^XXpGu$XdT1eTr?Lww4UvK0>8)c`$_yhfZsFG zX)n8O_a2XCd-Ksu?`-rqfZY{|CHT{t_k*4dSFE$q!QQ-=>z(uRz4Oec)}qtV%(|(c zdS?-T4(aEkv(bF?cyum01D)ED=o8T=qhry7k;8NAdpWQ8UZHm(JQ`hqmhuU&6g}aM zQ6q-mGJa?AJN_I!a-Q%i&lyR+CsFsOrQK&Dw)H)<^^COjjP&?J>G6jseKIHru!jq8^ zJ?tHPn~u#qAk4b%Sb{3=Z%PTen+iC~IZZv`n@|zGq&=#m0Q=aUX@@f~URwWvrXc3U 
z{wW}GwBk;qG2$$SS6CJSh0%3jK=B_EOrYg4>ZI&y5;X!kHa1%V)i$+e2cAD7IP`kx z@TRMpbVL>-xPV-3ZW8Pxkbv_fW<E&*>g)Khphn5`WIQ<_-@(E1qmlo&wbS=39&kpceAK(jC z+-hRu4bphZ99;@sa}6Hb3;NWS;C7)Lpju#E=e5gU0p|2TJ&D;9M)#Ej{->@B_d4il zq`wJF+&A$nU#ivn(%J>0FG8&KjR|iu2!9D^WYM~~p zyWp&Lw{YO=Z1<1t=;{{SH3T3#*M}-{KFSs3UFelocJFhVU{Wl~h_xD6ie*iu8BPzQOWezNK3opS>pB3-W|CBQqmku`^w!%r6{ zUe={uJ^H*VZEcho5M7ho6ASwsKNy*;+Tha0==G10*^r9lG6DI&2qk)!vLk8JdX8O3 zUpl~H!B4J?fTd6wL$c|Q9nK`l4U+z7hujSp;75@99twdB@5)36kSxyVc(t1%2w&V4 z|F4R2)79=%UauS7Vni^?lHrJvt|=Xb_c^C>okqNHx##-Mx0pVewp5kTp8)YU^_G5I8MM0w(CKw3% zuXyqYAL=Ujn~#>3K1gHe!+^=^7+H`(Pew~>zfN*1K`!SP9Eo9j{pBaOJL6nHiZ=9s z#u)L$g1Fq>ueyXQXx0z8o=P{Z66?FYbIkpZ?g!-3K*PxQTnG9g1-n{50|rBA=v$WTrg{_iw~dMN<>58l}%-9P3Ajj$d-vI)_-!2C-uOq0A4PggdNgY5H z4J6dJnTfaZba$$yfdrHlsg4Z^3{6U0qw3fY3s0Q$HNO?t_P8|Qa`Bjc)});zkih$y zgz}Sk*$c@tNnc!gxdqjG9Ma!oK;u<4N5A^oj-s)%niu!_+79lA0j0o`1fF~TE%n!# z`)@FiUBXRMwJG|l83hqKON(S*s#X`i=2? zw~^C^8H}NwvCN@=*3k1$><@BNEZP63%h0e^LLG?x$i}s|oJEtG)qj>$`slHKmPCt5 z8q10KEqSLSH2YA-KN%J3gflYo7c922Gnv@6Qs01)n}{TP+RC{y!A}rCWkQF0+(X3G zGwn)hL2(Di5UTy4ygMn%yG&^BW?@av@!~*a9X0mou9toE6t5D{KKwQ=B6J=J3$QVNY(8E&2unzZ>hg95zS2;ur0!j`?9$?EsXv1S zTK6bRhd}i9N3i1}jy;XC4Xudeh2;!fVza6B7Z)G26<2Pqh!vM+TQTaA^cyYUNpM)3 zaAAIKO#OYP3ImWWeg>~dbRjeBy5DSR6E8Qu8aQ%MuRB*5^3SqXTT(}C-8!ees<;)D z%RnePCF?j_Bcy%G!Xh01raM(n1ss|7yh*Mh;<})@xC#PS zV@(5a)PaK!*SVML%-%1(mxJ@B8R4RZtUXuG4t_Pls(?a3BZF1B@jheW0*JL1D}BCG zPRfD`u;xRnHfIk!ef1NE2`MWJIS6Q{pEJcoK0jB~3prRLSxs3tE~)xw48F$z+Pie| zWP!xSPQk1GIRja_8lQN(VgK^wYI5KHi(J2BNQX0=teW1mjx~$-oH=VvgAMFd($$kf zBiof*HQlZR8C*W~BFNi>S1xXg(lb|IN#8Sf>3^AAj@{$kpB%Ov9iQip1*#oC3xR7$){|m@wWX7Ov#w_i{a4@qyP%aW}sQ z{??PrJ*caozKXDjdcj534VXDn$6KLNxbM^5Y`3(BU2qiC%NynJSQ*3QW2f!m??s67 zXmL?HjK~yITn(oEF0T&=`&n7|g>ygp*7j$gpDSyyF<68p=8-ZF?F=qO zT<%rEJWN*=$_1D?=wfcdpf$-g#me=cGT>YJw{19fPe5W#?Up<^Ztht$hTLdH<=tXv zuZ_V8-SSFr>)0jqs*LYAu5g31qDQ#CJ=*i#3bx=xl!G~^gtDLyk#2>A?@i!h#{?bi zNvAO3mEhWlt8*|_ofeNMxLfnu1~%~6V)G1UPIExnz+b|w4vsnyVuk*pRH*OnhrARv 
zg-ml(IQN9xuult=m3^L4m-?5iM(}nvz+EF>Wf&pO$wknX_R~|_jQl+W;!<2+SO10) z0R!?qxIvLCRP)&9HR!>r1-K_b3(^SHzhj=i&BRJ_c7Q#X#|iZZOc56H`+T~F0Qbdi zXg2bf_?FX$65J5&CeFwJpfHwHvpZ~MAE~N;i!xs$p`OEo0gMC!g^%$xc#KAZ#Ap>@ zs99X<%4KHoi!_k)jCtJLnMm2Q?&k=9CPPd4vxg}Hgu)+lpoj%X0VCQ4a{vLrW&}Y? z;)KZ4Il#YjnzWHWA#1qVmGDNEq@C`_LQ046Ck~=9vK9eqS$N%=xLr|3DLUKHIS-_U zQvEmFDu~9VR7FaaN&NUu;yZ=!G&Tponrx3h#yx-o@OYov1%uT6CTV*H?gr@l8^@yq zA)W3@w@tA~9*?Gxw>-$pwCDiFI}5vJ8L+S%&932|W^g2$WKPxx=fN^+oWXbwVzXJe zgODddgP9s@_|qnvw6A_M9ACp7C)8Zv7F?U~iWnO#)$rs$1o<|(hkm7-MYarYWWYU- zo9r;^HsIZVSip#dEr$`Y?Hj)pj>EH`d6*-MmVP^pA)_$XW@Ik@K<45BlspI*MzeP$ zf+ZTZ-6{PpcP6af(WH()$M3An%%R)3fzmsc#J>+a;jwkL@;+h?@$E2fQ1l+eyy<$6 zM-O6tEZmGaim{e(^J0uP)!-$7SHy_*{OHIy^KyIRDD2CKkB&=E%Dsm=oZ)_79~3)u zn)Z%)<9+mQ;Z3VjmIjOU}#!w`a8bIyH6T?f~eE1!8 zG7p_p(LR`7SqoBVO^Eq4vm@JE0{o*;iSxWDhQE)q^50kg=o|m!gFk*A34^R6djA9R zfjQ-khWtQ(zytwbf5@l*%q-Te5a(+QDCCjc!=H8u z;KJlp^YT-}!Cz~10-Vt09!Y?>Ow+p+=Mr^Hm34iWb!8eVfLmsJnX@RWnjJ)A3<@{; zyNw3!-l_MQv7oDh^Ym>NCR?xCJNMz#r;zdw;Cbd7_`q%;L64#o`$s?y8w-#@U1`b;@LH01n4JD7SQlW(AJi~8}e;c;*d!C#X3CL)%Wt|ZAn&*WcVly>lu z;v%Jm8if$$br3hmGDvuv)qIP=FEV%s!JMU$``q|_Z@CniZ-#-;F$qVASqU9cApVpK z&tFHDPosb0g$wP+5zEG%QU0@%-@((pF**vceHr@!%k5yg2uo-1rnFq1GqXnRbT(yEg{|%fOB@h4r literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/model.cpython-39.pyc b/demucs/__pycache__/model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b2f95a8c8038acf9acb03489963a22795993344 GIT binary patch literal 6216 zcmaJ_%WoUU8Q<9#DT<<~haa(>b_+Cx*~*IDrcLVBd0CZ{)K&spZUR*Swc-rPm6yBJ z>`;=0NK>vXPJ@(dHk9!KxL!hGQp~qg*-#1HAr1Mx}hqE*D&3xZC z-`k&=DH(V^c=zwy_Vb4EPx=^tY<#?qH&JM~!C7c*8FvisaQBJ9U19AMj;xI$FYw|MV`GX>@e=+^e43Z>Kh0-&1^;D0%brR9KY0m_GLDR6 zc49C?nL8F|shP4AJ3qBkqi!qn*x^>n7LAmhm>6-nf&J~Z^_rc{YzI7$qM?E~^24kY z^*Z&?e=KB@O}7KiMO06MLy?t&sM}Mze`?}LLNDoch0Ka|&47Ac(s*VQ&Y2x1s*|~Q zgGl%?b3^@KvobpoVKcLMfXV!r&sjEAuQx(JN$T~_jsM-beRK0+g7Y`uh<4&fn=2xD zq~h-8!{6Pn-}58CB|2dA<~=`%>Z@_cg`}x#E1O(&dW~eW6LS$RcOT0cU{l4L5UUJU 
zVvFpsqB*)e#JE8i4jOD&B`8j+fS~XY& zzY^ZVnVf>biWXq2fdK`<$8U$=ZE=>=7_vZC(H~u-VkAki#t1? zj+BIiB(4#z3CV7eRUADs!7-X48Pptm3!T*1(Q1Ot0>#^W-pg3IzM#wG|QB1fXJj1LHd zabm0(A9{ULLET90@Y+rD$dpCp5a~xIGj@xb9JW?-vqF9=2~;TwqD1+GrdApEHW0bN zvswCImwuFV<3!xx8WG5jn?p)69$-Rm`&o)ALyGc+FJRq3>dS1N$rtfGK~Dh#biexI zQgm{9Hm$zdxB9kXJ7hp`UFg}IJ%#bOZ1f$hBFe1PhOpaxK#{@a&POI%`y;byJqKf) zIu{L?8k)oS_+08N8W)Vdo4Us??ZcfD3+mz?u9iGJxv}ti-Nf&7VKG`uo@xs(?kQLv zZ$Ah_Z(Dd<2`nrZTb>F!BJsjFY6-clH-JrsbnFEUkvUvFhQ`swEw7z^g$D-9)|Sjv z?acMTWx!p>eC9O6*jJgo7l{3=09LmBuBa8{C7N{e<1%O4SCG15qwPnL2$RgF^QA{? zETH)8l@teLh@W6ZuEchQU4($W!sM$Mo461XgVBXt!3WsKI0N%WZOT5fh{0f(dH8~+ zcO~ABG_Qnr;B7zlxM&0&pDcOKk2Q@H>4WJA0{k5uf15A&peqrVjDKE@LBjVhgbO={^8@`&kDeLA#+!>i9RK}p)KU; z>*$>l;p?kk{^^hO{N=UNBEG-GOV8`4bcLrYe+EKB1Zey`URp=9$$wyI&s-oR%cyJl zg_)Tm2bj*FbIAuXUSLXwU8R9Zn>Qq#xf6R^ELGwM~~NR9ezT9Ahrt75fz z0Zt(`7a`>*1Ut*kekpb3r)qj_h8;iaq{5A*}<%_GjqI%GtZ?9pBN{M8Q5Cron;iCrWw zLx~-1o2x->0+@1(L6S10;7oyNa-cO!-gWPm7sVcpP3$m`(;@UY+XiTi-5kV&Fq_zb z@E;2Qwg4qUBmd3Pv+TXy^j`IDEPZVY9n^GXOuU0->Ik|q(jc!X zy$xb3GF>`|k-wr%N_h}MrZf*|OZRjFayLj+Rv27pRv7RL@m7FTQ&M1*S$FO}%xu#2 z%+VnkLbGmHL_8}!5W78OdeA7jL`z*52O*IKz@qD1Bu}L%xUHSnsg%5nzWgmUgqM65 z4Fz=CTxJ#wj7D0=csen-N2`c6nVXAuHa#X=a*d|nqv?{51bI)ZC53=>oj+vLNI{2E zo;g9p#X;ugKb!8pfIzezQ!_i~Ry#Tj-5qWWRX*B0vBuKr2Glcz272wj5 z7E#WOnIb_TtYEvPu`^y8obgG%KP3`pyYEJ*Y5k9fP0~cS}k&M%OKl>^od+mJZY~m1N9CPvLqDr!&XDxiNnMCs2+9<8^(xGy;ba z(MSddRCV&&eu87i>B;jYP)S*U;KNX7w1KuZZFoOV96+F9yJ?WyNCI|sKds_cS)F7~G_K*|O29FYWT@Pif7{?;0>7trMMsur+VB(gy zE_;+P#@@}2Q0;;Euc+iD2YpTTeEu_lB$U7yGZpCS74s7GvrGOD(;5Kcogs4Q`{ZzIt$5nvAKB61Qn5oH#{Tp=TbK@y?(aKvn_C*v_o}#w>~qn5PoKntTUzN=iNTMaj3R zrwg%Fd7t_pP(#L4XRrBPQN}1AP(vmyv$}CtuG1$KO*5!B`cgnUd+D zC^@+AflSsa@(sY}?)y#Z(LJxPugVqr_zg9B!7BBpskuYVTWD%UN!0^MRW12FYILqm zu3mPiq3WOfJ~d~RfU}z4>zKHOH~Ar&A~Q`?l)vrw-)d02A_)QrlmE&b70X6j^EM9y z9Hc*ClB1w6z|G@=+tXlJxVzKBJ Ir;COE13yy{vj6}9 literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/model_v2.cpython-39.pyc b/demucs/__pycache__/model_v2.cpython-39.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..7db8dd6c11301d3ead73d0c0d6c11bf6510bedff GIT binary patch literal 5931 zcmcIoPi!1l8J{;ZJG)-*t{vNP+@ys8wJ58mPE+W=(4=W&rBIT9S_gq~H6HJK-W_Ll zX8mT?cB0vnBej=QMdCtF_64cHl>;{}+&OT1Cr}ZUIf29hae?3WW_G=sq}-U*d^_*o z_rCYN@4sKKR}DPB`}6NNo97JUb83t~HX7IPCLDkpoQ1|({?FDJ+Gc2Wtu>1o+~Rg< zcS~!fuCwMa;~j&Sxbwi^j_G4Bt&t)mnZp?MvYWQIM+XmWWazy;KI>7R?@ih+=w@ZvY5`s9ZC-^P#cD zxXG;tQ){4WmI3oaJ2N~xH4jVN%GiRDu_F^b4mW_`zP;MCRedwyffTJYh$BB#)u`X~LjRtS ziJI;NoQudyg8M>MgQ(X}_4l!fEs4CO-xE?*Jk0>llhm(lqFLEtl6IAIGl+yQl@sd! zrlssegj>qu0S5DJK4WUi^ID;wB%b$~@$Yx9UtYhHVE^^EqV4$Z`ie;Irg3lm&JW-7 zZuybl7F{rU{gxj@-ikZX8*>qQyDu&G?#Vg`n#G$Cw+vQg3+#SF zGj(}Lbc=`{KJ;C}dmV4`8bHPlAzpMvN7jI4U@};U_EUy#fz3w@Uo;DB=cb-T0%PBn z(^xM_RP+Vn>}7DP+lP$IQ0FOtrYYy}r`WzcLwy=7QBQd*mV3VBr_o5XHn0YMRlN7> z`3Z^{O_t+#=prJ&D?CqCJr8W?hxA?Zyq&%u<|CRT5;|q5dcpLT2t(Z6$^NvZBqAhn zjc83sd@HO`(mfL#qY)BA&9U#`BQv(OC?1+e#=s;&JO>WW8v~0*Y-8KXtY-|<@ThHb zqtE0DeT|Q3@icF@fez1~-fWhXvnPUfCskIGa^-N5`mK(#xu5#V-VFQ%l5Dr)Xje^1 zu_dI4T0(veiy@>fKNMrf)9kHDq5C~r^9<&VcwA$%;IcfAzKMgUYv+aToVG>aq^Xt><5_8-+c5$jp2#%iO*qPA@wyj$K*w{TX-iPQVn{NmjH%V zNmzVd+od{hJeWFQYc`+Zv#?Aheu~fGA65$%sth4tP|k{0IxXwR*MQ}iWM=h?Pk&C& zZ{IwwP#+1&7kKHpc1)bSalG^OLtkEJx7(R44w1nm)zl8FfST^v}@W`%_=uDAa z%j$rotP1F4Wa-LT1+bDqw7jXT0a(pu0H?EAz*<^AY#iarvKq0IH$kc){ zIFpq=Ve;$QqTaE6r%dTD2f9$s)$NTLgt1P(=B z4w`5cENd?a#u(&(j})piKiX*M@Q%jD#kA6V{t!4P%%kHVaJ9yF>F$d75Jf> z`+U%|B4-6*!$m^fX|CLQk#Ng3mu z3+Mag`$Mz?4tW@gs&4(y6>S;sk@Rhau}|XN?T2a53-7t-L0r$}!7dOs-4n4Kiml0-}E?$(sTv5t{gP=oa}n>zCc{Zn;<8OBWtPu@{7)+mkWz z4)&!hXc?)4yCvhU8&O2D8OtDQyJ^sUoCJt|FPRk1TzsWZ+CIEpkO!?z+%AF=oCf4} za}mq00FJ)>7-?I5N*r9V>xX?kSqn7X5Zt#2AK6s2DdhD+5w%ZDRFeKbHg<33n-mw7 zh$I0=2W9NRl(3a0IS~80rO3` zBULWcM3oCERMtB;?bS80}$n+!EQrmyk} z4Zls`I)N3MSoM+H?P?(-)_5A5nnw0C;!`PzxY$=teryV_$=9hG%=2iH)M>#eLD5%6 zr%ZkXU|g&+=Se@a%BrT#YRoY!%*L;V_i4Om*&O;S=x<=%S+=NCn+1$COxtt-9l-lD zd3^cgY>k4%QAqv?8VGI-*+AmUUPpe0pxoFtb=dX_LUilUrr>eAv_$iqa|nmHd4}ed zxenDSkHbiXn@a{lD7LdShG7Z04Cd!+kw^m%t=(C}mmxpOv2)})lnmGwwEUM5 
z-H&wUJprxP<_^Zk$Hk^D3_!r$LbKtF$now4(DD<#N@6s|EIdYFI)a33FWD`Mg?b5a zY$1@^g-?S;od_b53$9phFKd_JhvA5vJe0YhLxG`#$i4LoMU;@czvr-nc41fC7niG} z{`g+7lEX#3ppcVP$jA*Aqjgb#`;h78&32t;dz z(Z1qd(zbrsp0N3}GTn=DmNv>5iQDoccM}F**JNOpf^HWSfWaW8)fqd*4T_w|YQao{ zPy6_8rHPw#;(o|oP2J?FmTAR_;U1j^7ILc(heHVNCWup9R8?YZi^!*k;s&{yjUHmD zEAFc7iwz>?GJHlS=9f3%Dk$6E7&Dx@d_W@JtjcTnlkXDH7Q)6Mko~qq5RO>{tLyLQ z7U27|%F0U%PA*%Dji>l|4y^=K?p znEmXal2$Qqn%hUnvJT9_6!x2fLYv4SHI|)E^^Simb}Um4hfy;tLHTVx(>$zY=3$+e zj#!FZb9)9E8?r0i2fRl%&4&@F+}2KT6~!L%f~tI1h#m@H$N#~j3q zJyhM`1LZ9m8hIRum99Nm-C$pyr3E&nQj*iGqI3e?^cZ1#|g$G?s`V~fX z5y4>Ectc`lVcul^*hjz6`uRf4$DPXKo@I#leM;35RyT%6-Tmw*a-p!knNhn9u3>G?85AYo?arh zhKEFQm0whqR9;fClY+eYx8xtpYo7QC@k5mF^ehWPu1a=mx=)|JpYxqd2mSqyg5T7i z|G74ER#E;%jpIKXjeGcFPgNAAcnVXQ7O9?!T9354?&)gAGdvSKBeLo_FW2Q|NF=Xib0V!5bL@Avw_vzQ$X)CavmN#~=X`YG>}q@Cz={fu`;(tXj{`Z@1h{k(U+ zKI{$GN4$~xs5e@_;9XFaP~E#2UfNf^%dFtN5$fTUXX=K_`Y$Tpo9hZ2_=oDf#Rl2X zbH#g`onog^zr${X?}p~a)leT**qI+Rc9xxcZhG&r^K2OX*N&|@;uYB_yTC58OYAax zgI!_n4>|As-pX&Xw=m-a_BMM5^>y|xyNY^@O|tjcwdbl=VmI0Q*j>f$x7Zjfff{EY zvT-&6YJzFYN^z=lalRR_T4UX9@lf!f)(DwfZ?Z6oOU?nQWTB|0#-l(~QJa-O1QCe- znOddt+jh_pwNA(npd%P}bDL9?;F62uFkKr0`I!jDBDLNs!pbw^pLvlP~w zB3uev%``vXEC-P!`ybN&i;ZYUHv2ve)_aY@{l<>$mR+YL*}cIm=S|+3s0z`FKN=rr zQK_;P)LQMes8+7jqA)I%oAq%fk2qduPyI@*5k%vu#pNlAMrtzNuE(jd7Q}ICHbph$ zskKe#ipwO5!yjGO!1n-OJcL33ivkFJq3tS(0=eo72Q=VEEw#(lpwWOh(_Gwa^Kuv$ zHJ(Rbnp<9cy!2pp88R{JbmJctei0o%%|G~P^>G~Xc=di`qq(^{6ULjO*;;-4#ghLp zXawtFJ#2{8hftP3*Nj-mscURzHM`sDktR#6opdnc`t3$dh%gp!VT+g!r>F&$Uq)S~ z`yBo>5Eedpw@*PN%7(PG7wWE>s2i|9W-yakFJOVPonto3Gl%uPFu+kTq98QZ&jx4& zBWVmS%;AzY54YMi9Lf+!*j89O|tR9Ng*uu(e>-QU$j)g5cll7`r6g>t>3o?Z$V^XqNV z#{3-|x*WCR+IHx#^Jcr17N|Gi>uu5}C&^>J)e1P2pPF)vL_}6xLVr3a7bLX!l_*$` z-vd)ZkhD3ZI;!^4(rlFcDtdrFgXEDKbgwe)aQGt~_b~h%jRacW)cAl1X}h|xgqrA}jK;M@-OKGO@i)Rw)bACR z`RLsH1q&reLiJlmkVjMueG3P ztu&Du5ghJG9XofUS}fqKTA$pZl}S~Ws;TyWI+xesNhS75cjd9;Pe2@w?&LE+C!UH_ zbvjovA%;)U?*AQwVjl}}9Sz<`#Q?GMz2g-vevQ~g6kZ`y1RO5BaO%kO`y^)1hH|@9 
z(AbgG32E?KSYZO66tH62D&gBwhhAE`qZU99s5Wr#sB+{h`uQ0@P5ud=#?%)S|Fq6+ zsay&nT^z{MxvikQ2}es{dCly*cp%d&>T=hkyOPI(MaL&@k)G*E#wU1^)yKc0TS=QV z_%!Iw2mfE}+P+zu829XOV!AvveWT*pledE#leZ_!o_%xb)>KfrbJMdYrh`ftOn(Rw zhikW{Dgk;~<>vJC^x7nP?}TgR8y{ARxzx(yL)w?sw{H8@t+db$8q&?CL*3R^H{i3> z5<%PuMOw(}%pL!HFzGp&cW^}k0~u9z5iApB7!GHc9F4-XZ?#cs>%$Z-jD?@nu>(p0 zJ9-IuhnAjtv^2Z2bpP{(*%|-g;>_&)a%vDD5lRVVsSXElQsOz1c$-IGV?a0F*(#^7 z_8LY=2QuE#o(T@QO~+7?`S5$-{HSL7rQn?HB3*GmY~U!=uAwtirpjkQzqW}9rrhMO zAiB=>G!h4m=`Y~=z%k%{uSt%U1z5?=fqS>hwF$HsnFsGtm*;eOec-K864G0*0FFQ? zKH~I)^PS{qOs*w#HjpUaE99jA08QZ9gA4pNPPhUjJ4e^!^L zN5s@JrYQl#KR(9F_fRA8!SB+XU!drWeBRg&q8dBy$z}8|8uAE7<5TC0AZmv)!{alw zzzJOpMlj%G+NfYm{00g|I)}_j6hPDGbiabe=P{y8PVDeUTIy+P3JpT63|#7lwxK6F zJwza3eb*3h?}>&GrX%gq_++B(nfppN($cxBa84k8AGH(GGbHXMWs|d=~qM0G`0`YnR=BbW>6kw2P zpOjD^jsOwKz8y@9AM!%#-`5iC6=JvL!DN3EvD?|x~kOI^=HU* zHVys_Rk~Y+yODvYBd5OxOgLWjX^FM;_BN`cwm6%CR!oq}H3JaZS z@)2Ayvoo?v{uLE;|4xbCL@v@3N{-~>*N>LI z?pC+1yXBxkF<&mle~C#zqa)KwKgPQ)wd;66NXqehr%J8Nn|nF}K{3aFLp%2L2v;kB zv$$R7gpEwCwlseJIWc}s#b;DV=O9&mjhZV|ylS@6%D)3YzKTLY{z?XF;Gfl}Q5!P8 z6DQ-!Ij>~0bdDZQ$r%9=L(tC95fYe2SjFp10}`f~u?yVt>&!$x3X`TBmP{DBvKBh; z_jL6?)pI~L1evN|WoqiVHaIXTwjBD8!QVJ0rz63fkO?jFcYN__6y&}RWkPa6mP7qA zHX%H%6wQ^nxjBi7qP8+uypiU7pEb+ApXQ?G`Z}I2X|9FGdsJJK4-7=^ofdI5(LzGd z literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/repo.cpython-39.pyc b/demucs/__pycache__/repo.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982ebb16b36b6460588aef5b2565ecc1471e4a08 GIT binary patch literal 6091 zcmb7ITW=f372erhE|(NV)3PkvY1(e>v<*|6Q350>0>hCnb#AtjI&oYR3WybFB(1#M zrDvD6L{LBxxouwBpeWFn)PVHp-_RdW6zwnA*FNRBZ+S_-b5^|QmL^NgaBe%7GvE2n zIqOePmkm7s_~*ZtcTX9{f2cC~*r+TZCI3byjiw=mG(*ubo2H<$6-{Q3rz=Y zc35l{`QL52vLKzX)G0U1ovG#&#ua2StaPTE)51{BdR0xS@``!RXr6f6kghB}F=R>A zwyow#Wvdxk{+lVMWaWw7Jca&gSw(+!+icFFc0$%rtEpLxK8^ZGIfMEP*XK|_C1+8e z#ppArotAT`&7pQy+8-J9GyT8cQC+PP6{U&id!1OS(9^0LCqWwP&5K?TwZonaqE!#W zE~VNJA|>;2cp;MBvcH;m@ycFPqkNTL_u78ssUTfb+FMp0{=E)fMPDaQ)x-*3f^{$> 
z`mWyl=%N>EY$J@@{^T~wk1>LPQJHUHJuUPsCP&`;MEzY^4L zW>eSv)elx~)0vX2!2cLi4r5=o_@r%`VDh0C7m$()GG#OcZrhXsk}|WCJi#rz3~BsI z>sT+=t;}t;BEO?rt*qRlNqZrcE3MYUo*(8t&kRk|arX7i>u)aIO_WZSu14$egQXiP zd6354rMsWpY2EfCe^qsG7fZJxtk%sqluA?Ar5j69b$ab&iSDM+-PA5-JV!e)D7Bf>X2Zm8I`p>-^J@UgqdczZVD?r3b5Gt=f zb2;-_s;so8He|4>k~FhI73ETL zbQR2Db zbu;=$wA7C&`^iy@uBtR&^p)cm9gSJ6mvo6{p#$qFWLdG*l5rbqY$d7A?BzHP^#$rB z4bm@AHd5%kMA*=TgI^#etZ2eEYZkX1fBDxidU*K?SHE6R);TC6t{(v4>$h&k2vr5jlEdqGbCeRHZ+9TODzUw9m1F?gZ zcSSNY5K_EhY>B`a7%ed{Zy2Bca!d^pI|NC96qd{_8%k~ujCJuyBsbp!Rt92VoxywS zVqjl39zM?sUReBEyqt-KCUAW%E)i|2NG8PU5H?to2|DS9L0Wh8d5qF^%3h-E3}p;1 z(%1>fvqFM@gD6N_t=G|+kYXE9Js0ZeinF3B`lrUCX$;5eA7I!av=RJS9E1cM#8@{T zfu9&{&xC#%tw-UFq45?vhwz@ASn+6Vli){8d<&I+l~x-Fu%>y>h|b$aDmLeHc1>Y` zm^aO*7IEf%BQocW)LI8z2Fw$(@JDOQN}U1vnNRz#UXOdB^divXN&qA6AGY2e+f-0N z${%ZLR3ILvYkZyji#N3#J>*ECQ+3eXx4qT3N|RJJ=8#Al0gP$B>}Qsm=t zI)g!v11f|}uop>iG-ZaFAuk_#a)Axx4L>(7jKFw&PcDemy0 zfd!=NNXZjq%oBM|N&}Z+*8pd%Z2`9HwiFLcJts|+3y%!_9{uk)@6k#VlaJhe!!23SEo? z9Vp{hgQMYa$rR;pY^z@#Ul`r?_a`^~Dpoj_+I-rC$(!h(8ZUJ;vynoKnC)X!zCW|^ z)mnhctrS-dC|Jac39pFhBGhoX}s`3=^EX`GA&Ux&U*C34YnaCAyVK;d~o0k%)3STA&i~)ORMPD;WGfQbICzMb&msf|{tcjkNEf zWfqWdv@^3vE#;*Ro& z2Xnu5NN8mq*wX0O=g-h_38dHQ$Y+u55hDNu9|j)FetNWWGJG2Y^#Zc`-o+DXYHmGn z@oYcP$tEFJ$NntEmwTc(nk#8X#FxA)qJo?&iScMhV9Dp0B`s#j+8(QcHNk5&<~4J% zUubOlov>cW3M~q@^POMAWSXYcH;@e(o_(bQJ6}NmV+%qA>mrNm!R@DF{d zPhySC!n~Regco>E1cGLX#&Yb@v>=qMJTkJ0eh&lm2b7VK%g;c52#4A1{{0rUWER=b z{hKwbaO!8+^!4{4XmqN@`t(6R@TZtS6Noq#JDzxH_q^@|c3G2{_iv&%LP`k#uCT3I z!F52r)ZRwg_t0t!NXcbnL+YjYZX&YF{lUq|j{HHY?~m*%SEJ3_6d(~5KFfiSxsc}n zp}-oJo}|E$XQ)3x&yemIR`<{_acDj-(cKqFW4G}1q_gJzEQq1$TZNWXy5n)ZbgGoF4!4F`yg2K@zTNGfytc6c56W&Wos! 
ztC?>>pXD+f)m=nUDC1jLULMH~u?tI#w@2E@d(`)-bYOGT`&Xnrdr^VY*5ALu3D6!2 zUg%Q-V=Kq&ejg!ohX*|#uXi!QpN7l3qGkiD)K)*INg=W##d2Y=oPRni(pN*Ok!Ug} z-Ry$kG@)0&LD{dVx!`xZ;U;T2hfi>HDa1?IamosJBSeatz9eLoJH03kI%*sr=3WlF zS$s5-Ji9IVA!%n50vxGb=3WMQ;*;tX2O|>t>|+-oO5vI=5Px# literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/spec.cpython-39.pyc b/demucs/__pycache__/spec.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85b11eb9e6cf343c212ee1a0658df4aef60b507d GIT binary patch literal 1096 zcmY*XOK;RL5VoDf$)*nq6j};`J-{J{BEboXNhc&R9qBNwX3`hQ#V(`_U_)+ zUOUc2yN_%FhDm2a7$J#`eAdvfIT#x-$_hY}0r^HhklL;tO)jXQ+PP#kso22MM1x=2 zS|Q6sv*IPJtTi~furcNM6<_sMh8X2dn^xc`19+K#jMP!7TQa65+3w(?X+g-7d~p^(GlqzubKCeMq{#zj)89JMMR z0T4P5ol_tBB4yY8CvVMD)rRx`WX5MGbtCf_NFy_w%3O04c^QgsmDFw{I@6BkmsU-* zD=ZXzjnv+(1YAuU$-SxM0}i4?nQDfmdWJH$Kyp_5H3LRxg0p{O^UegSkSHvA)rcK9 z%Eg6;&_Z~CfDQ+)T0njm;TC{#`Y4;ajT?7x!&Wj*gE_T^JNFTq*DJ_Y5inhbH?oxK z9_}VxzJc*z_s$3<#CWuC$=Gl$E8!x5~yoRbhPURa8wn_w>d6vKQ7hwtn@&Et; literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/states.cpython-39.pyc b/demucs/__pycache__/states.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5606810d26926a5d73c17ed1b467d55282b6b35 GIT binary patch literal 4443 zcmZ`+OLyeN6|R!hFS-4g2Y6{T4b4;we!R^+&vOMm#Ni zF@xG!@r-DSS=7#n%VJK0?)@oqm}k8b8!ujQKA<)qj9#W2crvL8#ca4$=Gx$I9{7G_73k&k~KDivHeajCaa2!7xee3QwmnimW;2bU|Hk6pAof)BZTvv5aV;T&#Dud za!kFSWTC>*jK41P;lS3>?yV(S$vSr0s+o#_4Ynm6z&6PSYMpt7H3wln&`ICg#@&vz ztkMwARII&?M+=$8v`=HQyU&>fG}%{g-n_PYPsLKL{xsXnw^o;9wWagH>b?87!*{H6 z`f;XL--)o%TX`yCNv~d9UKMeF*j206*s49)GqdZlo_IknVsdo>1#>)JU|HVxNAOGt|Hf zauxcyx;Cm8aORP7;JnX->oJcVx}0sxmzcVLm2vy`0m^JaH-uNPKQdAIvwO&Y&Efp& zMRowkiyH0~-0Qek3oidscts^)MfI=z6Mj(3mbE{cDZC?oP}j|(ddQdAhtC}}ibhf2 zoZaE_3+!r6;CYeRomGp*Yi#?A)3acHAM^WqzNi)cA^2a^0I8Y%owtcLQAwPjC-c5V zSBWY3D~VEgeN3#SAWne2W5`7c14Yu+!3$9;<4EjT62_wC2l(X@uLmF-(KHHrNg4+L zrAX9P+fN6zV3BDoBBOq+u`yGzKxr))KT%0Wue-7FXp_cOT1uBDt7EPVb13LTq`Mp8 zxzaOQ;_-9h)($p~ZAjX^xg>yt8B^F7s8{<#*qKrq= 
zq6_d^?rNqpPxc6N84jZE78J=;u_@YI?YP0C9v0U2;F7Q{XKB|i)-vu^yeBVG8_}vP z&eB_-HW^wzejCqf2?cX~@L-Pn`1AOZvjAq`4@_Bbd~n3ayE>n9>UffE^#7Wgk-6wn zw@?%uipAv1g(J^uw#hRn*zeizSPx-ExR2@u8#za8>23sSBOj(B`;HFa@(3Uv#F8|h5MyUw z8|rc>$!)n)lF;@_m%|vxKeA2@7s-rz4h5*oKD@N*9Pk6Eec^1vQi6SiSn#p)2WQ)Z z1swSVIzoS|BHl>=(25}7lOT&oN5;`PVv^IoYWSAngU`9VivA~^eVq{9&HIBJ6lVT~ zi|s!{!f5x|zMkgYC{;Jw(|&i&OgNEE(W=M*V~j_(F}@uYT9#B29rsn|+)0%k$GVnh zfhZR+QeLBC3dC3G(E_nC*#==oHBm4RDC19C^vT!od`rQK6~dBz9drG#ha+ z#L>>kMbt+yv0(-QVN^YWppR-tu!V4b<3PSCm#E`v2zL=&!o@fJMfMas;IP@u@fb?P ztQ~%Phms?LZ>i_d*x^Kjmx3tGp`(^#5$F)T{DW5kc(qZI>yG6|uoeeHg#ci?(Ny_R z55SDPcN`2JOu|GdKMzRxA52j2puO@XMcc3Vi>;<{lHAnZ*we9k>)kN~DT6aLS~yMC zAc%G&rj7~heobQ|ekhW4PXVOpI_&k5hh}D10<_^8&5ER?W^=ww%kN=T ztIqQltz+OD;*9HBFs|WC)CoWvgC7_%^LD zr7uO+GQilSr);(Ww`@lGH_ib{=Wb$%qBjiDqY9Ke$hqjR`5$>7t*5k+bduO@Ek%Rq z{EuL-5i4T^tl)lKPDuTO!f)W^}6PI#9dO z0G~#aparH*f@|DfzvkPal%YL84ws0YNfsdMF2mg9z2!VjS6b)=tDZ|i7VnM|nroA; zr>4uO%VTpLueFB6hZ4fmtKEW$Td>RuyhrwvKcV7v6ercU)4-^5>BroP5H{Infl(!k z{U*YWgM>$t)SiMS`HFZ#zEPJ@kPv?a!L{iO!L$zVKvusQqBQhb8hQl7V1Hlt^8k$U7hEtGL>BkAb=bnVzm4f6_(elRI{rzPfqsvg#eDfZ6*o~(TB}LcZQd=PK@)sP z-`yD8vqB~5Ypnd}q{VFrwt>NZxY?|(BY$@@M^Kea4@wd-#Mr^FLn*Q)mDH literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/tasnet_v2.cpython-39.pyc b/demucs/__pycache__/tasnet_v2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..651e9d0857803234ca6afcea19ac8fb977c1d425 GIT binary patch literal 12195 zcmeHN+ix7#d7sS%{SW$dp3)0Y=&3-Tnxd%s`@WgIQIzcj zX^R4##hg9o%(~dV4a>+WXE|fY84JgeGoIy)BWGVYft-mfXCHF*hm**e z%yK5eC&B|5#eRG_7*65KRQ6>OIfue&ABuh-qYT3?*wW-6S`tb99}kd4DVSn zDyGGuJBD|XzZ?@M*G{cfw!g5W?HFm?e@>hf&)(6!)8b6{6k>+1A@+3oHA@{Ar?c1z zahhs#YD^pwN5wJmq;SPk;%RYQoCu$}Tky_`vna27=g`ix;vAPPqwF-saXuTzQE>z{ z4lP?49GUClqx z5mt0nssy{AD#b)d$(VGu?i~dJ)?-_@Xq0levBZ|wzVCmv=5C#+Ln=} zL5GxHG`7qwE7rG-#7guXV=I^FNp4!(vXQ3$JkloZ)TEYJ2emCHaX7z_Xlv$*mKZ9t%SbYVeYQEokuF4*gNLdNHVhR>}dR7*wMDl(A?3H($~WNEK_%P~7r^F!U><%8qI)&7kfTmu1ipeiVxhZ@krxh4jN(Lo}mWs~IVq 
zzecK*ma2>9O1$cgMeU`bjL}{Ox3j;SjQkC>$3@i$8sBKgKFx}I;zZU^G|*-`FqKQI z*-}L9dVF7#Q<&+$U!I#?yb%c*Exz1bYppL{6w!Lz+E~2t_I3Yi&vVkjEzYP1*!QB%bJ%?oEYZmY4@O(E;Sh95LTT9?!4A$k^prjP5BMoD+@ z8`m9U%5Ze+p{?5wEyT*klwQ^+bvccaRVqICl(R&10TH1Q@AM2|hWo#*E$bn1wgT{J zr5w%6rF_h@zpYuCmj`7`s^VO$d2=C%=0*GfJXU)!iIyH{Ae9Fu#T*JO3W$0@8&~Ep zDNBla`@zM(K(4!#)2d_m)P2SgA@)wSqrXECYq6eyk!%|~=m22Rd@}}=?oce37>P+h zN^C&DSbg7Q{>}H@nKJU@cQFgpz^JHI|g=xdRj3Ws(EeJy(}a; zbP0wuug|RV&QNE2bU>|$pt|Zxu_1zZsK4ujmDBIT18TNp!;f!oh*|fQ`O_{)`LtWT zGC$NvHE2gceb!C?-IjElt>&3DHB_(>MC(4j)oV==$XT}?33qd~hM`e~G}m2jH>W>Z-tkyZIt0Kp=$ zaIz89{mM8bkEk!p3Z^ZICCW1toTcC#1?MT4p@3MN{6z}BNWqsV_yr2SOu_RAl)iCa zR|Vg%)`KYWedglw5Pd#{;3D4W1On|?*|7DZepUyU?f#6iZs-Rd+C~|?xTJUX(^ON& zc(%J8LwW@9Lz1JcM-YchppV?w=egI4A*nlMr%RfSgoFwYN0n{{^;(#%_a?Fz&d_i$pn#SX<{?J?eavuQh77NWYqOzj5`nyYTL5_o`cq+@&A};S00Nrv8h59 z@i&!2T@VyhvEP~JEch*1Td6fYd%-8Bw0nQ@%PqMX$gqQMA{MgwA_%5rQaGu1CI+@C zD_R+qM`&b6Dd5;KiakkzOTkkVJWauI1eYXHqI`w|(gG?Dp}tA#k*|ur-)M#HI>k%A z|6V(&r(YyVRVHTnCdIynK#gVuUlsLwm*|<)E>RICf8r`kE!4lG8zv}s@87?UUvNz3 zLRAzUdZ=%DA_7xa*$8#dWMu8q1`tUXSz@^L;I@!wnJ%gXwC+Q3A_N*(L zB)Z`lrMtk~iuu;d_@Hc}iF|Fo(@mA%%O{ z{fc{P;mnoFxl>opU8p=agY<=kb63vH^hQ@Ptw1%-zgGKhi7s}#&rz{}NNGa~)( z3C3 zLclsN#IVAnjTS6YpDNG_O7gR%S`isxY$Y3YC(lVGt-mR+A*u9+(yK~e zS9+!=4MXeCQ6`mqg>f*XB1}^kZ)dE#v0aeayFm7Nutn7PL+XIA>*zoh0|JSmhmnmO zHj}%^3LVB4iJ1bhWrz78Y!&1$qsD-UtxesgOa-kkCM+Sb0LAnVYu^eHXbH>owt z{dj1>Z#0pN#RWD$Q6`y4_Q7uiZ{}TRMgKBdeD7K^ioT1<*gcJNM&Emh)1&Yq?dMS$ zFq!of{5-PB*xqL2Hrby{?tt+9k&r}izFh`|^(gdFn*V9-(Eqn8i4kPa+@RSR!77FN z@PEYayd>WR2AK0eD19Aqm4`wd*5KH4!djixwZ6GEUn!;1gdxT<^f{_eR+yTcOT7!5 zwMbkcg>4=to+R-if0e%H*oCM@yO!iD4&bY{~V?6DjLrx${^{ zkb54s1XYnWMK&(IiN1$}Rs5M-Gb*2X1(Yt8&906}>9^!64RQs6w=d4@T(06{KwgOr zY1Zd%&B#53-(R6(^gIHsOlGBRI>v-y!GxsO0oODTGX{S@u^u|b3B70>0ADp8cBZ;( zLfRv&>%Pz=!O(q(-IIRELUS1rSatAQFPdBIp@xuzX3I7ngdq3|ywBuX+kvn|96V+n zHYsbtIp$=>mfbwYUo3p_;@oyV$)Ud8bYdq2H~URJVFkYPr#;qjS%?ido?zg;eC-YL zIKk&pi&x#$PVN68G81Xt$CKMq_2`}O;6VPdaEPX(MCFoD(P_DheRr+S*-pVBEw=e 
zHrL>SNctkQ5S!aM=x4>4yuJoBvymA8bdb{b)!uQoN+<`Rn0jnkpcT5t8X+TPU(l3J zB9V@bnbH8$xprM#xq(B9RcKAj7u11^Vk2H9F7+0>u$JnAU?)ipRVEM*L;fPgN8r`( zxywp8Q0S%P+teSiOXeCW|LCzIFP||BmD?acxw6RF<=G)f0{!q#^>r;8u%%5+2=&cl1 zuaMo=E4HUDOOBu6Vz4#dN55TrK_adk9HrDkI4+clr5m8s9tNgp);7v&c?&b{JTW}i zF4Y?u)jyWti6(LSl^P(91HZ<vjCz$BJgUFsl-zRC9H!#wi ztwQ3+FG0HmhXnx4ZJW;fK>pC>00fYH1{QeiLJRulD9X!0AS9zZcVc2UQY)yn~qjSczhRP&n%yz)k?){F_3sY7lLn4F|dK7qo3iD?fCRNeTA z?O1$*R%GFO!00@=H_rFQ5OI?phGDrR z5_Fy6>jhN>!*3v^o5)JZod;}w6i9kbObCd-@ zV}m_0e-2JOOnCE{9Z!s%0Uy#7S=9994QU_m43N@TcY+!1$Zrf+;uT_^ zO{@7es%ay_4lzqo-q6uRccbR;A$k!(LT9Lk9FMU!cW^+0w1KpXGvRL9MEZPeY@4(` z_@`?YC>hdWM6UDx0%(Nkg8arLO6#>w>POzJ)p77B+*6Q9I6Amo={g*D>6NFu3qD6Q zm&x?>P!fmna8DvPVmqWNGU7~Wkh#8R;*3xkwQ#B*bveTa4{t{kC+B`Zwm3&C0bgNDVjw$0kWUPeE0F7Jdg4{&&Xq3 z@tKdLf9mf@8g=K{%wju3T-W6(xin}TO=Syw3Nx=fvcIGzGZ2Y3hJgry1jos?4TlC0 zumhhRgi?ObT%7H1RJ zYq%vu=WMv<0P71Wt2;sY5sKW;$&c};tT4cMb2#v?`|`V#lcQTRKJ=)INbRPMPmuRM zjkU*>p4oGpyXQZ&am1zwxIErB5s5KcW>@6Zf(?l@6mZf}=m-aT2i&f(AfdL?%_cZx zt;CFT2er`NF%HxGY}|E9PYm;&kGlwSAC-`|Q<0aQ2<(QYJHH!~05WBzLZa91o~kIw z(;sZUSDJ}w0R-?8W~%b*Fk5jBjl&(zkiUyu&uKU5Oh<(Oj1-<9{|bz`5kzneN(4=1 z-V9{AU*<+@Q<<+`dy6*4|@EegtMVQNgg@Rz$&2~hlw=Xtmyop7_qt2iV+~Wg82`_Y zE6CrWLC{gwZt|faevd}d1AeyLJ@Iq)Gcn&&PeL@7=xcmb1jHMN;q;$kz&_n>WTfK= zg}$V|?!hv{+i1D|F^gS$nDd(f&2k2F==V?_YmMdd-3;>;b*}YQ)^dIzS#+d@l0rZ2rX{M`VZBv_l#f%A%bu1#@kGk#j z;7hr5JD@=q4gCl2BMB9kXYwSq^b~>zC5llV4ypT}_&W!j$H8$Fx|Ci_xQeLKZ?OBS zQ|>wGSU1Wvm-M2|SE==D)Vj|4OlPcrogyv$Md?r(W}psV8$H`>vS#G%?-238xFh+(=<5yDqn=)8wu&TI~^6Jhdk>PBcp}&(9}X@&4Ct-*{B+6 z1Wh<0{78sUwki8G1!oW_eL>!&#OHJ|!+Q*E5k_iyCOu14J!;2JFD<0*!|ze{JY~cC zLxt%uH}wOOI?QY6jgTkEkNJA&03GyrkaAu~?|-~VTN}~3J8%t_^a-P6Sf+Du;*E&| w6GtXqoj5-6+{8X--^8JbX}+KWH{zr|4!80&xsjmP;59jpt1FX871NadUu_}Cn*aa+ literal 0 HcmV?d00001 diff --git a/demucs/__pycache__/utils.cpython-39.pyc b/demucs/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c91d721e53218e3b0ac9bb73bbb7442483eb4235 GIT binary patch literal 15060 
zcmch8dvF{_df&YEi3JFNAf5y{e2UAXz=z&CN~A=HI!Pz!LQ;}%E!}dkJqs+bFZ9fU zBs}Y=OkN~kML8wsBy~Pr0$g1xVG`&4IOk=Tlh|==Cr<1*$yCbcl&el=Dydw>aZ=$` zqWpf{y9+Ex`>#}%oSvS3_t#&4-|zdn;SCOE4g5ZN+bQjT#V~%GA3J|Zd|bp63|odF z3}IG{nprSS#;vMVvkSJ0+f}FFAkD5O3JLX27Lw|nDx}prQ^=@ywvbitfx>`#=L$LX z9xM#1cenmfVMu-NDeO`2ZhOOpVf8&y7*X%h!d|?c>R4@GVP9>$Fy0x@L}60pPZg%{ zPE;pq(}ii3m#ogz_7`S*XnG(}? z61UC5NpV^17qfTF!e_+G;*dCu*eUTjaRfEYLSDQgj)~)lJ^HdC9uX(*7~;fjt1u_> z;v~j-TDNp_W$_$QVBv?S!{*Az-PAHs+Yn_qwZBpHv8%P(s+|CFX0Kkf*>+hjE~GO8Ew1mgyyCd zIgvGEY?A^-sVxfxNpAe_mn-Xj z-L2G{t7NL)VroEKsewO;4!y^uU4! zKD^Mqe%XD_E%}w|iEHj@_qF`>PQAeQ-Q_TB2G5>8y@bKE7UxQh+Ue%4upvv!y>}%D zT7Gc)$uo~X`K0Rao{9C44DNl3-^P^iLeCAB8?CBv7k!s}QCVs=TERj-9Vdciuj$85 z%?p=fhr@`?%dw52#!eNy5GVCy;;a`GbyT35>BR$@EJadR4zo{+u{c@v>q}@WT`y|l z#My8uEOt^;x{2VfAtzD#{>zu1edo=6|%a4x3|U4$qh= zr;&y|kPOwb#~JCDed*UrehE^w_j3^IG4?V(La=RUu2{D%IT@LowlKleR^5pZf(6xg zWKDwcBU@OjhI}z}1mfQS>weQ#%xg;lhEE_rISq!@?@438__9U$*2d-m+om>tPc7di z%-5GLIH=}c*Mq&T`Ahmi(=UZ?*l@4+Mt}X3>s1@zL~>u%3qn^^K(YXPc22doqo`Z* z>L?Ay0~>g5y;WQE2VT(z47qKLB=@+1IfoQUxevAAEc&5WqKN7KxC#k8HlE-N zg3t(|?S9Mgg`gjn3#wZs@jdJx>@jGK4&~7gF{}r-)vkEcSX$kY`><>kATYd&ajgvQgZl5@*N6^P! 
zWGybiUOd4MA%J)?w#Z&jzhkwnw*7$>LEL;`P!oq{1mQGhOc+c(Z+u{G=OGPW=fQ06 zn`Kj&!n%|Gz|w8q=2F=UeQVD(CvU5c4ggS!>Q!E%+4& z=Iz*;I}_Wh{;k*!8$>O3xpH&I=F6VvYC!EZpD&@|-^F8WoWO$3ov${qCc*P_MZJ>S z3ENe$9TSz%uLW_UTy1zE_C2X6$)EBu1aV@WLgB70Pcwh$m>3$5FOwM!n{( z0(6ySqt=5hkjJG}2!9Fk-%#kqJAL-dGmk&>*kh^}012aDy;dn$uj6?c&$UWyeF5*6 z@DwUbm=Jy+UEIhVdu;A(`B-o)*hnAi#0u6ID|f*%-eQ0id;taJiwI(;Rj)iC;<6lQ zqg*W4Lbe(xlPi&iWE+KildmG}8u%|!YP9NMABjm`mL>xR%Lpj8H|8~gLF`EgZwTnn zS^*dk)`#Yb7S%(M^%jWzHkcm(z(UC(VB^FlaIX^OPuHiBv-pslD`#NZVKMB5%NSA; zYRM1%yQw%Ky*dDBniTC(JgC z?_cPnTURA0)|^LvXskfg;~yl@5*Ty^SRVAUf&c9lupz?4N>bP#S|%CPnF7M!Vhn1K zu^GaovR>ubuFd=k(Mi^V{!N8)g1p`Vz|Sq=QR;~B7C<#tZoN|Sv4&Qo zsicC!IpBILTviYyPN_jYc1}`&0%JoBOJB4AV$xUC`;BI7Uw!lS*r6cHTk;gj>9tf# zwqxBo8mObj|1io0Y}80hgT*DeS8^5ZxeEv`ehh}TncexZD7jp%)(Rq_(jIMFS#uEL zu+yB4(a7GUFmYhF0gk+F)Sq{Z(77c{qn$8~jpI>bB@tp3S1q~lw(&)Xqjus)6Km-o zL)q?EJ*!jC7-!HD;mpQY)DmcQ?s_gbIqZ^IX>w@D8Wzv3R#tr|@3Qo}LWlYj>j~3_ z(h+D;2_Vmffu>9yQK9*YnHMZ$+sYY<*Z%^T54V_zjH(w?R9xb}9sDiOky;tgK{|_=fRKdO}hgFRJD3 z=&rzrr7HsLa~=8pG?-{fV8X7sq*Ts(cjjKmC$w0*_|=Ot#3b&WM<82_kuX?js};l+ zz=avacCAt`*vm42s`qXMYMU=TrhdP6;lAkt>DbK%w(>@$87Jw30UAlism{>il&Ap6 zd$;7XXu1Hl!+-+$CO%+=!>H2&9r7RPyVfNhR2sS1Y3k3TPB4bRaDXE4FAEmRDJY*b zVP(sHZ5 zs^+{s=TmdW4sDU5HT1^DwuNv^~z+1Fx1cz1J7DmgEkJEodqq;t% zSOamg2Q)3TN0Ezf6k#$UgweDD?@pMvG^Sn)udJyQqBUgtsyIE^54eT4%*Ch ztMuV!D##vr=$;^?SfQ`MgiU0uV6wtI528^->WKlt?mfjQqeT0BSUoo&8AUnsWmJpyvEumgEj@ANWNR*B;$}yOYvbwj3 z?!Z5)7$gg7Bz7#bq{T6pPwQ3XI%_&`HL)+fo0J zj1IfNd0=J1GDQ0auuC^acdm=Bgr%jKO{E0BBO)sn<7}}Ae@m;%c&=DnYk5_jBdIvb}Wpsdd_gH2&p;G(Ul>PVEl=CB-!nn8O5FJ<+ve3!992mp!pgvt?DY{+6s$&X!B%XRWq=5L{n zwv9kL2lkizI#MnJY7H;7Dn(3q2`;Rlxai4v;6_DoVd?xXb`yH7wI zf$I_gp;kKw?~A!*K>G}~hagm}_MRy7ktu(I;1m|rU^Eo%5zdEBdl>wh5D93skuVAU zGZc;7w%em3(cUYP?J+$2qO5F&sg+bT8ttXb6dBYTjYj9;B!|H;9u1)7`7j-gVy{m` z6C#V&CZjPi@RZRWi}r~e-uvM89t4p3c?v`9m!udHd!or1Bicv9T?~Kdm`45cC^dpo z|G8IcRP24A)Nzy=TeZ;ZXs;a3Ms}Br#-fQCqdgVnpjjuPeF!I`34~K&CYri!A~yZa zbbA^wFdI!10zyX3MC153GST&C&V&Pih%*?ojlMAMC>5o}{tq+l{n7qtxW7E)8|q@V 
z<7;6w49hhAktHvNIYr~_3|Of>8xC$_MWWegR!Idj+!H3F+1v0hwMWC@&5>x7BLUrq zqfs#%rFl>QBykON?v8F~!iZ*jBRrr+cwpxUZ>QS_qv7ZvMmHOcL*3{Q^|VaF2!}s1 z*M1Xa(!e3ZYOKcQXfy=;;zn*X1Zj4tGtxsM3s3K%NWD9U9EWl6v3JY(>R}o;(cw+- z$rffHl2C`kZyRFPF&yKzjTId|YkXt{n`dA=^4}Nng!|Ahwj8$AEacyT|48;#VCRKq z6Mz`v00$KCaCw~*h+5?)taU(IT}Unk&#IV z;KU8#M0T&E6`KEORRdwytI<4$Z#KN)!`MIL0vEey=AIzN>b1{4m=2#LhDHZjquFc( zm2e&I0`{#u%S=`WF?6tLCzy86!Bux%5AnJl2HNUxn@1_IlHeHvY=dbh{hs@+)0hua z0J-Zl7HaTI1Zo%^DsgH)VE?)&D>w-2ZyV#T)F4mRQRnLX70$lZj8!@N3rbi0yH5tT<&Ux2npv)pStqPUT{UtFs1| zHS!u^Q(}tB&T_ALLER6xTj@&K2PX_`3La1_iWb8z#ISoplE#qAu5wX7N7CfgH18jv zkHY;C`WTAvFttzrh{jt?2-@ylD^7Rw;(;Am_XsZUnhLtzQ(%X>>E2BQ@;zqRahATH zVgT15jsfbWMm3PHFq}4bVSa<{3}}a}R?3@9{rs4Joppqp&2pxET zl2F~L+a?s&2mn)(z=!FbFRQ&5`RVppm;vO^LeY+mQOWNM;T1xeeSlKq(LR(J01%mo za&j}wt>gd_)D;m0q{c&Ma}duES{S3M70!gQWibT+Mx{(*4q{--UV@)$b5FDfN_r9_ zo5Z*PgM#lVQ(`ij+#IG~F&fwl_yw=j6rSm5T){Nao{0tln7)U4;}{{pi-Kvh(UgK` zQ_-|=01KuyOhd`HdeDkMZ5HiL?*M8G8mOV%kM_zDs_ibf4b{HgD@Uldt6Ve{?FWSp zL_-RyO)IFjUqQ8z=s*{$c}AEFL0@k)nhrqDLfp&%s2BsNnu*3h@q+*uMs#o| zAOd8Y0c3-8f?s<8AmtFi)qa#m4GoGC2JT}_0&pEtH#6KZ<0%*l=Z$FZcB%d{oqV(a zy3GJiS<&9j@hz;?ssqRqsZ{}F2EYPXR$$PUN5Lrv$PbE74u=x}LNt)g&Bc8cuCagQpI|b2)>#9>J(aqQd}!Gm`*X?PF@r$9B#cAn-UK z`*FH8dKu|Ky@L{-T75n)zscC2VUR|!@hEPg;Gmx%2)@G`@O_?l&$?Bd%mD5* zTsX6(090G{*5Ndoliy;+Z!>t0!JlJ5wWvKNe}S>P4E`d6zr^4#Gx#eE{wjmN#^A3r z_!|uVCW37RckZDIPW~;H{M!uv4g;E-a|={A`2E&}{6YEah-hu66rIxQ%1r!eX8Z#L zP)v;$)LBp%((0ir!sXAh2vwl`dkp?Qg4hQ9!Erb!1KnrgB>pSR`iBS#S*nI&(1d4L z{v)Q+0N$k^&uMP~txKuIIL9iN87TSwj~V+X43rK$A^$1kKhL120p&07bCUrzpZsSG zw$)vV2VzyP84vGXKlzI+`Og{r5(7n%UuJBK!AS(r{;gWEa|#UiA=PE5Z{Pe2midT3mPuLddni(`zr}+8j={fYpolQZ zml60t%34cf6rM<(Ed>K?-mCj6>yhl^+}|RDK*|pOY(r1TtF*&;Qp` zJ^6X`C>I!9W3Y>uZzJt%B<46G4?lD3JGXIAigUSyCwLWs;8i3zHsl;VG1SwYQ#Ly2 zjFu@ct1}#RepAPlrWHqC=KVXyRB0P0a`3If%ybs+rO_h1ENS?+^(C3?5+G(%#gUHl zkT8%$74U~AcqwR2ax{hP8n~xZWePaX>V-v(uUJdv<)kQ`4IH5jl2yIyB9s7LvtPwoDQ)J^t7p^SA?}_^?c=M(gZ|)>< zyj!OS1ot75-!}gkE;rWa!_EP4I~~Fos#MkCb|%V1>6P@lDc{2wGVm#7sM6pmK^3W-Cst;y59Y 
z{{{1oldCsKcgS$~3!A)UQ>ptmaeWD-$6ZChI7}t3r=VDoweq<*jq3ak zaM#oeOUuRHcP@Yp4cBO;iUzdt;@|=XPR?*gi+9+7zA-9~E(&FVP~>#_!x0MGQ42?q zu6(bdpC9DaCEP1i_Yv6w8^jeu9?wCfukw=X?_yeNZt}k~rg+OnPwKLV{5_`q4+j60 zLI1{!v#LF1(fky->IBNrQ%gjBQsHl&dk;e}5oJ^045`#CeCiTspZ#3cw=J)rC3vEE zu;Fqu-hl^**J;Ri^8~+EeGYKO+Ujr}-nQZsc+j>ic(&{?wVB4Y&1``yD!f(0^B3pg z*Kn?)w}6U$_cfdqbSTh(zJ`N)D*p|IxQTf|RQ@JEYXyldQXrZ>`Gta6XBX4OLdH7LTS# zJw^0%yhpLy4!Y!+ImloSf;cO1YN<)6-RAc(jCcYh1*Y+P60k!%Im(Lt94$XZjbBH4 zK-WbIIWk@ymfdhGu^DCTj;dG2XTAXp&IN!oG zAD(eR>6#xWaL&bR?~w9pKa&jBLO!uuVD%&v4kkzRDI;ZVQ48ugQ!JJ!zKcc0vYIbQ zvV2y_I=vB;gi~{;l-H6_2}ZVFNU3RqhoB*TADSo`_&cPD!XS`#Nh@nPmIa-v{{0}C zl7~qjJiVWV80p1*6ig!^3w;1qdEe+>BGhIZ=KhcigsggyRgWToU5cyGIMu~%#mHEK zt@kbS3gwwaHLCeY{%I715OXmI$gTJBwJrMT!Q&yV-VzF>@@VWnC#4n@VtnI3$!k&r zDMD{cvF=$dBx6S1Cxv$x?xh>jYX&%uJBl{cIh$_#2tU~Yfb@>${ug^9yKvtPIr0&F zm-xB!M^atH6Yy$5_uw6W;-OT8z?JiYjX!b0@jL!R#1To6!k>U7c$kEDQvFE`)>Gs_ z(4lx_zE!K;`U1xK(oMhA!U8|g9al(ow|_@>x-)ddySN6rNuasp#_R*l&8d_tet4_T>$h8!aq(<)(jloj zU_br`4)Nhf?&xG&&W+)No0!{|x^hKvS7)R$kFxMJE57O%th{|oDx^6e&fG9Jj{d=e z(G|GkzlP%SCm8%m2Ar6Dlyl$iL&X$J4eX$~#ooxfu!0hDj#W6DzNO$`n0N_~D%&Hx zYQA$%ppd%Jkoe<`U`L<5Ei$lUQqvDks=GP*y5*?&C=TH0^-VwQTU_^1`_|^Z?NAn@ zIKk1YDSU>p?WwC-%L!=Q)dj3q>DE$J?UKn(o*38y75C zf`8+JqleK0d&jfG`$luPjX03aW=FHR?C?-NogRM$y#ML=BCZAJvV+h!9q({=} F{|l0OR~Y~R literal 0 HcmV?d00001 diff --git a/demucs/apply.py b/demucs/apply.py index fdb096c..7920ad5 100644 --- a/demucs/apply.py +++ b/demucs/apply.py @@ -10,11 +10,13 @@ inteprolation between chunks, as well as the "shift trick". 
from concurrent.futures import ThreadPoolExecutor import random import typing as tp +from multiprocessing import Process,Queue,Pipe import torch as th from torch import nn from torch.nn import functional as F import tqdm +import tkinter as tk from .demucs import Demucs from .hdemucs import HDemucs @@ -22,6 +24,7 @@ from .utils import center_trim, DummyPoolExecutor Model = tp.Union[Demucs, HDemucs] +progress_bar_num = 0 class BagOfModels(nn.Module): def __init__(self, models: tp.List[Model], @@ -107,7 +110,6 @@ class TensorChunk: assert out.shape[-1] == target_length return out - def tensor_chunk(tensor_or_chunk): if isinstance(tensor_or_chunk, TensorChunk): return tensor_or_chunk @@ -115,10 +117,9 @@ def tensor_chunk(tensor_or_chunk): assert isinstance(tensor_or_chunk, th.Tensor) return TensorChunk(tensor_or_chunk) - -def apply_model(model, mix, shifts=1, split=True, - overlap=0.25, transition_power=1., progress=False, device=None, - num_workers=0, pool=None): +def apply_model(model, mix, gui_progress_bar: tk.Variable, widget_text: tk.Text, update_prog, total_files, file_num, inference_type, shifts=1, split=True, + overlap=0.25, transition_power=1., progress=True, device=None, + num_workers=0, pool=None, segmen=False): """ Apply model to a given mixture. @@ -136,6 +137,12 @@ def apply_model(model, mix, shifts=1, split=True, When `device` is different from `mix.device`, only local computations will be on `device`, while the entire tracks will be stored on `mix.device`. 
""" + + base_text = 'File {file_num}/{total_files} '.format(file_num=file_num, + total_files=total_files) + + global fut_length + if device is None: device = mix.device else: @@ -145,7 +152,12 @@ def apply_model(model, mix, shifts=1, split=True, pool = ThreadPoolExecutor(num_workers) else: pool = DummyPoolExecutor() + kwargs = { + 'gui_progress_bar': gui_progress_bar, + 'widget_text': widget_text, + 'update_prog': update_prog, + 'segmen': segmen, 'shifts': shifts, 'split': split, 'overlap': overlap, @@ -153,17 +165,35 @@ def apply_model(model, mix, shifts=1, split=True, 'progress': progress, 'device': device, 'pool': pool, + 'total_files': total_files, + 'file_num': file_num, + 'inference_type': inference_type } + if isinstance(model, BagOfModels): # Special treatment for bag of model. # We explicitely apply multiple times `apply_model` so that the random shifts # are different for each model. + global bag_num + global current_model + global progress_bar + global prog_bar + #global percent_prog_del + + #percent_prog_del = gui_progress_bar.get() + + progress_bar = 0 + prog_bar = 0 estimates = 0 totals = [0] * len(model.sources) + bag_num = len(model.models) + fut_length = 0 + current_model = 0 #(bag_num + 1) for sub_model, weight in zip(model.models, model.weights): original_model_device = next(iter(sub_model.parameters())).device sub_model.to(device) - + fut_length += fut_length + current_model += 1 out = apply_model(sub_model, mix, **kwargs) sub_model.to(original_model_device) for k, inst_weight in enumerate(weight): @@ -179,6 +209,7 @@ def apply_model(model, mix, shifts=1, split=True, model.to(device) assert transition_power >= 1, "transition_power < 1 leads to weird behavior." 
batch, channels, length = mix.shape + if split: kwargs['split'] = False out = th.zeros(batch, len(model.sources), channels, length, device=mix.device) @@ -202,9 +233,26 @@ def apply_model(model, mix, shifts=1, split=True, future = pool.submit(apply_model, model, chunk, **kwargs) futures.append((future, offset)) offset += segment - if progress: - futures = tqdm.tqdm(futures, unit_scale=scale, ncols=120, unit='seconds') for future, offset in futures: + if segmen: + fut_length = len(futures) + full_fut_length = (fut_length * bag_num) + send_back = full_fut_length * 2 + progress_bar += 100 + prog_bar += 1 + full_step = (progress_bar / full_fut_length) + percent_prog = f"{base_text}Demucs Inference Progress: {prog_bar}/{full_fut_length} | {round(full_step)}%" + if inference_type == 'demucs_only': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.1 + (1.7/send_back * prog_bar))) + elif inference_type == 'inference_mdx': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.35 + (1.05/send_back * prog_bar))) + elif inference_type == 'inference_vr': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.6 + (0.7/send_back * prog_bar))) + widget_text.percentage(percent_prog) + #gui_progress_bar.set(step) chunk_out = future.result() chunk_length = chunk_out.shape[-1] out[..., offset:offset + segment] += (weight[:chunk_length] * chunk_out).to(mix.device) diff --git a/demucs/utils.py b/demucs/utils.py index 22f3b9a..f09bc18 100644 --- a/demucs/utils.py +++ b/demucs/utils.py @@ -22,6 +22,7 @@ import socket import tempfile import warnings import zlib +import tkinter as tk from diffq import UniformQuantizer, DiffQuantizer import torch as th @@ -228,7 +229,7 @@ def tensor_chunk(tensor_or_chunk): return TensorChunk(tensor_or_chunk) -def apply_model_v1(model, mix, shifts=None, split=False, progress=False): +def apply_model_v1(model, mix, gui_progress_bar: tk.Variable, widget_text: tk.Text, update_prog, total_files, file_num, inference_type, 
shifts=None, split=False, progress=False, segmen=True): """ Apply model to a given mixture. @@ -242,6 +243,10 @@ def apply_model_v1(model, mix, shifts=None, split=False, progress=False): Useful for model with large memory footprint like Tasnet. progress (bool): if True, show a progress bar (requires split=True) """ + + base_text = 'File {file_num}/{total_files} '.format(file_num=file_num, + total_files=total_files) + channels, length = mix.size() device = mix.device if split: @@ -249,11 +254,31 @@ def apply_model_v1(model, mix, shifts=None, split=False, progress=False): shift = model.samplerate * 10 offsets = range(0, length, shift) scale = 10 + progress_bar = 0 + prog_bar = 0 if progress: offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds') for offset in offsets: + if segmen: + fut_length = len(offsets) + send_back = fut_length * 2 + progress_bar += 100 + prog_bar += 1 + if inference_type == 'demucs_only': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.1 + (1.7/send_back * prog_bar))) + elif inference_type == 'inference_mdx': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.35 + (1.05/send_back * prog_bar))) + elif inference_type == 'inference_vr': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.6 + (0.7/send_back * prog_bar))) + step = (progress_bar / fut_length) + percent_prog = f"{base_text}Demucs v1 Inference Progress: {prog_bar}/{fut_length} | {round(step)}%" + widget_text.percentage(percent_prog) + #gui_progress_bar.set(step) chunk = mix[..., offset:offset + shift] - chunk_out = apply_model_v1(model, chunk, shifts=shifts) + chunk_out = apply_model_v1(model, chunk, gui_progress_bar, widget_text, update_prog, total_files, file_num, inference_type, shifts=shifts) out[..., offset:offset + shift] = chunk_out offset += shift return out @@ -265,7 +290,7 @@ def apply_model_v1(model, mix, shifts=None, split=False, progress=False): out = 0 for offset in offsets[:shifts]: shifted = mix[..., 
offset:offset + length + max_shift] - shifted_out = apply_model_v1(model, shifted) + shifted_out = apply_model_v1(model, shifted, gui_progress_bar, widget_text, update_prog, total_files, file_num, inference_type) out += shifted_out[..., max_shift - offset:max_shift - offset + length] out /= shifts return out @@ -277,8 +302,8 @@ def apply_model_v1(model, mix, shifts=None, split=False, progress=False): out = model(padded.unsqueeze(0))[0] return center_trim(out, mix) -def apply_model_v2(model, mix, shifts=None, split=False, - overlap=0.25, transition_power=1., progress=False): +def apply_model_v2(model, mix, gui_progress_bar: tk.Variable, widget_text: tk.Text, update_prog, total_files, file_num, inference_type, shifts=None, split=False, + overlap=0.25, transition_power=1., progress=False, segmen=True): """ Apply model to a given mixture. @@ -292,6 +317,16 @@ def apply_model_v2(model, mix, shifts=None, split=False, Useful for model with large memory footprint like Tasnet. progress (bool): if True, show a progress bar (requires split=True) """ + + global prog_space + global percent_prog + + percent_prog = 0 + + base_text = 'File {file_num}/{total_files} '.format(file_num=file_num, + total_files=total_files) + + #widget_text.remove(percent_prog) assert transition_power >= 1, "transition_power < 1 leads to weird behavior." device = mix.device channels, length = mix.shape @@ -313,9 +348,30 @@ def apply_model_v2(model, mix, shifts=None, split=False, # If the overlap < 50%, this will translate to linear transition when # transition_power is 1. 
weight = (weight / weight.max())**transition_power + progress_bar = 0 + prog_bar = 0 for offset in offsets: + if segmen: + fut_length = len(offsets) + send_back = fut_length * 2 + progress_bar += 100 + prog_bar += 1 + if inference_type == 'demucs_only': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.1 + (1.7/send_back * prog_bar))) + elif inference_type == 'inference_mdx': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.35 + (1.05/send_back * prog_bar))) + elif inference_type == 'inference_vr': + update_prog(gui_progress_bar, total_files, file_num, + step=(0.6 + (0.7/send_back * prog_bar))) + step = (progress_bar / fut_length) + percent_prog = f"{base_text}Demucs v2 Inference Progress: {prog_bar}/{fut_length} | {round(step)}%" + prog_space = len(percent_prog) + prog_space = prog_bar*prog_space + widget_text.percentage(percent_prog) chunk = TensorChunk(mix, offset, segment) - chunk_out = apply_model_v2(model, chunk, shifts=shifts) + chunk_out = apply_model_v2(model, chunk, gui_progress_bar, widget_text, update_prog, total_files, file_num, inference_type, shifts=shifts) chunk_length = chunk_out.shape[-1] out[..., offset:offset + segment] += weight[:chunk_length] * chunk_out sum_weight[offset:offset + segment] += weight[:chunk_length] @@ -331,7 +387,7 @@ def apply_model_v2(model, mix, shifts=None, split=False, for _ in range(shifts): offset = random.randint(0, max_shift) shifted = TensorChunk(padded_mix, offset, length + max_shift - offset) - shifted_out = apply_model_v2(model, shifted) + shifted_out = apply_model_v2(model, shifted, gui_progress_bar, widget_text, update_prog, total_files, file_num, inference_type) out += shifted_out[..., max_shift - offset:] out /= shifts return out diff --git a/inference_MDX.py b/inference_MDX.py index 56b3486..79c736c 100644 --- a/inference_MDX.py +++ b/inference_MDX.py @@ -35,6 +35,7 @@ import pydub import shutil import soundfile as sf import subprocess +from UVR import MainWindow import sys 
import time import time # Timer @@ -61,45 +62,50 @@ class Predictor(): self.noise_pro_select_set_var = tk.StringVar(value='MDX-NET_Noise_Profile_14_kHz') self.compensate_v_var = tk.StringVar(value=1.03597672895) - top= Toplevel() + mdx_model_set = Toplevel() - top.geometry("740x550") - window_height = 740 - window_width = 550 + mdx_model_set.geometry("490x515") + window_height = 490 + window_width = 515 - top.title("Specify Parameters") + mdx_model_set.title("Specify Parameters") - top.resizable(False, False) # This code helps to disable windows from resizing + mdx_model_set.resizable(False, False) # This code helps to disable windows from resizing - top.attributes("-topmost", True) + mdx_model_set.attributes("-topmost", True) - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = mdx_model_set.winfo_screenwidth() + screen_height = mdx_model_set.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + mdx_model_set.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + + x = main_window.winfo_x() + y = main_window.winfo_y() + mdx_model_set.geometry("+%d+%d" %(x+50,y+150)) + mdx_model_set.wm_transient(main_window) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + mdx_model_set.iconbitmap('img\\UVR-Icon-v2.ico') - tabControl = ttk.Notebook(top) + mdx_model_set_window = ttk.Notebook(mdx_model_set) - tabControl.pack(expand = 1, fill ="both") + mdx_model_set_window.pack(expand = 1, fill ="both") - tabControl.grid_rowconfigure(0, weight=1) - tabControl.grid_columnconfigure(0, weight=1) + mdx_model_set_window.grid_rowconfigure(0, weight=1) + mdx_model_set_window.grid_columnconfigure(0, weight=1) - frame0=Frame(tabControl,highlightbackground='red',highlightthicknes=0) + 
frame0=Frame(mdx_model_set_window,highlightbackground='red',highlightthicknes=0) frame0.grid(row=0,column=0,padx=0,pady=0) - frame0.tkraise(frame0) + #frame0.tkraise(frame0) space_small = ' '*20 space_small_1 = ' '*10 - l0=tk.Label(frame0, text=f'{space_small}Stem Type{space_small}', font=("Century Gothic", "9"), foreground='#13a4c9') + l0=tk.Label(frame0, text=f'\n{space_small}Stem Type{space_small}', font=("Century Gothic", "9"), foreground='#13a4c9') l0.grid(row=3,column=0,padx=0,pady=5) l0=ttk.OptionMenu(frame0, self.mdxnetModeltype_var, None, 'Vocals', 'Instrumental', 'Other', 'Bass', 'Drums') @@ -160,18 +166,15 @@ class Predictor(): torch.cuda.empty_cache() gui_progress_bar.set(0) widget_button.configure(state=tk.NORMAL) # Enable Button - top.destroy() + self.okVar.set(1) + stop_button() + mdx_model_set.destroy() return l0=ttk.Button(frame0,text="Stop Process", command=stop) l0.grid(row=13,column=1,padx=0,pady=30) - def change_event(): - self.okVar.set(1) - #top.destroy() - pass - - top.protocol("WM_DELETE_WINDOW", change_event) + mdx_model_set.protocol("WM_DELETE_WINDOW", stop) frame0.wait_variable(self.okVar) @@ -217,13 +220,13 @@ class Predictor(): stem_text_b = 'Vocals' elif stemset_n == '(Other)': stem_text_a = 'Other' - stem_text_b = 'the no \"Other\" track' + stem_text_b = 'mixture without selected stem' elif stemset_n == '(Drums)': stem_text_a = 'Drums' - stem_text_b = 'no \"Drums\" track' + stem_text_b = 'mixture without selected stem' elif stemset_n == '(Bass)': stem_text_a = 'Bass' - stem_text_b = 'No \"Bass\" track' + stem_text_b = 'mixture without selected stem' else: stem_text_a = 'Vocals' stem_text_b = 'Instrumental' @@ -263,7 +266,7 @@ class Predictor(): widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') demucs_model_set = 'UVR_Demucs_Model_1' - top.destroy() + mdx_model_set.destroy() def prediction_setup(self): @@ -287,6 +290,10 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(state) 
widget_text.write('Done!\n') + if not data['segment'] == 'Default': + widget_text.write(base_text + 'Segments is only available in Demucs v3. Please use \"Chunks\" instead.\n') + else: + pass if demucs_model_version == 'v2': if '48' in demucs_model_set: @@ -306,6 +313,10 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(torch.load("models/Demucs_Models/"f"{demucs_model_set}")) widget_text.write('Done!\n') + if not data['segment'] == 'Default': + widget_text.write(base_text + 'Segments is only available in Demucs v3. Please use \"Chunks\" instead.\n') + else: + pass self.demucs.eval() if demucs_model_version == 'v3': @@ -324,6 +335,37 @@ class Predictor(): widget_text.write('Done!\n') if isinstance(self.demucs, BagOfModels): widget_text.write(base_text + f"Selected Demucs model is a bag of {len(self.demucs.models)} model(s).\n") + + if data['segment'] == 'Default': + segment = None + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + else: + try: + segment = int(data['segment']) + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + if split_mode: + widget_text.write(base_text + "Segments set to "f"{segment}.\n") + except: + segment = None + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment self.onnx_models = {} c = 0 @@ -394,13 +436,13 @@ class Predictor(): if data['modelFolder']: vocal_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}',) vocal_path_mp3 = 
'{save_path}/{file_name}.mp3'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}',) vocal_path_flac = '{save_path}/{file_name}.flac'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}',) else: vocal_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, @@ -428,13 +470,13 @@ class Predictor(): if data['modelFolder']: Instrumental_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}',) Instrumental_path_mp3 = '{save_path}/{file_name}.mp3'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}',) Instrumental_path_flac = '{save_path}/{file_name}.flac'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}',) else: Instrumental_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, @@ -461,13 +503,13 @@ class Predictor(): if data['modelFolder']: non_reduced_vocal_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}_No_Reduction',) + file_name = f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}_No_Reduction',) non_reduced_vocal_path_mp3 = '{save_path}/{file_name}.mp3'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}_No_Reduction',) + file_name = 
f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}_No_Reduction',) non_reduced_vocal_path_flac = '{save_path}/{file_name}.flac'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{vocal_name}_{model_set_name}_No_Reduction',) + file_name = f'{os.path.basename(_basename)}_{vocal_name}_{mdx_model_name}_No_Reduction',) else: non_reduced_vocal_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, @@ -482,13 +524,13 @@ class Predictor(): if data['modelFolder']: non_reduced_Instrumental_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}_No_Reduction',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}_No_Reduction',) non_reduced_Instrumental_path_mp3 = '{save_path}/{file_name}.mp3'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}_No_Reduction',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}_No_Reduction',) non_reduced_Instrumental_path_flac = '{save_path}/{file_name}.flac'.format( save_path=save_path, - file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{model_set_name}_No_Reduction',) + file_name = f'{os.path.basename(_basename)}_{Instrumental_name}_{mdx_model_name}_No_Reduction',) else: non_reduced_Instrumental_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, @@ -918,19 +960,21 @@ class Predictor(): widget_text.write(base_text + 'Completed Separation!\n') def demix(self, mix): + global chunk_set + # 1 = demucs only # 0 = onnx only if data['chunks'] == 'Full': chunk_set = 0 - else: - chunk_set = data['chunks'] - - if data['chunks'] == 'Auto': + widget_text.write(base_text + "Chunk size user-set to \"Full\"... 
\n") + elif data['chunks'] == 'Auto': if data['gpu'] == 0: try: gpu_mem = round(torch.cuda.get_device_properties(0).total_memory/1.074e+9) except: widget_text.write(base_text + 'NVIDIA GPU Required for conversion!\n') + data['gpu'] = -1 + pass if int(gpu_mem) <= int(6): chunk_set = int(5) widget_text.write(base_text + 'Chunk size auto-set to 5... \n') @@ -954,9 +998,9 @@ class Predictor(): if int(sys_mem) >= int(17): chunk_set = int(60) widget_text.write(base_text + 'Chunk size auto-set to 60... \n') - elif data['chunks'] == 'Full': + elif data['chunks'] == '0': chunk_set = 0 - widget_text.write(base_text + "Chunk size set to full... \n") + widget_text.write(base_text + "Chunk size user-set to \"Full\"... \n") else: chunk_set = int(data['chunks']) widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... \n") @@ -986,29 +1030,33 @@ class Predictor(): segmented_mix[skip] = mix[:,start:end].copy() if end == samples: break - if not data['demucsmodel']: sources = self.demix_base(segmented_mix, margin_size=margin) elif data['demucs_only']: - if split_mode == True: + if no_chunk_demucs == False: sources = self.demix_demucs_split(mix) - if split_mode == False: + if no_chunk_demucs == True: sources = self.demix_demucs(segmented_mix, margin_size=margin) else: # both, apply spec effects base_out = self.demix_base(segmented_mix, margin_size=margin) - #print(split_mode) - if demucs_model_version == 'v1': - demucs_out = self.demix_demucs_v1(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + demucs_out = self.demix_demucs_v1_split(mix) + if no_chunk_demucs == True: + demucs_out = self.demix_demucs_v1(segmented_mix, margin_size=margin) if demucs_model_version == 'v2': - demucs_out = self.demix_demucs_v2(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + demucs_out = self.demix_demucs_v2_split(mix) + if no_chunk_demucs == True: + demucs_out = self.demix_demucs_v2(segmented_mix, margin_size=margin) if demucs_model_version == 'v3': - 
if split_mode == True: + if no_chunk_demucs == False: demucs_out = self.demix_demucs_split(mix) - if split_mode == False: + if no_chunk_demucs == True: demucs_out = self.demix_demucs(segmented_mix, margin_size=margin) + nan_count = np.count_nonzero(np.isnan(demucs_out)) + np.count_nonzero(np.isnan(base_out)) if nan_count > 0: print('Warning: there are {} nan values in the array(s).'.format(nan_count)) @@ -1040,10 +1088,15 @@ class Predictor(): onnxitera = len(mixes) onnxitera_calc = onnxitera * 2 gui_progress_bar_onnx = 0 - widget_text.write(base_text + "Running ONNX Inference...\n") - widget_text.write(base_text + "Processing "f"{onnxitera} slices... ") + progress_bar = 0 + print(' Running ONNX Inference...') + if onnxitera == 1: + widget_text.write(base_text + f"Running ONNX Inference... ") + else: + widget_text.write(base_text + f"Running ONNX Inference...{space}\n") + for mix in mixes: gui_progress_bar_onnx += 1 if data['demucsmodel']: @@ -1053,6 +1106,15 @@ class Predictor(): update_progress(**progress_kwargs, step=(0.1 + (0.9/onnxitera * gui_progress_bar_onnx))) + progress_bar += 100 + step = (progress_bar / onnxitera) + + if onnxitera == 1: + pass + else: + percent_prog = f"{base_text}MDX-Net Inference Progress: {gui_progress_bar_onnx}/{onnxitera} | {round(step)}%" + widget_text.percentage(percent_prog) + cmix = mixes[mix] sources = [] n_sample = cmix.shape[1] @@ -1088,21 +1150,35 @@ class Predictor(): chunked_sources.append(sources) _sources = np.concatenate(chunked_sources, axis=-1) del self.onnx_models - widget_text.write('Done!\n') + + if onnxitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + return _sources def demix_demucs(self, mix, margin_size): - #print('shift_set ', shift_set) processed = {} demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Split Mode is off. 
(Chunks enabled for Demucs Model)\n") - widget_text.write(base_text + "Running Demucs Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs Inference... ") + else: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") + print(' Running Demucs Inference...') for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -1110,8 +1186,17 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - #print(split_mode) - sources = apply_model(self.demucs, cmix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] + sources = apply_model(self.demucs, cmix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=False, + **progress_demucs_kwargs)[0] sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -1123,17 +1208,21 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') #print('the demucs model is done running') return sources def demix_demucs_split(self, mix): - - #print('shift_set ', shift_set) - widget_text.write(base_text + "Split Mode is on. 
(Chunks disabled for Demucs Model)\n") - widget_text.write(base_text + "Running Demucs Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + + if split_mode: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs Inference... ") print(' Running Demucs Inference...') mix = torch.tensor(mix, dtype=torch.float32) @@ -1141,14 +1230,26 @@ class Predictor(): mix = (mix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model(self.demucs, mix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] + sources = apply_model(self.demucs, + mix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=True, + **progress_demucs_kwargs)[0] - widget_text.write('Done!\n') + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] - - #print('the demucs model is done running') return sources @@ -1157,11 +1258,21 @@ class Predictor(): demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v1 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + progress_bar = 0 print(' Running Demucs Inference...') + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v1 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -1169,7 +1280,15 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model_v1(self.demucs, cmix.to(device), split=split_mode, shifts=shift_set) + sources = apply_model_v1(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + shifts=shift_set, + **progress_demucs_kwargs) sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -1181,7 +1300,44 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + + return sources + + def demix_demucs_v1_split(self, mix): + + print(' Running Demucs Inference...') + if split_mode: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + + with torch.no_grad(): + sources = apply_model_v1(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + shifts=shift_set, + **progress_demucs_kwargs) + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + return sources def demix_demucs_v2(self, mix, margin_size): @@ -1189,11 +1345,22 @@ class Predictor(): demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v2 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") - print(' Running Demucs Inference...') + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v2 Inference... ") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v2 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) + update_progress(**progress_kwargs, step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -1201,7 +1368,16 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model_v2(self.demucs, cmix.to(device), split=split_mode, overlap=overlap_set, shifts=shift_set) + sources = apply_model_v2(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + overlap=overlap_set, + shifts=shift_set, + **progress_demucs_kwargs) sources = (sources * ref.std() + 
ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -1213,9 +1389,46 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + return sources + def demix_demucs_v2_split(self, mix): + print(' Running Demucs Inference...') + + if split_mode: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference... ") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + with torch.no_grad(): + sources = apply_model_v2(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + overlap=overlap_set, + shifts=shift_set, + **progress_demucs_kwargs) + + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + + return sources data = { @@ -1240,6 +1453,7 @@ data = { 'modelFolder': False, 'mp3bit': '320k', 'n_fft_scale': 6144, + 'no_chunk': False, 'noise_pro_select': 'Auto Select', 'noisereduc_s': 3, 'non_red': False, @@ -1247,6 +1461,7 @@ data = { 'normalize': False, 'overlap': 0.5, 'saveFormat': 'Wav', + 'segment': 'Default', 'shifts': 0, 'split_mode': False, 'voc_only': False, @@ -1286,6 +1501,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress_var: tk.Variable, + stop_thread, **kwargs: dict): global widget_text @@ -1299,8 +1515,10 @@ def main(window: tk.Wm, global n_fft_scale_set global dim_f_set global progress_kwargs + global progress_demucs_kwargs global base_text global model_set_name + global mdx_model_name global stemset_n global stem_text_a global stem_text_b @@ -1325,17 +1543,20 @@ def main(window: tk.Wm, global stime global model_hash global 
demucs_switch + global no_chunk_demucs global inst_only global voc_only + global space + global main_window + global stop_button - - # Update default settings - default_chunks = data['chunks'] - default_noisereduc_s = data['noisereduc_s'] + stop_button = stop_thread widget_text = text_widget gui_progress_bar = progress_var widget_button = button_widget + main_window = window + #Error Handling @@ -1361,6 +1582,15 @@ def main(window: tk.Wm, data.update(kwargs) + global update_prog + + # Update default settings + update_prog = update_progress + default_chunks = data['chunks'] + default_noisereduc_s = data['noisereduc_s'] + no_chunk_demucs = data['no_chunk'] + space = ' '*90 + if data['DemucsModel_MDX'] == "Tasnet v1": demucs_model_set_name = 'tasnet.th' demucs_model_version = 'v1' @@ -1436,6 +1666,10 @@ def main(window: tk.Wm, mdx_model_name = 'UVR_MDXNET_KARA' elif model_set_name == 'UVR-MDX-NET Main': mdx_model_name = 'UVR_MDXNET_Main' + elif model_set_name == 'UVR-MDX-NET Inst 1': + mdx_model_name = 'UVR_MDXNET_Inst_1' + elif model_set_name == 'UVR-MDX-NET Inst 2': + mdx_model_name = 'UVR_MDXNET_Inst_2' else: mdx_model_name = data['mdxnetModel'] @@ -1583,12 +1817,18 @@ def main(window: tk.Wm, _basename = f'{data["export_path"]}/{str(randomnum)}_{file_num}_{os.path.splitext(os.path.basename(music_file))[0]}' else: _basename = f'{data["export_path"]}/{file_num}_{os.path.splitext(os.path.basename(music_file))[0]}' + + + inference_type = 'inference_mdx' + # -Get text and update progress- base_text = get_baseText(total_files=len(data['input_paths']), file_num=file_num) progress_kwargs = {'progress_var': progress_var, 'total_files': len(data['input_paths']), 'file_num': file_num} + progress_demucs_kwargs = {'total_files': len(data['input_paths']), + 'file_num': file_num, 'inference_type': inference_type} if 'UVR' in demucs_model_set: @@ -1603,10 +1843,11 @@ def main(window: tk.Wm, if stemset_n == '(Instrumental)': if not 'UVR' in demucs_model_set: - 
widget_text.write(base_text + 'The selected Demucs model cannot be used with this model.\n') - widget_text.write(base_text + 'Only 2 stem Demucs models are compatible with this model.\n') - widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') - demucs_model_set = 'UVR_Demucs_Model_1' + if data['demucsmodel']: + widget_text.write(base_text + 'The selected Demucs model cannot be used with this model.\n') + widget_text.write(base_text + 'Only 2 stem Demucs models are compatible with this model.\n') + widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') + demucs_model_set = 'UVR_Demucs_Model_1' try: if float(data['noisereduc_s']) >= 11: @@ -1904,7 +2145,7 @@ def main(window: tk.Wm, text_widget.write(f'\nError Received:\n\n') text_widget.write(f'Could not write audio file.\n') text_widget.write(f'This could be due to low storage on target device or a system permissions issue.\n') - text_widget.write(f"\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write(f"\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write(f'\nIf the error persists, please contact the developers.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') try: @@ -2013,7 +2254,7 @@ def main(window: tk.Wm, text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') text_widget.write(f'\nError Received:\n') - text_widget.write("\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write("\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write("\n" + f'Please address the error and try again.' 
+ "\n") text_widget.write(f'If this error persists, please contact the developers with the error details.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') diff --git a/inference_demucs.py b/inference_demucs.py index 085be79..825d731 100644 --- a/inference_demucs.py +++ b/inference_demucs.py @@ -6,6 +6,7 @@ from demucs.pretrained import get_model as _gm from demucs.tasnet_v2 import ConvTasNet from demucs.utils import apply_model_v1 from demucs.utils import apply_model_v2 +import demucs.apply from diffq import DiffQuantizer from lib_v5 import spec_utils from lib_v5.model_param_init import ModelParameters @@ -30,6 +31,7 @@ import tkinter as tk import torch import torch.hub import traceback # Error Message Recent Calls +import threading import warnings import zlib @@ -58,8 +60,8 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(state) widget_text.write('Done!\n') - if not data['segment'] == 'None': - widget_text.write(base_text + 'Segments is only available in Demucs v3. Please use \"Chunks\" instead.\n') + if not data['segment'] == 'Default': + widget_text.write(base_text + 'Note: Segments only available for Demucs v3\n') else: pass @@ -81,8 +83,8 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(torch.load("models/Demucs_Models/"f"{demucs_model_set_name}")) widget_text.write('Done!\n') - if not data['segment'] == 'None': - widget_text.write(base_text + 'Segments is only available in Demucs v3. 
Please use \"Chunks\" instead.\n') + if not data['segment'] == 'Default': + widget_text.write(base_text + 'Note: Segments only available for Demucs v3\n') else: pass self.demucs.eval() @@ -101,7 +103,7 @@ class Predictor(): if isinstance(self.demucs, BagOfModels): widget_text.write(base_text + f"Selected model is a bag of {len(self.demucs.models)} models.\n") - if data['segment'] == 'None': + if data['segment'] == 'Default': segment = None if isinstance(self.demucs, BagOfModels): if segment is not None: @@ -120,7 +122,8 @@ class Predictor(): else: if segment is not None: sub.segment = segment - widget_text.write(base_text + "Segments set to "f"{segment}.\n") + if split_mode: + widget_text.write(base_text + "Segments set to "f"{segment}.\n") except: segment = None if isinstance(self.demucs, BagOfModels): @@ -145,7 +148,7 @@ class Predictor(): mix = mix.T sources = self.demix(mix.T) - widget_text.write(base_text + 'Inferences complete!\n') + widget_text.write(base_text + 'Inference complete!\n') #Main Save Path save_path = os.path.dirname(_basename) @@ -155,6 +158,25 @@ class Predictor(): drums_name = '(Drums)' bass_name = '(Bass)' + if stemset_n == '(Vocals)': + stem_text_a = 'Vocals' + stem_text_b = 'Instrumental' + elif stemset_n == '(Instrumental)': + stem_text_a = 'Instrumental' + stem_text_b = 'Vocals' + elif stemset_n == '(Other)': + stem_text_a = 'Other' + stem_text_b = 'mixture without selected stem' + elif stemset_n == '(Drums)': + stem_text_a = 'Drums' + stem_text_b = 'mixture without selected stem' + elif stemset_n == '(Bass)': + stem_text_a = 'Bass' + stem_text_b = 'mixture without selected stem' + else: + stem_text_a = 'Vocals' + stem_text_b = 'Instrumental' + vocals_path = '{save_path}/{file_name}.wav'.format( save_path=save_path, file_name = f'{os.path.basename(_basename)}_{vocals_name}',) @@ -202,8 +224,6 @@ class Predictor(): save_path=save_path, file_name = f'{os.path.basename(_basename)}_{bass_name}',) - - #If not 'All Stems' if stemset_n == 
'(Vocals)': @@ -273,7 +293,7 @@ class Predictor(): if not data['demucs_stems'] == 'All Stems': if data['inst_only_b']: - widget_text.write(base_text + 'Preparing mixture without selected stem...') + widget_text.write(base_text + 'Preparing mixture without selected stem... ') else: widget_text.write(base_text + 'Saving Stem(s)... ') else: @@ -415,7 +435,7 @@ class Predictor(): widget_text.write('Done!\n') update_progress(**progress_kwargs, - step=(0.9)) + step=(1)) if data['demucs_stems'] == 'All Stems': pass @@ -430,7 +450,7 @@ class Predictor(): 'files':[str(music_file), vocal_path], } ] - widget_text.write(base_text + 'Saving Instrumental... ') + widget_text.write(base_text + f'Saving {stem_text_b}... ') for i, e in tqdm(enumerate(finalfiles)): wave, specs = {}, {} @@ -469,7 +489,6 @@ class Predictor(): step=(1)) sf.write(Instrumental_path, normalization_set(spec_utils.cmb_spectrogram_to_wave(-v_spec, mp)), mp.param['sr'], subtype=wav_type_set) - if data['inst_only_b']: if file_exists_v == 'there': @@ -482,7 +501,6 @@ class Predictor(): widget_text.write('Done!\n') - if not data['demucs_stems'] == 'All Stems': if data['saveFormat'] == 'Mp3': @@ -604,76 +622,65 @@ class Predictor(): widget_text.write(base_text + 'Completed Separation!\n') def demix(self, mix): - + global chunk_set # 1 = demucs only # 0 = onnx only + if data['chunks_d'] == 'Full': - if split_mode == True: - chunk_set = 0 - else: - widget_text.write(base_text + "Chunk size set to full... 
\n") - chunk_set = 0 - else: - chunk_set = data['chunks'] - - if data['chunks_d'] == 'Auto': - if split_mode == True: - widget_text.write(base_text + "Split Mode is on (Chunks disabled).\n") - chunk_set = 0 - else: - widget_text.write(base_text + "Split Mode is off (Chunks enabled).\n") - if data['gpu'] == 0: - try: - gpu_mem = round(torch.cuda.get_device_properties(0).total_memory/1.074e+9) - except: - widget_text.write(base_text + 'NVIDIA GPU Required for conversion!\n') - if int(gpu_mem) <= int(6): - chunk_set = int(10) - widget_text.write(base_text + 'Chunk size auto-set to 10... \n') - if gpu_mem in [7, 8, 9]: - chunk_set = int(30) - widget_text.write(base_text + 'Chunk size auto-set to 30... \n') - if gpu_mem in [10, 11, 12, 13, 14, 15]: - chunk_set = int(50) - widget_text.write(base_text + 'Chunk size auto-set to 50... \n') - if int(gpu_mem) >= int(16): - chunk_set = int(0) - widget_text.write(base_text + 'Chunk size auto-set to Full... \n') - if data['gpu'] == -1: - sys_mem = psutil.virtual_memory().total >> 30 - if int(sys_mem) <= int(4): - chunk_set = int(5) + chunk_set = 0 + elif data['chunks_d'] == 'Auto': + if data['gpu'] == 0: + try: + gpu_mem = round(torch.cuda.get_device_properties(0).total_memory/1.074e+9) + except: + widget_text.write(base_text + 'NVIDIA GPU Required for conversion!\n') + if int(gpu_mem) <= int(6): + chunk_set = int(5) + if no_chunk_demucs: widget_text.write(base_text + 'Chunk size auto-set to 5... \n') - if sys_mem in [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]: - chunk_set = int(10) + if gpu_mem in [7, 8, 9, 10, 11, 12, 13, 14, 15]: + chunk_set = int(10) + if no_chunk_demucs: widget_text.write(base_text + 'Chunk size auto-set to 10... \n') - if sys_mem in [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32]: - chunk_set = int(40) + if int(gpu_mem) >= int(16): + chunk_set = int(40) + if no_chunk_demucs: widget_text.write(base_text + 'Chunk size auto-set to 40... 
\n') - if int(sys_mem) >= int(33): - chunk_set = int(0) - widget_text.write(base_text + 'Chunk size auto-set to Full... \n') + if data['gpu'] == -1: + sys_mem = psutil.virtual_memory().total >> 30 + if int(sys_mem) <= int(4): + chunk_set = int(1) + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 1... \n') + if sys_mem in [5, 6, 7, 8]: + chunk_set = int(10) + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 10... \n') + if sys_mem in [9, 10, 11, 12, 13, 14, 15, 16]: + chunk_set = int(25) + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 25... \n') + if int(sys_mem) >= int(17): + chunk_set = int(60) + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 60... \n') + elif data['chunks_d'] == str(0): + chunk_set = 0 + if no_chunk_demucs: + widget_text.write(base_text + "Chunk size set to full... \n") else: - if split_mode == True: - widget_text.write(base_text + "Split Mode is on (Chunks disabled).\n") - chunk_set = 0 - else: - widget_text.write(base_text + "Split Mode is off (Chunks enabled).\n") - if data['chunks_d'] == 'Full': - chunk_set = int(0) - widget_text.write(base_text + "Chunk size set to full... \n") - else: - chunk_set = data['chunks_d'] - widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... \n") + chunk_set = int(data['chunks_d']) + if no_chunk_demucs: + widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... \n") samples = mix.shape[-1] margin = margin_set chunk_size = chunk_set*44100 assert not margin == 0, 'margin cannot be zero!' 
+ if margin > chunk_size: margin = chunk_size - segmented_mix = {} if chunk_set == 0 or samples < chunk_size: @@ -692,27 +699,45 @@ class Predictor(): if end == samples: break - if demucs_model_version == 'v1': - sources = self.demix_demucs_v1(segmented_mix, margin_size=margin) - if demucs_model_version == 'v2': - sources = self.demix_demucs_v2(segmented_mix, margin_size=margin) + if demucs_model_version == 'v1': + if no_chunk_demucs == False: + sources = self.demix_demucs_v1_split(mix) + if no_chunk_demucs == True: + sources = self.demix_demucs_v1(segmented_mix, margin_size=margin) + if demucs_model_version == 'v2': + if no_chunk_demucs == False: + sources = self.demix_demucs_v2_split(mix) + if no_chunk_demucs == True: + sources = self.demix_demucs_v2(segmented_mix, margin_size=margin) if demucs_model_version == 'v3': - sources = self.demix_demucs(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + sources = self.demix_demucs_split(mix) + if no_chunk_demucs == True: + sources = self.demix_demucs(segmented_mix, margin_size=margin) - return sources - - def demix_demucs(self, mix, margin_size): + return sources + def demix_demucs(self, mix, margin_size): processed = {} demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs Inference... ") + else: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") - widget_text.write(base_text + "Running Demucs Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... 
") print(' Running Demucs Inference...') for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, step=(0.1 + (1.7/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -720,7 +745,17 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model(self.demucs, cmix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] + sources = apply_model(self.demucs, cmix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=False, + **progress_demucs_kwargs)[0] sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -732,7 +767,49 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + #print('the demucs model is done running') + + return sources + + def demix_demucs_split(self, mix): + + if split_mode: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs Inference... 
") + print(' Running Demucs Inference...') + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + + with torch.no_grad(): + sources = apply_model(self.demucs, + mix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=True, + **progress_demucs_kwargs)[0] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + return sources def demix_demucs_v1(self, mix, margin_size): @@ -740,19 +817,37 @@ class Predictor(): demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v1 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + progress_bar = 0 print(' Running Demucs Inference...') + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v1 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, - step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) + step=(0.1 + (1.7/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] cmix = torch.tensor(cmix, dtype=torch.float32) ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model_v1(self.demucs, cmix.to(device), split=split_mode, shifts=shift_set) + sources = apply_model_v1(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + shifts=shift_set, + **progress_demucs_kwargs) sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -764,7 +859,44 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + + return sources + + def demix_demucs_v1_split(self, mix): + + print(' Running Demucs Inference...') + if split_mode: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + + with torch.no_grad(): + sources = apply_model_v1(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + shifts=shift_set, + **progress_demucs_kwargs) + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + return sources def demix_demucs_v2(self, mix, margin_size): @@ -772,20 +904,39 @@ class Predictor(): demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v2 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") - print(' Running Demucs Inference...') + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v2 Inference... ") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v2 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) + update_progress(**progress_kwargs, - step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) + step=(0.1 + (1.7/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] cmix = torch.tensor(cmix, dtype=torch.float32) ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() - shift_set = 0 with torch.no_grad(): - sources = apply_model_v2(self.demucs, cmix.to(device), split=split_mode, overlap=overlap_set, shifts=shift_set) + sources = apply_model_v2(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + overlap=overlap_set, + 
shifts=shift_set, + **progress_demucs_kwargs) sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -797,8 +948,47 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + return sources + + def demix_demucs_v2_split(self, mix): + print(' Running Demucs Inference...') + + if split_mode: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference... ") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + with torch.no_grad(): + sources = apply_model_v2(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + overlap=overlap_set, + shifts=shift_set, + **progress_demucs_kwargs) + + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + + return sources + data = { 'audfile': True, @@ -811,19 +1001,19 @@ data = { 'gpu': -1, 'input_paths': None, 'inst_only_b': False, - 'margin': 44100, + 'margin_d': 44100, 'mp3bit': '320k', + 'no_chunk_d': False, 'normalize': False, 'overlap_b': 0.25, 'saveFormat': 'Wav', - 'segment': 'None', + 'segment': 'Default', 'settest': False, 'shifts_b': 2, 'split_mode': False, 'voc_only_b': False, 'wavtype': 'PCM_16', } -default_chunks = data['chunks_d'] def update_progress(progress_var, total_files, file_num, step: float = 1): """Calculate the progress for the progress widget in the GUI""" @@ -850,7 +1040,7 @@ def hide_opt(): yield finally: sys.stdout = old_stdout - + def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress_var: tk.Variable, **kwargs: dict): @@ -861,6 +1051,7 @@ def main(window: 
tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global _basename global _mixture global progress_kwargs + global progress_demucs_kwargs global base_text global model_set_name global stemset_n @@ -872,11 +1063,15 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global split_mode global demucs_model_set_name global demucs_model_version - global wav_type_set + global no_chunk_demucs + global space global flac_type_set global mp3_bit_set global normalization_set + global update_prog + + update_prog = update_progress wav_type_set = data['wavtype'] @@ -899,6 +1094,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress ffmp_err = """audioread\__init__.py", line 116, in audio_open""" sf_write_err = "sf.write" model_adv_set_err = "Got invalid dimensions for input" + demucs_model_missing_err = "is neither a single pre-trained model or a bag of models." try: with open('errorlog.txt', 'w') as f: @@ -911,7 +1107,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress randomnum = randrange(100000, 1000000) data.update(kwargs) - + if data['wavtype'] == '32-bit Float': wav_type_set = 'FLOAT' elif data['wavtype'] == '64-bit Float': @@ -921,6 +1117,9 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress flac_type_set = data['flactype'] mp3_bit_set = data['mp3bit'] + default_chunks = data['chunks_d'] + no_chunk_demucs = data['no_chunk_d'] + if data['normalize'] == True: normalization_set = spec_utils.normalize @@ -1057,10 +1256,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress overlap_set = float(data['overlap_b']) channel_set = int(data['channel']) - margin_set = int(data['margin']) + margin_set = int(data['margin_d']) shift_set = int(data['shifts_b']) - split_mode = data['split_mode'] + space = ' '*90 #print('Split? 
', split_mode) @@ -1133,6 +1332,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress #if ('models/MDX_Net_Models/' + model_set + '.onnx') + inference_type = 'demucs_only' # -Get text and update progress- base_text = get_baseText(total_files=len(data['input_paths']), @@ -1140,6 +1340,8 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress progress_kwargs = {'progress_var': progress_var, 'total_files': len(data['input_paths']), 'file_num': file_num} + progress_demucs_kwargs = {'total_files': len(data['input_paths']), + 'file_num': file_num, 'inference_type': inference_type} try: @@ -1389,7 +1591,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(f'\nError Received:\n\n') text_widget.write(f'Could not write audio file.\n') text_widget.write(f'This could be due to low storage on target device or a system permissions issue.\n') - text_widget.write(f"\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write(f"\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write(f'\nIf the error persists, please contact the developers.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') try: @@ -1456,6 +1658,50 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress button_widget.configure(state=tk.NORMAL) # Enable Button return + if model_adv_set_err in message: + text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') + text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') + text_widget.write(f'\nError Received:\n\n') + text_widget.write(f'The current ONNX model settings are not compatible with the selected \nmodel.\n\n') + text_widget.write(f'Please re-configure the advanced ONNX model settings accordingly and try \nagain.\n\n') + 
text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') + try: + with open('errorlog.txt', 'w') as f: + f.write(f'Last Error Received:\n\n' + + f'Error Received while processing "{os.path.basename(music_file)}":\n' + + f'Process Method: Demucs v3\n\n' + + f'The current ONNX model settings are not compatible with the selected model.\n\n' + + f'Please re-configure the advanced ONNX model settings accordingly and try again.\n\n' + + message + f'\nError Time Stamp [{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}]\n') + except: + pass + torch.cuda.empty_cache() + progress_var.set(0) + button_widget.configure(state=tk.NORMAL) # Enable Button + return + + if demucs_model_missing_err in message: + text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') + text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') + text_widget.write(f'\nError Received:\n\n') + text_widget.write(f'The selected Demucs model is missing.\n\n') + text_widget.write(f'Please download the model or make sure it is in the correct directory.\n\n') + text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') + try: + with open('errorlog.txt', 'w') as f: + f.write(f'Last Error Received:\n\n' + + f'Error Received while processing "{os.path.basename(music_file)}":\n' + + f'Process Method: Demucs v3\n\n' + + f'The selected Demucs model is missing.\n\n' + + f'Please download the model or make sure it is in the correct directory.\n\n' + + message + f'\nError Time Stamp [{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}]\n') + except: + pass + torch.cuda.empty_cache() + progress_var.set(0) + button_widget.configure(state=tk.NORMAL) # Enable Button + return + print(traceback_text) print(type(e).__name__, e) @@ -1476,7 +1722,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write("\n" + base_text + f'Separation failed 
for the following audio file:\n') text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') text_widget.write(f'\nError Received:\n') - text_widget.write("\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write("\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write("\n" + f'Please address the error and try again.' + "\n") text_widget.write(f'If this error persists, please contact the developers with the error details.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') @@ -1500,3 +1746,17 @@ if __name__ == '__main__': main() print("Successfully completed music demixing.");print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) +## Grave yard + + # def prog_val(): + # def thread(): + # global source + # source = apply_model(self.demucs, cmix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=True, )[0] + # th = threading.Thread(target=thread) + # th.start() + # print('wait') + # val = demucs.apply.progress_bar_num + # th.join() + # print('continue') + + # return source \ No newline at end of file diff --git a/inference_v5.py b/inference_v5.py index e6af489..54acd40 100644 --- a/inference_v5.py +++ b/inference_v5.py @@ -103,7 +103,7 @@ def determineModelFolderName(): def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress_var: tk.Variable, **kwargs: dict): - global model_params_d + global gui_progress_bar global nn_arch_sizes global nn_architecture @@ -115,9 +115,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global flac_type_set global mp3_bit_set + global space wav_type_set = data['wavtype'] - + gui_progress_bar = progress_var #Error Handling runtimeerr = "CUDNN error executing cudnnSetTensorNdDescriptor" @@ -127,6 +128,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, 
progress file_err = "FileNotFoundError" ffmp_err = """audioread\__init__.py", line 116, in audio_open""" sf_write_err = "sf.write" + demucs_model_missing_err = "is neither a single pre-trained model or a bag of models." try: with open('errorlog.txt', 'w') as f: @@ -382,8 +384,12 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global default_window_size global default_agg global normalization_set + global update_prog + + update_prog = update_progress default_window_size = data['window_size'] default_agg = data['agg'] + space = ' '*90 stime = time.perf_counter() progress_var.set(0) @@ -432,6 +438,9 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress else: base_name = f'{data["export_path"]}/{file_num}_{os.path.splitext(os.path.basename(music_file))[0]}' + global inference_type + + inference_type = 'inference_vr' model_name = os.path.basename(data[f'{data["useModel"]}Model']) model = vocal_remover.models[data['useModel']] device = vocal_remover.devices[data['useModel']] @@ -441,6 +450,8 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress progress_kwargs = {'progress_var': progress_var, 'total_files': len(data['input_paths']), 'file_num': file_num} + progress_demucs_kwargs = {'total_files': len(data['input_paths']), + 'file_num': file_num, 'inference_type': inference_type} update_progress(**progress_kwargs, step=0) @@ -503,7 +514,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress model_hash = hashlib.md5(open(ModelName,'rb').read()).hexdigest() model_params = [] model_params = lib_v5.filelist.provide_model_param_hash(model_hash) - print(model_params) + #print(model_params) if model_params[0] == 'Not Found Using Hash': model_params = [] model_params = lib_v5.filelist.provide_model_param_name(ModelName) @@ -622,8 +633,6 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(base_text + 
'Loading the stft of audio source...') text_widget.write(' Done!\n') - - text_widget.write(base_text + "Please Wait...\n") X_spec_m = spec_utils.combine_spectrograms(X_spec_s, mp) @@ -631,22 +640,47 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress def inference(X_spec, device, model, aggressiveness): - def _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness): + def _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness, tta=False): model.eval() + global active_iterations + global progress_value + with torch.no_grad(): preds = [] iterations = [n_window] - total_iterations = sum(iterations) - - text_widget.write(base_text + "Processing "f"{total_iterations} Slices... ") + if data['tta']: + total_iterations = sum(iterations) + total_iterations = total_iterations*2 + else: + total_iterations = sum(iterations) + + if tta: + active_iterations = sum(iterations) + active_iterations = active_iterations - 2 + total_iterations = total_iterations - 2 + else: + active_iterations = 0 - for i in tqdm(range(n_window)): - update_progress(**progress_kwargs, - step=(0.1 + (0.8/n_window * i))) + progress_bar = 0 + for i in range(n_window): + active_iterations += 1 + if data['demucsmodelVR']: + update_progress(**progress_kwargs, + step=(0.1 + (0.5/total_iterations * active_iterations))) + else: + update_progress(**progress_kwargs, + step=(0.1 + (0.8/total_iterations * active_iterations))) start = i * roi_size + progress_bar += 100 + progress_value = progress_bar + active_iterations_step = active_iterations*100 + step = (active_iterations_step / total_iterations) + + percent_prog = f"{base_text}Inference Progress: {active_iterations}/{total_iterations} | {round(step)}%" + text_widget.percentage(percent_prog) X_mag_window = X_mag_pad[None, :, :, start:start + data['window_size']] X_mag_window = torch.from_numpy(X_mag_window).to(device) @@ -656,7 +690,6 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: 
tk.Button, progress preds.append(pred[0]) pred = np.concatenate(preds, axis=2) - text_widget.write('Done!\n') return pred def preprocess(X_spec): @@ -691,7 +724,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') pred_tta = _execute(X_mag_pad, roi_size, n_window, - device, model, aggressiveness) + device, model, aggressiveness, tta=True) pred_tta = pred_tta[:, :, roi_size // 2:] pred_tta = pred_tta[:, :, :n_frame] @@ -702,17 +735,16 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress aggressiveness = {'value': aggresive_set, 'split_bin': mp.param['band'][1]['crop_stop']} if data['tta']: - text_widget.write(base_text + "Running Inferences (TTA)...\n") + text_widget.write(base_text + f"Running Inferences (TTA)... {space}\n") else: - text_widget.write(base_text + "Running Inference...\n") + text_widget.write(base_text + f"Running Inference... {space}\n") pred, X_mag, X_phase = inference(X_spec_m, device, model, aggressiveness) - update_progress(**progress_kwargs, - step=0.9) - # Postprocess + text_widget.write('\n') + if data['postprocess']: try: text_widget.write(base_text + 'Post processing...') @@ -743,19 +775,38 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress v_spec_m = X_spec_m - y_spec_m def demix_demucs(mix): - #print('shift_set ', shift_set) - text_widget.write(base_text + "Running Demucs Inference...\n") - text_widget.write(base_text + "Processing... ") + print(' Running Demucs Inference...') + if split_mode: + text_widget.write(base_text + f'Running Demucs Inference... {space}') + else: + text_widget.write(base_text + f'Running Demucs Inference... 
') + mix = torch.tensor(mix, dtype=torch.float32) ref = mix.mean(0) mix = (mix - ref.mean()) / ref.std() - + widget_text = text_widget with torch.no_grad(): - sources = apply_model(demucs, mix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] - - text_widget.write('Done!\n') + sources = apply_model(demucs, + mix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=True, + **progress_demucs_kwargs)[0] + + if split_mode: + text_widget.write('\n') + else: + update_progress(**progress_kwargs, + step=0.9) + text_widget.write('Done!\n') sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -774,15 +825,9 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress if data['demucsmodelVR']: demucs = HDemucs(sources=["other", "vocals"]) - text_widget.write(base_text + 'Loading Demucs model... ') - update_progress(**progress_kwargs, - step=0.95) path_d = Path('models/Demucs_Models/v3_repo') #print('What Demucs model was chosen? 
', demucs_model_set) demucs = _gm(name=demucs_model_set, repo=path_d) - text_widget.write('Done!\n') - - #print('segment: ', data['segment']) if data['segment'] == 'None': segment = None @@ -803,7 +848,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress else: if segment is not None: sub.segment = segment - text_widget.write(base_text + "Segments set to "f"{segment}.\n") + #text_widget.write(base_text + "Segments set to "f"{segment}.\n") except: segment = None if isinstance(demucs, BagOfModels): @@ -814,8 +859,6 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress if segment is not None: sub.segment = segment - #print('segment port-process: ', segment) - demucs.cpu() demucs.eval() @@ -1039,7 +1082,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(f'\nError Received:\n\n') text_widget.write(f'Could not write audio file.\n') text_widget.write(f'This could be due to low storage on target device or a system permissions issue.\n') - text_widget.write(f"\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write(f"\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write(f'\nIf the error persists, please contact the developers.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') try: @@ -1084,6 +1127,28 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress button_widget.configure(state=tk.NORMAL) # Enable Button return + if demucs_model_missing_err in message: + text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') + text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') + text_widget.write(f'\nError Received:\n\n') + text_widget.write(f'The selected Demucs model is missing.\n\n') + text_widget.write(f'Please download the model or make 
sure it is in the correct directory.\n\n') + text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') + try: + with open('errorlog.txt', 'w') as f: + f.write(f'Last Error Received:\n\n' + + f'Error Received while processing "{os.path.basename(music_file)}":\n' + + f'Process Method: VR Architecture\n\n' + + f'The selected Demucs model is missing.\n\n' + + f'Please download the model or make sure it is in the correct directory.\n\n' + + message + f'\nError Time Stamp [{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}]\n') + except: + pass + torch.cuda.empty_cache() + progress_var.set(0) + button_widget.configure(state=tk.NORMAL) # Enable Button + return + print(traceback_text) print(type(e).__name__, e) print(message) @@ -1103,7 +1168,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') text_widget.write(f'\nError Received:\n') - text_widget.write("\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write("\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") + text_widget.write("\n" + f'Please address the error and try again.' 
+ "\n") text_widget.write(f'If this error persists, please contact the developers with the error details.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') diff --git a/inference_v5_ensemble.py b/inference_v5_ensemble.py index a32df73..85a0f40 100644 --- a/inference_v5_ensemble.py +++ b/inference_v5_ensemble.py @@ -70,35 +70,35 @@ class Predictor(): self.noise_pro_select_set_var = tk.StringVar(value='MDX-NET_Noise_Profile_14_kHz') self.compensate_v_var = tk.StringVar(value=1.03597672895) - top= Toplevel() + mdx_model_set = Toplevel() - top.geometry("740x550") - window_height = 740 - window_width = 550 + mdx_model_set.geometry("490x515") + window_height = 490 + window_width = 515 - top.title("Specify Parameters") + mdx_model_set.title("Specify Parameters") - top.resizable(False, False) # This code helps to disable windows from resizing + mdx_model_set.resizable(False, False) # This code helps to disable windows from resizing - screen_width = top.winfo_screenwidth() - screen_height = top.winfo_screenheight() + screen_width = mdx_model_set.winfo_screenwidth() + screen_height = mdx_model_set.winfo_screenheight() x_cordinate = int((screen_width/2) - (window_width/2)) y_cordinate = int((screen_height/2) - (window_height/2)) - top.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) + mdx_model_set.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate)) # change title bar icon - top.iconbitmap('img\\UVR-Icon-v2.ico') + mdx_model_set.iconbitmap('img\\UVR-Icon-v2.ico') - tabControl = ttk.Notebook(top) + mdx_model_set_window = ttk.Notebook(mdx_model_set) - tabControl.pack(expand = 1, fill ="both") + mdx_model_set_window.pack(expand = 1, fill ="both") - tabControl.grid_rowconfigure(0, weight=1) - tabControl.grid_columnconfigure(0, weight=1) + mdx_model_set_window.grid_rowconfigure(0, weight=1) + mdx_model_set_window.grid_columnconfigure(0, 
weight=1) - frame0=Frame(tabControl,highlightbackground='red',highlightthicknes=0) + frame0=Frame(mdx_model_set_window,highlightbackground='red',highlightthicknes=0) frame0.grid(row=0,column=0,padx=0,pady=0) frame0.tkraise(frame0) @@ -167,22 +167,19 @@ class Predictor(): torch.cuda.empty_cache() gui_progress_bar.set(0) widget_button.configure(state=tk.NORMAL) # Enable Button - top.destroy() + self.okVar.set(1) + stop_button() + mdx_model_set.destroy() return l0=ttk.Button(frame0,text="Stop Process", command=stop) l0.grid(row=13,column=1,padx=0,pady=30) - #print('print from top ', model_hash) + #print('print from mdx_model_set ', model_hash) #source_val = 0 - def change_event(): - self.okVar.set(1) - #top.destroy() - pass - - top.protocol("WM_DELETE_WINDOW", change_event) + mdx_model_set.protocol("WM_DELETE_WINDOW", stop) frame0.wait_variable(self.okVar) @@ -237,16 +234,16 @@ class Predictor(): with open(f"lib_v5/filelists/model_cache/mdx_model_cache/{model_hash}.json", "w") as outfile: outfile.write(mdx_model_params_r) - if stemset_n == '(Instrumental)': if not 'UVR' in demucs_model_set: - widget_text.write(base_text + 'The selected Demucs model cannot be used with this model.\n') - widget_text.write(base_text + 'Only 2 stem Demucs models are compatible with this model.\n') - widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') - demucs_model_set = 'UVR_Demucs_Model_1' + if demucs_switch == 'on': + widget_text.write(base_text + 'The selected Demucs model cannot be used with this model.\n') + widget_text.write(base_text + 'Only 2 stem Demucs models are compatible with this model.\n') + widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') + demucs_model_set = 'UVR_Demucs_Model_1' - top.destroy() + mdx_model_set.destroy() def prediction_setup(self): @@ -278,6 +275,10 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(state) widget_text.write('Done!\n') + if not data['segment'] == 
'Default': + widget_text.write(base_text + 'Note: Segments only available for Demucs v3\n') + else: + pass elif 'tasnet-beb46fac.th' in demucs_model_set or 'tasnet_extra-df3777b2.th' in demucs_model_set or \ 'demucs48_hq-28a1282c.th' in demucs_model_set or'demucs-e07c671f.th' in demucs_model_set or \ @@ -300,6 +301,10 @@ class Predictor(): self.demucs.to(device) self.demucs.load_state_dict(torch.load("models/Demucs_Models/"f"{demucs_model_set}")) widget_text.write('Done!\n') + if not data['segment'] == 'Default': + widget_text.write(base_text + 'Note: Segments only available for Demucs v3\n') + else: + pass self.demucs.eval() else: @@ -318,6 +323,37 @@ class Predictor(): widget_text.write('Done!\n') if isinstance(self.demucs, BagOfModels): widget_text.write(base_text + f"Selected Demucs model is a bag of {len(self.demucs.models)} model(s).\n") + + if data['segment'] == 'Default': + segment = None + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + else: + try: + segment = int(data['segment']) + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + if split_mode: + widget_text.write(base_text + "Segments set to "f"{segment}.\n") + except: + segment = None + if isinstance(self.demucs, BagOfModels): + if segment is not None: + for sub in self.demucs.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment self.onnx_models = {} c = 0 @@ -595,48 +631,84 @@ class Predictor(): widget_text.write(base_text + 'Completed Separation!\n\n') def demix(self, mix): - # 1 = demucs only - # 0 = onnx only + if data['chunks'] == 'Full': chunk_set = 0 - else: - chunk_set = data['chunks'] - - if data['chunks'] == 'Auto': + widget_text.write(base_text + "Chunk size user-set to \"Full\"... 
\n") + elif data['chunks'] == 'Auto': if data['gpu'] == 0: try: gpu_mem = round(torch.cuda.get_device_properties(0).total_memory/1.074e+9) except: widget_text.write(base_text + 'NVIDIA GPU Required for conversion!\n') + data['gpu'] = -1 + pass if int(gpu_mem) <= int(6): chunk_set = int(5) - widget_text.write(base_text + 'Chunk size auto-set to 5... \n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 5... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 5... \n') if gpu_mem in [7, 8, 9, 10, 11, 12, 13, 14, 15]: chunk_set = int(10) - widget_text.write(base_text + 'Chunk size auto-set to 10... \n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 10... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 10... \n') if int(gpu_mem) >= int(16): chunk_set = int(40) - widget_text.write(base_text + 'Chunk size auto-set to 40... \n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 40... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 40... \n') if data['gpu'] == -1: sys_mem = psutil.virtual_memory().total >> 30 if int(sys_mem) <= int(4): chunk_set = int(1) - widget_text.write(base_text + 'Chunk size auto-set to 1... \n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 1... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 1... \n') if sys_mem in [5, 6, 7, 8]: chunk_set = int(10) - widget_text.write(base_text + 'Chunk size auto-set to 10... \n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 10... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 10... \n') if sys_mem in [9, 10, 11, 12, 13, 14, 15, 16]: chunk_set = int(25) - widget_text.write(base_text + 'Chunk size auto-set to 25... 
\n') + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 25... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 25... \n') + if int(sys_mem) >= int(17): chunk_set = int(60) - widget_text.write(base_text + 'Chunk size auto-set to 60... \n') - elif data['chunks'] == 'Full': + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + 'Chunk size auto-set to 60... \n') + else: + widget_text.write(base_text + 'Chunk size auto-set to 60... \n') + elif data['chunks'] == '0': chunk_set = 0 - widget_text.write(base_text + "Chunk size set to full... \n") + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + "Chunk size user-set to \"Full\"... \n") + else: + widget_text.write(base_text + "Chunk size user-set to \"Full\"... \n") else: chunk_set = int(data['chunks']) - widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... \n") + if demucs_only == 'on': + if no_chunk_demucs: + widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... \n") + else: + widget_text.write(base_text + "Chunk size user-set to "f"{chunk_set}... 
\n") samples = mix.shape[-1] margin = margin_set @@ -673,16 +745,22 @@ class Predictor(): 'demucs.th' in demucs_model_set or \ 'demucs_extra.th' in demucs_model_set or 'light.th' in demucs_model_set or \ 'light_extra.th' in demucs_model_set or 'v1' in demucs_model_set or '.gz' in demucs_model_set: - sources = self.demix_demucs_v1(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + sources = self.demix_demucs_v1_split(mix) + if no_chunk_demucs == True: + sources = self.demix_demucs_v1(segmented_mix, margin_size=margin) elif 'tasnet-beb46fac.th' in demucs_model_set or 'tasnet_extra-df3777b2.th' in demucs_model_set or \ 'demucs48_hq-28a1282c.th' in demucs_model_set or'demucs-e07c671f.th' in demucs_model_set or \ 'demucs_extra-3646af93.th' in demucs_model_set or 'demucs_unittest-09ebc15f.th' in demucs_model_set or \ 'v2' in demucs_model_set: - sources = self.demix_demucs_v2(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + sources = self.demix_demucs_v2_split(mix) + if no_chunk_demucs == True: + sources = self.demix_demucs_v2(segmented_mix, margin_size=margin) else: - if split_mode == True: + if no_chunk_demucs == False: sources = self.demix_demucs_split(mix) - if split_mode == False: + if no_chunk_demucs == True: sources = self.demix_demucs(segmented_mix, margin_size=margin) else: # both, apply spec effects base_out = self.demix_base(segmented_mix, margin_size=margin) @@ -690,16 +768,22 @@ class Predictor(): 'demucs.th' in demucs_model_set or \ 'demucs_extra.th' in demucs_model_set or 'light.th' in demucs_model_set or \ 'light_extra.th' in demucs_model_set or 'v1' in demucs_model_set or '.gz' in demucs_model_set: - demucs_out = self.demix_demucs_v1(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + demucs_out = self.demix_demucs_v1_split(mix) + if no_chunk_demucs == True: + demucs_out = self.demix_demucs_v1(segmented_mix, margin_size=margin) elif 'tasnet-beb46fac.th' in demucs_model_set or 'tasnet_extra-df3777b2.th' 
in demucs_model_set or \ 'demucs48_hq-28a1282c.th' in demucs_model_set or'demucs-e07c671f.th' in demucs_model_set or \ 'demucs_extra-3646af93.th' in demucs_model_set or 'demucs_unittest-09ebc15f.th' in demucs_model_set or \ 'v2' in demucs_model_set: - demucs_out = self.demix_demucs_v2(segmented_mix, margin_size=margin) + if no_chunk_demucs == False: + demucs_out = self.demix_demucs_v2_split(mix) + if no_chunk_demucs == True: + demucs_out = self.demix_demucs_v2(segmented_mix, margin_size=margin) else: - if split_mode == True: + if no_chunk_demucs == False: demucs_out = self.demix_demucs_split(mix) - if split_mode == False: + if no_chunk_demucs == True: demucs_out = self.demix_demucs(segmented_mix, margin_size=margin) nan_count = np.count_nonzero(np.isnan(demucs_out)) + np.count_nonzero(np.isnan(base_out)) if nan_count > 0: @@ -731,17 +815,33 @@ class Predictor(): onnxitera = len(mixes) onnxitera_calc = onnxitera * 2 gui_progress_bar_onnx = 0 - widget_text.write(base_text + "Running ONNX Inference...\n") - widget_text.write(base_text + "Processing "f"{onnxitera} slices... ") + progress_bar = 0 + print(' Running ONNX Inference...') + + if onnxitera == 1: + widget_text.write(base_text + f"Running ONNX Inference... 
") + else: + widget_text.write(base_text + f"Running ONNX Inference...{space}\n") + for mix in mixes: gui_progress_bar_onnx += 1 - if demucs_switch == 'on': + if data['demucsmodel']: update_progress(**progress_kwargs, step=(0.1 + (0.5/onnxitera_calc * gui_progress_bar_onnx))) else: update_progress(**progress_kwargs, - step=(0.1 + (0.9/onnxitera * gui_progress_bar_onnx))) + step=(0.1 + (0.8/onnxitera * gui_progress_bar_onnx))) + + progress_bar += 100 + step = (progress_bar / onnxitera) + + if onnxitera == 1: + pass + else: + percent_prog = f"{base_text}MDX-Net Inference Progress: {gui_progress_bar_onnx}/{onnxitera} | {round(step)}%" + widget_text.percentage(percent_prog) + cmix = mixes[mix] sources = [] n_sample = cmix.shape[1] @@ -774,28 +874,38 @@ class Predictor(): end = None sources.append(tar_signal[:,start:end]) - chunked_sources.append(sources) _sources = np.concatenate(chunked_sources, axis=-1) del self.onnx_models - widget_text.write('Done!\n') + + if onnxitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + return _sources def demix_demucs(self, mix, margin_size): - #print('shift_set ', shift_set) processed = {} demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs Inference... ") + else: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") - widget_text.write(base_text + "Split Mode is off. (Chunks enabled for Demucs Model)\n") - - widget_text.write(base_text + "Running Demucs Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... 
") - print('Running Demucs Inference...') - + print(' Running Demucs Inference...') for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -803,7 +913,17 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model(self.demucs, cmix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] + sources = apply_model(self.demucs, cmix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=False, + **progress_demucs_kwargs)[0] sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -815,60 +935,49 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') - return sources + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + #print('the demucs model is done running') + + return sources + def demix_demucs_split(self, mix): - - #print('shift_set ', shift_set) - widget_text.write(base_text + "Split Mode is on. (Chunks disabled for Demucs Model)\n") - widget_text.write(base_text + "Running Demucs Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + + if split_mode: + widget_text.write(base_text + f"Running Demucs Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs Inference... 
") print(' Running Demucs Inference...') - + mix = torch.tensor(mix, dtype=torch.float32) ref = mix.mean(0) mix = (mix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model(self.demucs, mix[None], split=split_mode, device=device, overlap=overlap_set, shifts=shift_set, progress=False)[0] + sources = apply_model(self.demucs, + mix[None], + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + device=device, + overlap=overlap_set, + shifts=shift_set, + progress=False, + segmen=True, + **progress_demucs_kwargs)[0] - widget_text.write('Done!\n') + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] - return sources - - def demix_demucs_v2(self, mix, margin_size): - processed = {} - demucsitera = len(mix) - demucsitera_calc = demucsitera * 2 - gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v2 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... 
") - print(' Running Demucs Inference...') - for nmix in mix: - gui_progress_bar_demucs += 1 - update_progress(**progress_kwargs, - step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) - cmix = mix[nmix] - cmix = torch.tensor(cmix, dtype=torch.float32) - ref = cmix.mean(0) - cmix = (cmix - ref.mean()) / ref.std() - with torch.no_grad(): - sources = apply_model_v2(self.demucs, cmix.to(device), split=split_mode, overlap=overlap_set, shifts=shift_set) - sources = (sources * ref.std() + ref.mean()).cpu().numpy() - sources[[0,1]] = sources[[1,0]] - - start = 0 if nmix == 0 else margin_size - end = None if nmix == list(mix.keys())[::-1][0] else -margin_size - if margin_size == 0: - end = None - processed[nmix] = sources[:,:,start:end].copy() - - sources = list(processed.values()) - sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + return sources def demix_demucs_v1(self, mix, margin_size): @@ -876,11 +985,21 @@ class Predictor(): demucsitera = len(mix) demucsitera_calc = demucsitera * 2 gui_progress_bar_demucs = 0 - widget_text.write(base_text + "Running Demucs v1 Inference...\n") - widget_text.write(base_text + "Processing "f"{len(mix)} slices... ") + progress_bar = 0 print(' Running Demucs Inference...') + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") for nmix in mix: gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v1 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) update_progress(**progress_kwargs, step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) cmix = mix[nmix] @@ -888,7 +1007,15 @@ class Predictor(): ref = cmix.mean(0) cmix = (cmix - ref.mean()) / ref.std() with torch.no_grad(): - sources = apply_model_v1(self.demucs, cmix.to(device), split=split_mode, shifts=shift_set) + sources = apply_model_v1(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + shifts=shift_set, + **progress_demucs_kwargs) sources = (sources * ref.std() + ref.mean()).cpu().numpy() sources[[0,1]] = sources[[1,0]] @@ -900,22 +1027,135 @@ class Predictor(): sources = list(processed.values()) sources = np.concatenate(sources, axis=-1) - widget_text.write('Done!\n') + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + return sources + + def demix_demucs_v1_split(self, mix): -def update_progress(progress_var, total_files, file_num, step: float = 1): - """Calculate the progress for the progress widget in the GUI""" - base = (100 / total_files) - progress = base * (file_num - 1) - progress += base * step + print(' Running Demucs Inference...') + if split_mode: + widget_text.write(base_text + f"Running Demucs v1 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v1 Inference... 
") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() - progress_var.set(progress) + with torch.no_grad(): + sources = apply_model_v1(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + shifts=shift_set, + **progress_demucs_kwargs) + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] -def get_baseText(total_files, file_num): - """Create the base text for the command widget""" - text = 'File {file_num}/{total_files} '.format(file_num=file_num, - total_files=total_files) - return text + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + + return sources + + def demix_demucs_v2(self, mix, margin_size): + processed = {} + demucsitera = len(mix) + demucsitera_calc = demucsitera * 2 + gui_progress_bar_demucs = 0 + progress_bar = 0 + if demucsitera == 1: + widget_text.write(base_text + f"Running Demucs v2 Inference... 
") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + + for nmix in mix: + gui_progress_bar_demucs += 1 + progress_bar += 100 + step = (progress_bar / demucsitera) + if demucsitera == 1: + pass + else: + percent_prog = f"{base_text}Demucs v2 Inference Progress: {gui_progress_bar_demucs}/{demucsitera} | {round(step)}%" + widget_text.percentage(percent_prog) + + update_progress(**progress_kwargs, + step=(0.35 + (1.05/demucsitera_calc * gui_progress_bar_demucs))) + cmix = mix[nmix] + cmix = torch.tensor(cmix, dtype=torch.float32) + ref = cmix.mean(0) + cmix = (cmix - ref.mean()) / ref.std() + with torch.no_grad(): + sources = apply_model_v2(self.demucs, + cmix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=False, + overlap=overlap_set, + shifts=shift_set, + **progress_demucs_kwargs) + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + start = 0 if nmix == 0 else margin_size + end = None if nmix == list(mix.keys())[::-1][0] else -margin_size + if margin_size == 0: + end = None + processed[nmix] = sources[:,:,start:end].copy() + + sources = list(processed.values()) + sources = np.concatenate(sources, axis=-1) + + if demucsitera == 1: + widget_text.write('Done!\n') + else: + widget_text.write('\n') + + return sources + + def demix_demucs_v2_split(self, mix): + print(' Running Demucs Inference...') + + if split_mode: + widget_text.write(base_text + f"Running Demucs v2 Inference...{space}\n") + else: + widget_text.write(base_text + f"Running Demucs v2 Inference... 
") + + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + with torch.no_grad(): + sources = apply_model_v2(self.demucs, + mix.to(device), + gui_progress_bar, + widget_text, + update_prog, + split=split_mode, + segmen=True, + overlap=overlap_set, + shifts=shift_set, + **progress_demucs_kwargs) + + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + + if split_mode: + widget_text.write('\n') + else: + widget_text.write('Done!\n') + + return sources warnings.filterwarnings("ignore") cpu = torch.device('cpu') @@ -938,22 +1178,6 @@ class VocalRemover(object): self.models = defaultdict(lambda: None) self.devices = defaultdict(lambda: None) # self.offset = model.offset - - - -def update_progress(progress_var, total_files, file_num, step: float = 1): - """Calculate the progress for the progress widget in the GUI""" - base = (100 / total_files) - progress = base * (file_num - 1) - progress += base * step - - progress_var.set(progress) - -def get_baseText(total_files, file_num): - """Create the base text for the command widget""" - text = 'File {file_num}/{total_files} '.format(file_num=file_num, - total_files=total_files) - return text def determineModelFolderName(): """ @@ -1008,6 +1232,7 @@ data = { 'mdx_only_ensem_e': 'No Model', 'mixing': 'Default', 'mp3bit': '320k', + 'no_chunk': False, 'noise_pro_select': 'Auto Select', 'noisereduc_s': 3, 'non_red': False, @@ -1016,6 +1241,7 @@ data = { 'overlap': 0.5, 'postprocess': True, 'saveFormat': 'wav', + 'segment': 'Default', 'shifts': 0, 'split_mode': False, 'tta': True, @@ -1051,8 +1277,10 @@ default_noisereduc_s = data['noisereduc_s'] def update_progress(progress_var, total_files, file_num, step: float = 1): """Calculate the progress for the progress widget in the GUI""" - base = (100 / total_files) - progress = base * (file_num - 1) + + total_count = model_count * total_files + base = (100 / total_count) + progress = base * 
current_model_bar - base progress += base * step progress_var.set(progress) @@ -1061,9 +1289,14 @@ def get_baseText(total_files, file_num): """Create the base text for the command widget""" text = 'File {file_num}/{total_files} '.format(file_num=file_num, total_files=total_files) + return text -def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress_var: tk.Variable, +def main(window: tk.Wm, + text_widget: tk.Text, + button_widget: tk.Button, + progress_var: tk.Variable, + stop_thread, **kwargs: dict): global widget_text @@ -1071,6 +1304,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global music_file global default_chunks global default_noisereduc_s + global gui_progress_bar global base_name global progress_kwargs global base_text @@ -1081,6 +1315,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global compensate global autocompensate global demucs_model_set + global progress_demucs_kwargs global channel_set global margin_set global overlap_set @@ -1091,14 +1326,17 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress global split_mode global demucs_switch global demucs_only + global no_chunk_demucs global wav_type_set global flac_type_set global mp3_bit_set global model_hash + global space global stime global stemset_n global source_val global widget_button + global stop_button wav_type_set = data['wavtype'] @@ -1107,9 +1345,11 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress default_noisereduc_s = data['noisereduc_s'] autocompensate = data['autocompensate'] + stop_button = stop_thread widget_text = text_widget gui_progress_bar = progress_var widget_button = button_widget + space = ' '*90 #Error Handling @@ -1202,6 +1442,12 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress normalization_set(wav_vocals), mp.param['sr'], subtype=wav_type_set) data.update(kwargs) + + 
global update_prog + + update_prog = update_progress + no_chunk_demucs = data['no_chunk'] + space = ' '*90 if data['DemucsModel_MDX'] == "Tasnet v1": demucs_model_set_name = 'tasnet.th' @@ -1694,7 +1940,11 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress vr_ensem_e_name = data['vr_ensem_e'] vr_ensem_e = f'models/Main_Models/{vr_ensem_e_name}.pth' vr_param_ens_e = data['vr_basic_USER_model_param_5'] - + + basic_vr_ensemble_list = [vr_ensem_a_name, vr_ensem_b_name, vr_ensem_c_name, vr_ensem_d_name, vr_ensem_e_name] + no_models = basic_vr_ensemble_list.count('No Model') + vr_ensem_count = 5 - no_models + if data['vr_ensem_c'] == 'No Model' and data['vr_ensem_d'] == 'No Model' and data['vr_ensem_e'] == 'No Model': Basic_Ensem = [ { @@ -1970,6 +2220,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_ensem = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_ensem = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_ensem = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_ensem = 'UVR_MDXNET_Inst_2' else: mdx_ensem = mdx_net_model_name @@ -2008,6 +2262,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_ensem_b = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_ensem_b = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_ensem_b = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_ensem_b = 'UVR_MDXNET_Inst_2' else: mdx_ensem_b = mdx_net_model_name @@ -2020,6 +2278,9 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_ensem_b = 'pass' mdx_model_run_mul_b = 'pass' + multi_ai_ensemble_list = [vr_ensem_name, vr_ensem_mdx_a_name, vr_ensem_mdx_b_name, vr_ensem_mdx_c_name, data['mdx_ensem'], data['mdx_ensem_b']] + no_multi_models = multi_ai_ensemble_list.count('No Model') + multi_ensem_count 
= 6 - no_multi_models if data['vr_ensem'] == 'No Model' and data['vr_ensem_mdx_a'] == 'No Model' and data['vr_ensem_mdx_b'] == 'No Model' and data['vr_ensem_mdx_c'] == 'No Model': mdx_vr = [ @@ -2287,6 +2548,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_only_ensem_a = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_only_ensem_a = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_only_ensem_a = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_only_ensem_a = 'UVR_MDXNET_Inst_2' else: mdx_only_ensem_a = mdx_net_model_name @@ -2325,6 +2590,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_only_ensem_b = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_only_ensem_b = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_only_ensem_b = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_only_ensem_b = 'UVR_MDXNET_Inst_2' else: mdx_only_ensem_b = mdx_net_model_name @@ -2363,6 +2632,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_only_ensem_c = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_only_ensem_c = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_only_ensem_c = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_only_ensem_c = 'UVR_MDXNET_Inst_2' else: mdx_only_ensem_c = mdx_net_model_name @@ -2401,6 +2674,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_only_ensem_d = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_only_ensem_d = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_only_ensem_d = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_only_ensem_d = 'UVR_MDXNET_Inst_2' else: mdx_only_ensem_d = 
mdx_net_model_name @@ -2439,6 +2716,10 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_only_ensem_e = 'UVR_MDXNET_KARA' elif mdx_net_model_name == 'UVR-MDX-NET Main': mdx_only_ensem_e = 'UVR_MDXNET_Main' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 1': + mdx_only_ensem_e = 'UVR_MDXNET_Inst_1' + elif mdx_net_model_name == 'UVR-MDX-NET Inst 2': + mdx_only_ensem_e = 'UVR_MDXNET_Inst_2' else: mdx_only_ensem_e = mdx_net_model_name @@ -2701,31 +2982,46 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress } ] + basic_md_ensemble_list = [data['mdx_only_ensem_a'], data['mdx_only_ensem_b'], data['mdx_only_ensem_c'], data['mdx_only_ensem_d'], data['mdx_only_ensem_e']] + no_basic_md_models = basic_md_ensemble_list.count('No Model') + basic_md_ensem_count = 5 - no_basic_md_models + + global model_count + if data['ensChoose'] == 'Multi-AI Ensemble': loops = mdx_vr ensefolder = 'Multi_AI_Ensemble_Outputs' ensemode = 'Multi_AI_Ensemble' + model_count = multi_ensem_count if data['ensChoose'] == 'Basic VR Ensemble': loops = Basic_Ensem ensefolder = 'Basic_VR_Outputs' ensemode = 'Multi_VR_Ensemble' + model_count = vr_ensem_count if data['ensChoose'] == 'Basic MD Ensemble': loops = mdx_demuc_only ensefolder = 'Basic_MDX_Net_Demucs_Ensemble' ensemode = 'Basic_MDX_Net_Demucs_Ensemble' + model_count = basic_md_ensem_count + + global current_model_bar + + current_model_bar = 0 #Prepare Audiofile(s) for file_num, music_file in enumerate(data['input_paths'], start=1): # -Get text and update progress- + + current_model = 1 + + base_text = get_baseText(total_files=len(data['input_paths']), file_num=file_num) progress_kwargs = {'progress_var': progress_var, 'total_files': len(data['input_paths']), - 'file_num': file_num} - update_progress(**progress_kwargs, - step=0) + 'file_num': file_num} try: @@ -2757,7 +3053,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress 
text_widget.write('Detected Free Space: ' + str(free_space) + ' GB' + '\n\n') except: pass - + #Prepare to loop models for i, c in tqdm(enumerate(loops), disable=True, desc='Iterations..'): @@ -2833,10 +3129,16 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress if c['model_location'] == 'pass': pass else: + model_name = c['model_name'] + text_widget.write(f'Ensemble Mode - {model_name} - Model {current_model}/{model_count}\n\n') + current_model += 1 + current_model_bar += 1 + update_progress(**progress_kwargs, + step=0) presentmodel = Path(c['model_location']) if presentmodel.is_file(): - print(f'The file {presentmodel} exists') + pass else: if data['ensChoose'] == 'Multi-AI Ensemble': text_widget.write(base_text + 'Model "' + c['model_name'] + '.pth" is missing.\n') @@ -2851,14 +3153,11 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(base_text + 'Model "' + c['model_name'] + '.pth" is missing.\n') text_widget.write(base_text + 'Installation of v5 Model Expansion Pack required to use this model.\n\n') continue - - text_widget.write(c['loop_name'] + '\n\n') - text_widget.write(base_text + 'Loading ' + c['model_name_c'] + '... ') + text_widget.write(base_text + 'Loading VR model... ') aggresive_set = float(data['agg']/100) - model_size = math.ceil(os.stat(c['model_location']).st_size / 1024) nn_architecture = '{}KB'.format(min(nn_arch_sizes, key=lambda x:abs(x-model_size))) @@ -2917,9 +3216,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress ModelName_1=(c['model_name']) - #print('model param function output ', model_params) - - print('Model Parameters:', model_params[0]) + #print('Model Parameters:', model_params[0]) text_widget.write(base_text + 'Loading assigned model parameters ' + '\"' + model_params[1] + '\"... 
') mp = ModelParameters(model_params[0]) @@ -2974,7 +3271,6 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(base_text + 'Loading the stft of audio source... ') text_widget.write('Done!\n') - text_widget.write(base_text + "Please Wait...\n") X_spec_m = spec_utils.combine_spectrograms(X_spec_s, mp) @@ -2982,22 +3278,47 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress def inference(X_spec, device, model, aggressiveness): - def _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness): + def _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness, tta=False): model.eval() + global active_iterations + global progress_value + with torch.no_grad(): preds = [] iterations = [n_window] - total_iterations = sum(iterations) - - text_widget.write(base_text + "Processing "f"{total_iterations} Slices... ") + if data['tta']: + total_iterations = sum(iterations) + total_iterations = total_iterations*2 + else: + total_iterations = sum(iterations) + + if tta: + active_iterations = sum(iterations) + active_iterations = active_iterations - 2 + total_iterations = total_iterations - 2 + else: + active_iterations = 0 - for i in tqdm(range(n_window)): - update_progress(**progress_kwargs, - step=(0.1 + (0.8/n_window * i))) + progress_bar = 0 + for i in range(n_window): + active_iterations += 1 + if data['demucsmodelVR']: + update_progress(**progress_kwargs, + step=(0.1 + (0.5/total_iterations * active_iterations))) + else: + update_progress(**progress_kwargs, + step=(0.1 + (0.8/total_iterations * active_iterations))) start = i * roi_size + progress_bar += 100 + progress_value = progress_bar + active_iterations_step = active_iterations*100 + step = (active_iterations_step / total_iterations) + + percent_prog = f"{base_text}Inference Progress: {active_iterations}/{total_iterations} | {round(step)}%" + text_widget.percentage(percent_prog) X_mag_window = X_mag_pad[None, :, :, 
start:start + data['window_size']] X_mag_window = torch.from_numpy(X_mag_window).to(device) @@ -3007,8 +3328,6 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress preds.append(pred[0]) pred = np.concatenate(preds, axis=2) - - text_widget.write('Done!\n') return pred def preprocess(X_spec): @@ -3043,29 +3362,28 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') pred_tta = _execute(X_mag_pad, roi_size, n_window, - device, model, aggressiveness) + device, model, aggressiveness, tta=True) pred_tta = pred_tta[:, :, roi_size // 2:] pred_tta = pred_tta[:, :, :n_frame] return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.j * X_phase) else: return pred * coef, X_mag, np.exp(1.j * X_phase) - + aggressiveness = {'value': aggresive_set, 'split_bin': mp.param['band'][1]['crop_stop']} if data['tta']: - text_widget.write(base_text + "Running Inferences (TTA)... \n") + text_widget.write(base_text + f"Running Inferences (TTA)... {space}\n") else: - text_widget.write(base_text + "Running Inference... \n") + text_widget.write(base_text + f"Running Inference... 
{space}\n") pred, X_mag, X_phase = inference(X_spec_m, device, model, aggressiveness) + + text_widget.write('\n') - # update_progress(**progress_kwargs, - # step=0.8) - - # Postprocess + if data['postprocess']: try: text_widget.write(base_text + 'Post processing...') @@ -3154,6 +3472,8 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress ################################### if data['ensChoose'] == 'Multi-AI Ensemble' or data['ensChoose'] == 'Basic MD Ensemble': + + if data['demucsmodel']: demucs_switch = 'on' else: @@ -3194,11 +3514,11 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress mdx_name = c['mdx_model_name'] - if c['mdx_model_name'] == 'pass': pass else: - text_widget.write('Ensemble Mode - Running Model - ' + post_mdx_name + '\n\n') + text_widget.write(f'Ensemble Mode - {post_mdx_name} - Model {current_model}/{model_count}\n\n') + #text_widget.write('Ensemble Mode - Running Model - ' + post_mdx_name + '\n\n') if c['mdx_model_run'] == 'no': if 'UVR' in mdx_name: @@ -3244,15 +3564,20 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress #print(model_params_mdx) - print('demucs_only? ', demucs_only) + #print('demucs_only? 
', demucs_only) + + if demucs_only == 'on': + inference_type = 'demucs_only' + else: + inference_type = 'inference_mdx' + progress_demucs_kwargs = {'total_files': len(data['input_paths']), + 'file_num': file_num, 'inference_type': inference_type} + if data['noise_pro_select'] == 'Auto Select': noise_pro_set = noise_pro else: noise_pro_set = data['noise_pro_select'] - - update_progress(**progress_kwargs, - step=0) if data['noisereduc_s'] == 'None': pass @@ -3266,6 +3591,12 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress e = os.path.join(data["export_path"]) + current_model += 1 + current_model_bar += 1 + + update_progress(**progress_kwargs, + step=0) + pred = Predictor() if c['mdx_model_run'] == 'yes': @@ -3273,6 +3604,13 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress widget_text.write(base_text + 'Only vocal and instrumental MDX-Net models are supported in \nensemble mode.\n') widget_text.write(base_text + 'Moving on to next model...\n\n') continue + if stemset_n == '(Instrumental)': + if not 'UVR' in demucs_model_set: + if data['demucsmodel']: + widget_text.write(base_text + 'The selected Demucs model cannot be used with this model.\n') + widget_text.write(base_text + 'Only 2 stem Demucs models are compatible with this model.\n') + widget_text.write(base_text + 'Setting Demucs model to \"UVR_Demucs_Model_1\".\n\n') + demucs_model_set = 'UVR_Demucs_Model_1' if modeltype == 'Not Set' or \ noise_pro == 'Not Set' or \ stemset_n == 'Not Set' or \ @@ -4203,7 +4541,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write(f'\nError Received:\n\n') text_widget.write(f'Could not write audio file.\n') text_widget.write(f'This could be due to low storage on target device or a system permissions issue.\n') - text_widget.write(f"\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write(f"\nGo to the Settings Menu and click 
\"Open Error Log\" for raw error details.\n") text_widget.write(f'\nIf the error persists, please contact the developers.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') try: @@ -4313,7 +4651,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress text_widget.write("\n" + base_text + f'Separation failed for the following audio file:\n') text_widget.write(base_text + f'"{os.path.basename(music_file)}"\n') text_widget.write(f'\nError Received:\n') - text_widget.write("\nFor raw error details, go to the Error Log tab in the Help Guide.\n") + text_widget.write("\nGo to the Settings Menu and click \"Open Error Log\" for raw error details.\n") text_widget.write("\n" + f'Please address the error and try again.' + "\n") text_widget.write(f'If this error persists, please contact the developers with the error details.\n\n') text_widget.write(f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}') @@ -4324,7 +4662,7 @@ def main(window: tk.Wm, text_widget: tk.Text, button_widget: tk.Button, progress update_progress(**progress_kwargs, step=1) - print('Done!') + #print('Done!') progress_var.set(0) if not data['ensChoose'] == 'Manual Ensemble':