Skip to content
Snippets Groups Projects
Commit 06698849 authored by chinyun_yu_joey's avatar chinyun_yu_joey
Browse files

feat: deterministic shifts in input samples

parent 45d23949
No related branches found
Tags submission-test-demucs-split-5
No related merge requests found
......@@ -8,3 +8,4 @@ musdb
SoundFile
scipy
norbert
tqdm
......@@ -3,22 +3,28 @@
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm
from evaluator.music_demixing import MusicDemixingPredictor
def separate(
    audio,
    rate,
    model,
    shifts=10,
    device="cpu",
):
    """Run source separation, averaging predictions over deterministic time shifts.

    The model is applied to the input audio cropped at ``shifts`` evenly spaced
    start offsets in ``[0, 0.5 * rate]`` samples, and the per-sample average of
    the aligned outputs is returned. Shift averaging smooths out the model's
    sensitivity to the exact frame alignment of the input.

    Args:
        audio: numpy array of shape (samples, channels).
        rate: sample rate in Hz; the maximum shift is half a second of audio.
        model: separation network mapping a (1, channels, samples) tensor to a
            (1, 4, channels, samples) tensor (4 stems). -- assumed from the
            squeeze/repeat shapes below; confirm against the model definition.
        shifts: number of deterministic shift positions to average over.
        device: torch device string the model and input are moved to.

    Returns:
        numpy array of shape (4, channels, samples) with the averaged stems.
    """
    # (samples, channels) -> (channels, samples) tensor on the target device
    audio_torch = torch.tensor(audio.T).float().to(device)
    max_shift = int(rate * 0.5)  # shift window: up to half a second
    with torch.no_grad():
        # Accumulator for the 4 stems: (4, channels, samples)
        y = torch.zeros_like(audio_torch).repeat(4, 1, 1)
        # Per-sample contribution count: samples before a given shift offset
        # are covered by fewer forward passes, so a flat division by `shifts`
        # would attenuate the start of the signal.
        counts = torch.zeros(
            audio_torch.shape[-1], dtype=audio_torch.dtype, device=audio_torch.device
        )
        for shift in tqdm(
            torch.linspace(0, max_shift, steps=shifts, dtype=torch.long).tolist()
        ):
            # Crop the input at this offset and write the prediction back
            # into the accumulator aligned to the original timeline.
            shifted_y = model(audio_torch.unsqueeze(0)[..., shift:]).squeeze()
            y[..., shift:] += shifted_y
            counts[shift:] += 1
        # Shift 0 is always included, so every sample has count >= 1.
        y /= counts
    # .cpu() is required before .numpy() when running on a CUDA device.
    return y.cpu().numpy()
......@@ -45,6 +51,7 @@ class DemucsPredictor(MusicDemixingPredictor):
# Step 3: Perform separation
estimates = separate(
x,
rate,
self.separator,
)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment