|
"""Chunked decoder experiment.""" |
|
import os |
|
from os.path import join as p_join |
|
|
|
from audiocraft.data.audio import audio_write |
|
from datasets import load_dataset |
|
import torch |
|
from multibanddiffusion import MultiBandDiffusion |
|
|
|
|
|
# Root directory for all written audio output.
cache_dir = "audio"

os.makedirs(cache_dir, exist_ok=True)

# Number of codebooks used on both the encoder and decoder side of MBD.
num_codes = 3

# NOTE(review): presumably loads pretrained MultiBandDiffusion weights —
# confirm against the local `multibanddiffusion` module.
mbd_model = MultiBandDiffusion.from_pretrained(num_codebooks_decoder=num_codes, num_codebooks_encoder=num_codes)

# [chunk_length, stride] pairs forwarded to tokens_to_wav; a stride smaller
# than the chunk length produces overlapping chunks.
configs = [
    [75, 55],
    [75, 65],
    [150, 120],
    [150, 140],
]

# Strategies for merging overlapping decoded chunks back into one waveform.
concat_strategy = ["first", "crossfade", "last"]
|
|
|
|
|
def test_hf(hf_dataset: str, sample_size: int = 128, batch_size: int = 32, skip_enhancer: bool = False):
    """Reconstruct audio from an HF dataset under every chunk/stride/concat config.

    For each batch of the dataset's test split, encodes the padded waveforms to
    tokens once, then decodes them with every `configs` x `concat_strategy`
    combination, writing each reconstruction under `cache_dir`.

    Args:
        hf_dataset: Hugging Face dataset name; must expose a "test" split.
        sample_size: number of examples to process (clamped to the split size).
        batch_size: examples encoded/decoded together per model call.
        skip_enhancer: if True, skip the enhancer stage; output directories
            then omit the ".enhancer" suffix.
    """
    output_dir = p_join(cache_dir, os.path.basename(hf_dataset))
    os.makedirs(output_dir, exist_ok=True)
    dataset = load_dataset(hf_dataset, split="test")
    # Clamp so .select() does not raise on splits smaller than sample_size.
    dataset = dataset.select(range(min(sample_size, len(dataset))))
    # Pack each batch into a single row so that iterating the mapped dataset
    # yields whole batches instead of single examples.
    dataset = dataset.map(
        lambda batch: {k: [v] for k, v in batch.items()},
        batched=True,
        batch_size=batch_size
    )
    for data in dataset:
        # All samples in a batch must share one sampling rate.
        sr_list = [d["sampling_rate"] for d in data["audio"]]
        assert len(set(sr_list)) == 1, sr_list
        sr = sr_list[0]
        array = [d["array"] for d in data["audio"]]
        # Right-pad every waveform with zeros to the batch maximum length.
        # NOTE(review): assumes each entry is a Python list (list concatenation);
        # confirm the dataset does not decode audio to numpy arrays.
        max_length = max(len(a) for a in array)
        array = [a + [0] * (max_length - len(a)) for a in array]
        wav = torch.as_tensor(array, dtype=torch.float32).unsqueeze(1)  # (batch, 1, time)
        tokens = mbd_model.wav_to_tokens(wav, sr)
        for chunk, stride in configs:
            for strategy in concat_strategy:
                re_wav, sr = mbd_model.tokens_to_wav(
                    tokens, chunk_length=chunk, stride=stride, concat_strategy=strategy, skip_enhancer=skip_enhancer
                )
                # Single directory name; ".enhancer" marks enhanced outputs.
                suffix = "" if skip_enhancer else ".enhancer"
                dir_name = f"reconstructed_{num_codes}codes.{chunk}chunks.{stride}strides.{strategy}{suffix}"
                for idx, one_wav in enumerate(re_wav):
                    output = p_join(output_dir, dir_name, str(idx))
                    audio_write(output, one_wav, sr, strategy="loudness", loudness_compressor=True)
|
|
|
|
|
if __name__ == '__main__':
    # Run each dataset twice: first with the enhancer, then without it.
    target_datasets = [
        "japanese-asr/ja_asr.reazonspeech_test",
        "japanese-asr/ja_asr.jsut_basic5000",
    ]
    for skip in (False, True):
        for dataset_name in target_datasets:
            test_hf(dataset_name, sample_size=64, batch_size=16, skip_enhancer=skip)
|
|