Add ASMR VAD inference support and model assets
This commit is contained in:
parent
26ea9aba51
commit
d605aadef3
9 changed files with 1259 additions and 37 deletions
1
.gitignore
vendored
1
.gitignore
vendored
|
|
@ -1 +1,2 @@
|
||||||
mp3/
|
mp3/
|
||||||
|
__pycache__/
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
{
|
|
||||||
// 可以在这里控制各种生成字幕的参数, 下面这个链接里的参数都可以控制
|
|
||||||
// https://github.com/SYSTRAN/faster-whisper/blob/dea24cbcc6cbef23ff599a63be0bbb647a0b23d6/faster_whisper/transcribe.py#L733
|
|
||||||
// 默认只在这里写了少量参数, 有需要改别的参数的话可以直接加到下面
|
|
||||||
|
|
||||||
"vad_parameters": {
|
|
||||||
// VAD检测阈值
|
|
||||||
// 太大会导致漏翻, 太小可能会导致时间轴不准或文本质量下降(幻听)
|
|
||||||
"threshold": 0.5,
|
|
||||||
},
|
|
||||||
|
|
||||||
// 避免时间轴向前偏移过长的问题
|
|
||||||
"max_initial_timestamp": 30,
|
|
||||||
|
|
||||||
"repetition_penalty": 1.1,
|
|
||||||
|
|
||||||
}
|
|
||||||
300
infer.py
300
infer.py
|
|
@ -1,12 +1,93 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import json
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Iterable
|
from typing import Any, Iterable
|
||||||
|
|
||||||
|
import librosa
|
||||||
|
import numpy as np
|
||||||
from faster_whisper import WhisperModel
|
from faster_whisper import WhisperModel
|
||||||
|
|
||||||
|
|
||||||
|
class AsmrVadModel:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
model_path: Path,
|
||||||
|
force_cpu: bool = False,
|
||||||
|
num_threads: int = 1,
|
||||||
|
) -> None:
|
||||||
|
try:
|
||||||
|
import onnxruntime as ort
|
||||||
|
except ImportError as exc:
|
||||||
|
raise RuntimeError("onnxruntime is required for --vad-mode asmr") from exc
|
||||||
|
|
||||||
|
try:
|
||||||
|
from transformers import WhisperFeatureExtractor
|
||||||
|
except ImportError as exc:
|
||||||
|
raise RuntimeError("transformers is required for --vad-mode asmr") from exc
|
||||||
|
|
||||||
|
metadata_path = model_path.with_name("model_metadata.json")
|
||||||
|
metadata = {
|
||||||
|
"whisper_model_name": "openai/whisper-base",
|
||||||
|
"frame_duration_ms": 20,
|
||||||
|
"total_duration_ms": 30000,
|
||||||
|
}
|
||||||
|
if metadata_path.exists():
|
||||||
|
metadata.update(json.loads(metadata_path.read_text(encoding="utf-8")))
|
||||||
|
|
||||||
|
self.sample_rate = 16000
|
||||||
|
self.frame_duration_ms = int(metadata.get("frame_duration_ms", 20))
|
||||||
|
self.chunk_duration_ms = int(metadata.get("total_duration_ms", 30000))
|
||||||
|
self.chunk_samples = int(self.chunk_duration_ms * self.sample_rate / 1000)
|
||||||
|
|
||||||
|
opts = ort.SessionOptions()
|
||||||
|
opts.inter_op_num_threads = num_threads
|
||||||
|
opts.intra_op_num_threads = num_threads
|
||||||
|
|
||||||
|
providers = ["CPUExecutionProvider"]
|
||||||
|
if not force_cpu and "CUDAExecutionProvider" in ort.get_available_providers():
|
||||||
|
providers.insert(0, "CUDAExecutionProvider")
|
||||||
|
|
||||||
|
whisper_model_name = metadata.get("whisper_model_name", "openai/whisper-base")
|
||||||
|
local_whisper_path = Path(whisper_model_name)
|
||||||
|
if local_whisper_path.exists():
|
||||||
|
feature_extractor_source = str(local_whisper_path)
|
||||||
|
elif Path("model").exists():
|
||||||
|
feature_extractor_source = "model"
|
||||||
|
else:
|
||||||
|
feature_extractor_source = whisper_model_name
|
||||||
|
|
||||||
|
self.feature_extractor = WhisperFeatureExtractor.from_pretrained(feature_extractor_source)
|
||||||
|
self.session = ort.InferenceSession(str(model_path), providers=providers, sess_options=opts)
|
||||||
|
self.input_name = self.session.get_inputs()[0].name
|
||||||
|
self.output_names = [output.name for output in self.session.get_outputs()]
|
||||||
|
self.providers = self.session.get_providers()
|
||||||
|
|
||||||
|
def load_audio(self, audio_path: Path) -> np.ndarray:
|
||||||
|
audio, _ = librosa.load(str(audio_path), sr=self.sample_rate, mono=True)
|
||||||
|
return audio.astype(np.float32, copy=False)
|
||||||
|
|
||||||
|
def predict_probabilities(self, audio: np.ndarray) -> np.ndarray:
|
||||||
|
probabilities: list[np.ndarray] = []
|
||||||
|
for start in range(0, len(audio), self.chunk_samples):
|
||||||
|
chunk = audio[start : start + self.chunk_samples]
|
||||||
|
if len(chunk) < self.chunk_samples:
|
||||||
|
chunk = np.pad(chunk, (0, self.chunk_samples - len(chunk)), mode="constant")
|
||||||
|
|
||||||
|
features = self.feature_extractor(
|
||||||
|
chunk,
|
||||||
|
sampling_rate=self.sample_rate,
|
||||||
|
return_tensors="np",
|
||||||
|
).input_features
|
||||||
|
logits = self.session.run(self.output_names, {self.input_name: features})[0][0]
|
||||||
|
probabilities.append(1.0 / (1.0 + np.exp(-logits)))
|
||||||
|
|
||||||
|
if not probabilities:
|
||||||
|
return np.array([], dtype=np.float32)
|
||||||
|
return np.concatenate(probabilities, axis=0)
|
||||||
|
|
||||||
|
|
||||||
def load_model(
|
def load_model(
|
||||||
device: str,
|
device: str,
|
||||||
compute_type: str,
|
compute_type: str,
|
||||||
|
|
@ -40,6 +121,79 @@ def format_timestamp(seconds: float) -> str:
|
||||||
return f"[{minutes:02d}:{secs:02d}.{centiseconds:02d}]"
|
return f"[{minutes:02d}:{secs:02d}.{centiseconds:02d}]"
|
||||||
|
|
||||||
|
|
||||||
|
def detect_asmr_speech_segments(
|
||||||
|
audio: np.ndarray,
|
||||||
|
vad_model: AsmrVadModel,
|
||||||
|
threshold: float,
|
||||||
|
neg_threshold: float | None,
|
||||||
|
min_speech_duration_ms: int,
|
||||||
|
min_silence_duration_ms: int,
|
||||||
|
speech_pad_ms: int,
|
||||||
|
) -> list[dict[str, float]]:
|
||||||
|
speech_probs = vad_model.predict_probabilities(audio)
|
||||||
|
if speech_probs.size == 0:
|
||||||
|
return []
|
||||||
|
|
||||||
|
frame_ms = vad_model.frame_duration_ms
|
||||||
|
min_speech_frames = max(1, int(round(min_speech_duration_ms / frame_ms)))
|
||||||
|
min_silence_frames = max(1, int(round(min_silence_duration_ms / frame_ms)))
|
||||||
|
speech_pad_frames = max(0, int(round(speech_pad_ms / frame_ms)))
|
||||||
|
neg_threshold = max(threshold - 0.15, 0.01) if neg_threshold is None else neg_threshold
|
||||||
|
|
||||||
|
raw_segments: list[tuple[int, int]] = []
|
||||||
|
triggered = False
|
||||||
|
current_start = 0
|
||||||
|
temp_end: int | None = None
|
||||||
|
|
||||||
|
for frame_idx, speech_prob in enumerate(speech_probs):
|
||||||
|
if speech_prob >= threshold and not triggered:
|
||||||
|
triggered = True
|
||||||
|
current_start = frame_idx
|
||||||
|
temp_end = None
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not triggered:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if speech_prob < neg_threshold:
|
||||||
|
if temp_end is None:
|
||||||
|
temp_end = frame_idx
|
||||||
|
elif frame_idx - temp_end >= min_silence_frames:
|
||||||
|
if temp_end - current_start >= min_speech_frames:
|
||||||
|
raw_segments.append((current_start, temp_end))
|
||||||
|
triggered = False
|
||||||
|
temp_end = None
|
||||||
|
elif temp_end is not None:
|
||||||
|
temp_end = None
|
||||||
|
|
||||||
|
if triggered:
|
||||||
|
end_frame = temp_end if temp_end is not None else len(speech_probs)
|
||||||
|
if end_frame - current_start >= min_speech_frames:
|
||||||
|
raw_segments.append((current_start, end_frame))
|
||||||
|
|
||||||
|
segments: list[dict[str, float]] = []
|
||||||
|
for idx, (start_frame, end_frame) in enumerate(raw_segments):
|
||||||
|
prev_end = raw_segments[idx - 1][1] if idx > 0 else 0
|
||||||
|
next_start = raw_segments[idx + 1][0] if idx + 1 < len(raw_segments) else len(speech_probs)
|
||||||
|
padded_start = max(prev_end, start_frame - speech_pad_frames)
|
||||||
|
padded_end = min(next_start, end_frame + speech_pad_frames)
|
||||||
|
segments.append(
|
||||||
|
{
|
||||||
|
"start": padded_start * frame_ms / 1000,
|
||||||
|
"end": padded_end * frame_ms / 1000,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
return segments
|
||||||
|
|
||||||
|
|
||||||
|
def build_clip_timestamps(segments: list[dict[str, float]]) -> list[float]:
|
||||||
|
clip_timestamps: list[float] = []
|
||||||
|
for segment in segments:
|
||||||
|
clip_timestamps.extend([segment["start"], segment["end"]])
|
||||||
|
return clip_timestamps
|
||||||
|
|
||||||
|
|
||||||
def write_lrc(segments: Iterable, output_path: Path) -> None:
|
def write_lrc(segments: Iterable, output_path: Path) -> None:
|
||||||
lines = []
|
lines = []
|
||||||
for seg in segments:
|
for seg in segments:
|
||||||
|
|
@ -57,17 +211,21 @@ def transcribe_file(
|
||||||
model: WhisperModel,
|
model: WhisperModel,
|
||||||
audio_path: Path,
|
audio_path: Path,
|
||||||
beam_size: int,
|
beam_size: int,
|
||||||
|
language: str,
|
||||||
|
task: str,
|
||||||
vad: bool,
|
vad: bool,
|
||||||
vad_parameters: dict | None,
|
vad_parameters: dict | None,
|
||||||
|
clip_timestamps: str | list[float] = "0",
|
||||||
extra_generation_args: dict[str, Any] | None = None,
|
extra_generation_args: dict[str, Any] | None = None,
|
||||||
) -> tuple[list, str, float]:
|
) -> tuple[list, str, float]:
|
||||||
segments_iter, info = model.transcribe(
|
segments_iter, info = model.transcribe(
|
||||||
str(audio_path),
|
str(audio_path),
|
||||||
task="translate",
|
task=task,
|
||||||
beam_size=beam_size,
|
beam_size=beam_size,
|
||||||
vad_filter=vad,
|
vad_filter=vad,
|
||||||
vad_parameters=vad_parameters if vad else None,
|
vad_parameters=vad_parameters if vad else None,
|
||||||
language="ja",
|
clip_timestamps=clip_timestamps,
|
||||||
|
language=language,
|
||||||
**(extra_generation_args or {}),
|
**(extra_generation_args or {}),
|
||||||
)
|
)
|
||||||
print(f"[{audio_path.name}] Detected language: {info.language} (prob={info.language_probability:.2f})")
|
print(f"[{audio_path.name}] Detected language: {info.language} (prob={info.language_probability:.2f})")
|
||||||
|
|
@ -98,7 +256,24 @@ def parse_args() -> argparse.Namespace:
|
||||||
help="Path to an audio file or directory (default: ./mp3).",
|
help="Path to an audio file or directory (default: ./mp3).",
|
||||||
)
|
)
|
||||||
parser.add_argument("--beam-size", type=int, default=5, help="Beam size for decoding.")
|
parser.add_argument("--beam-size", type=int, default=5, help="Beam size for decoding.")
|
||||||
parser.add_argument("--no-vad", action="store_true", help="Disable VAD (voice activity detection) filtering.")
|
parser.add_argument(
|
||||||
|
"--language",
|
||||||
|
default="ja",
|
||||||
|
help="Input language code passed to Whisper (default: ja). Use 'auto' for auto-detection.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--task",
|
||||||
|
default="translate",
|
||||||
|
choices=["transcribe", "translate"],
|
||||||
|
help="Whisper task mode (default: translate).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vad-mode",
|
||||||
|
default="asmr",
|
||||||
|
choices=["asmr", "builtin", "none"],
|
||||||
|
help="VAD mode: asmr ONNX model, faster-whisper builtin VAD, or none (default: asmr).",
|
||||||
|
)
|
||||||
|
parser.add_argument("--no-vad", action="store_true", help=argparse.SUPPRESS)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--vad-threshold",
|
"--vad-threshold",
|
||||||
type=float,
|
type=float,
|
||||||
|
|
@ -111,17 +286,51 @@ def parse_args() -> argparse.Namespace:
|
||||||
default=None,
|
default=None,
|
||||||
help="Optional silence probability threshold for VAD (useful to smooth speech end).",
|
help="Optional silence probability threshold for VAD (useful to smooth speech end).",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vad-min-speech-ms",
|
||||||
|
type=int,
|
||||||
|
default=300,
|
||||||
|
help="Minimum speech duration (ms) kept by VAD (default: 300).",
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--vad-min-silence-ms",
|
"--vad-min-silence-ms",
|
||||||
type=int,
|
type=int,
|
||||||
default=400,
|
default=100,
|
||||||
help="Minimum silence (ms) to cut a speech chunk when VAD is enabled (default: 400).",
|
help="Minimum silence (ms) to cut a speech chunk when VAD is enabled (default: 100).",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--vad-pad-ms",
|
"--vad-pad-ms",
|
||||||
type=int,
|
type=int,
|
||||||
default=500,
|
default=200,
|
||||||
help="Padding (ms) added before/after each detected speech chunk (default: 500).",
|
help="Padding (ms) added before/after each detected speech chunk (default: 200).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vad-model-path",
|
||||||
|
default="vad_models/Whisper-Vad-EncDec-ASMR-onnx/model.onnx",
|
||||||
|
help="Path to the external ASMR VAD ONNX model.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vad-force-cpu",
|
||||||
|
action="store_true",
|
||||||
|
help="Force the external ASMR VAD to run on CPU.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vad-num-threads",
|
||||||
|
type=int,
|
||||||
|
default=1,
|
||||||
|
help="CPU thread count for the external ASMR VAD (default: 1).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--max-initial-timestamp",
|
||||||
|
type=float,
|
||||||
|
default=30.0,
|
||||||
|
help="Maximum initial timestamp passed to Whisper decoding (default: 30).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--repetition-penalty",
|
||||||
|
type=float,
|
||||||
|
default=1.1,
|
||||||
|
help="Repetition penalty passed to Whisper decoding (default: 1.1).",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--device",
|
"--device",
|
||||||
|
|
@ -148,6 +357,8 @@ def parse_args() -> argparse.Namespace:
|
||||||
|
|
||||||
def main() -> None:
|
def main() -> None:
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
|
if args.no_vad:
|
||||||
|
args.vad_mode = "none"
|
||||||
target = Path(args.audio)
|
target = Path(args.audio)
|
||||||
|
|
||||||
audio_files = collect_audio_files(target)
|
audio_files = collect_audio_files(target)
|
||||||
|
|
@ -166,37 +377,86 @@ def main() -> None:
|
||||||
else:
|
else:
|
||||||
print(f"Using device={device_used}, compute_type={compute_used}")
|
print(f"Using device={device_used}, compute_type={compute_used}")
|
||||||
|
|
||||||
vad_parameters = {
|
builtin_vad_parameters = {
|
||||||
"threshold": args.vad_threshold,
|
"threshold": args.vad_threshold,
|
||||||
"neg_threshold": args.vad_neg_threshold,
|
"neg_threshold": args.vad_neg_threshold,
|
||||||
|
"min_speech_duration_ms": args.vad_min_speech_ms,
|
||||||
"min_silence_duration_ms": args.vad_min_silence_ms,
|
"min_silence_duration_ms": args.vad_min_silence_ms,
|
||||||
"speech_pad_ms": args.vad_pad_ms,
|
"speech_pad_ms": args.vad_pad_ms,
|
||||||
}
|
}
|
||||||
if args.no_vad:
|
asmr_vad: AsmrVadModel | None = None
|
||||||
vad_parameters = None
|
if args.vad_mode == "builtin":
|
||||||
else:
|
|
||||||
print(
|
print(
|
||||||
"VAD enabled: "
|
"Built-in VAD enabled: "
|
||||||
f"threshold={vad_parameters['threshold']}, "
|
f"threshold={builtin_vad_parameters['threshold']}, "
|
||||||
f"neg_threshold={vad_parameters['neg_threshold']}, "
|
f"neg_threshold={builtin_vad_parameters['neg_threshold']}, "
|
||||||
f"min_silence_ms={vad_parameters['min_silence_duration_ms']}, "
|
f"min_speech_ms={builtin_vad_parameters['min_speech_duration_ms']}, "
|
||||||
f"pad_ms={vad_parameters['speech_pad_ms']}"
|
f"min_silence_ms={builtin_vad_parameters['min_silence_duration_ms']}, "
|
||||||
|
f"pad_ms={builtin_vad_parameters['speech_pad_ms']}"
|
||||||
)
|
)
|
||||||
|
elif args.vad_mode == "asmr":
|
||||||
|
vad_model_path = Path(args.vad_model_path)
|
||||||
|
if not vad_model_path.exists():
|
||||||
|
raise FileNotFoundError(f"ASMR VAD model not found: {vad_model_path}")
|
||||||
|
asmr_vad = AsmrVadModel(
|
||||||
|
model_path=vad_model_path,
|
||||||
|
force_cpu=args.vad_force_cpu,
|
||||||
|
num_threads=args.vad_num_threads,
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"ASMR VAD enabled: "
|
||||||
|
f"model={vad_model_path}, "
|
||||||
|
f"providers={asmr_vad.providers}, "
|
||||||
|
f"threshold={args.vad_threshold}, "
|
||||||
|
f"neg_threshold={args.vad_neg_threshold}, "
|
||||||
|
f"min_speech_ms={args.vad_min_speech_ms}, "
|
||||||
|
f"min_silence_ms={args.vad_min_silence_ms}, "
|
||||||
|
f"pad_ms={args.vad_pad_ms}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("VAD disabled")
|
||||||
|
|
||||||
generation_args: dict[str, Any] = {
|
generation_args: dict[str, Any] = {
|
||||||
"max_initial_timestamp": 30,
|
"max_initial_timestamp": args.max_initial_timestamp,
|
||||||
"repetition_penalty": 1.1,
|
"repetition_penalty": args.repetition_penalty,
|
||||||
}
|
}
|
||||||
|
|
||||||
for idx, audio_path in enumerate(audio_files, start=1):
|
for idx, audio_path in enumerate(audio_files, start=1):
|
||||||
print(f"\n[{idx}/{len(audio_files)}] Processing {audio_path}")
|
print(f"\n[{idx}/{len(audio_files)}] Processing {audio_path}")
|
||||||
try:
|
try:
|
||||||
|
use_builtin_vad = args.vad_mode == "builtin"
|
||||||
|
clip_timestamps: str | list[float] = "0"
|
||||||
|
if args.vad_mode == "asmr" and asmr_vad is not None:
|
||||||
|
audio = asmr_vad.load_audio(audio_path)
|
||||||
|
speech_segments = detect_asmr_speech_segments(
|
||||||
|
audio=audio,
|
||||||
|
vad_model=asmr_vad,
|
||||||
|
threshold=args.vad_threshold,
|
||||||
|
neg_threshold=args.vad_neg_threshold,
|
||||||
|
min_speech_duration_ms=args.vad_min_speech_ms,
|
||||||
|
min_silence_duration_ms=args.vad_min_silence_ms,
|
||||||
|
speech_pad_ms=args.vad_pad_ms,
|
||||||
|
)
|
||||||
|
if speech_segments:
|
||||||
|
kept_duration = sum(segment["end"] - segment["start"] for segment in speech_segments)
|
||||||
|
print(
|
||||||
|
"ASMR VAD kept "
|
||||||
|
f"{len(speech_segments)} segments "
|
||||||
|
f"({kept_duration:.2f}s speech)"
|
||||||
|
)
|
||||||
|
clip_timestamps = build_clip_timestamps(speech_segments)
|
||||||
|
else:
|
||||||
|
print("ASMR VAD found no speech segments; falling back to full-audio decoding.")
|
||||||
|
|
||||||
segments, _, _ = transcribe_file(
|
segments, _, _ = transcribe_file(
|
||||||
model=model,
|
model=model,
|
||||||
audio_path=audio_path,
|
audio_path=audio_path,
|
||||||
beam_size=args.beam_size,
|
beam_size=args.beam_size,
|
||||||
vad=not args.no_vad,
|
language=args.language,
|
||||||
vad_parameters=vad_parameters,
|
task=args.task,
|
||||||
|
vad=use_builtin_vad,
|
||||||
|
vad_parameters=builtin_vad_parameters if use_builtin_vad else None,
|
||||||
|
clip_timestamps=clip_timestamps,
|
||||||
extra_generation_args=generation_args,
|
extra_generation_args=generation_args,
|
||||||
)
|
)
|
||||||
write_lrc(segments, audio_path.with_suffix(".lrc"))
|
write_lrc(segments, audio_path.with_suffix(".lrc"))
|
||||||
|
|
|
||||||
35
vad_models/Whisper-Vad-EncDec-ASMR-onnx/.gitattributes
vendored
Normal file
35
vad_models/Whisper-Vad-EncDec-ASMR-onnx/.gitattributes
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
||||||
|
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.model filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||||
|
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||||
224
vad_models/Whisper-Vad-EncDec-ASMR-onnx/README.md
Normal file
224
vad_models/Whisper-Vad-EncDec-ASMR-onnx/README.md
Normal file
|
|
@ -0,0 +1,224 @@
|
||||||
|
---
|
||||||
|
language:
|
||||||
|
- ja
|
||||||
|
- multilingual
|
||||||
|
tags:
|
||||||
|
- voice-activity-detection
|
||||||
|
- vad
|
||||||
|
- whisper
|
||||||
|
- onnx
|
||||||
|
- speech-detection
|
||||||
|
- audio-classification
|
||||||
|
- asmr
|
||||||
|
- japanese
|
||||||
|
- whispered-speech
|
||||||
|
license: mit
|
||||||
|
base_model: openai/whisper-base
|
||||||
|
library_name: transformers
|
||||||
|
pipeline_tag: audio-classification
|
||||||
|
---
|
||||||
|
|
||||||
|
# Whisper-base Voice Activity Detection (VAD) for Japanese ASMR - ONNX
|
||||||
|
|
||||||
|
## Model Description
|
||||||
|
|
||||||
|
This is a refined Whisper-based Voice Activity Detection (VAD) model that leverages the pre-trained Whisper encoder with a lightweight non-autoregressive decoder for high-precision speech activity detection. While fine-tuned on Japanese ASMR content for optimal performance on soft speech and whispers, the model retains Whisper's robust multilingual foundation, enabling effective speech detection across diverse languages and acoustic conditions. It has been optimized and exported to ONNX format for efficient inference across different platforms. For the full training code, configs, and ONNX export utilities, see the GitHub repository: [TransWithAI/whisper-vad](https://github.com/TransWithAI/whisper-vad).
|
||||||
|
|
||||||
|
This work builds upon recent research demonstrating the positive transfer of Whisper's speech representations to VAD tasks, as shown in [WhisperSeg](https://github.com/nianlonggu/WhisperSeg) and related work.
|
||||||
|
|
||||||
|
### Key Features
|
||||||
|
|
||||||
|
- **Architecture**: Encoder-Decoder model based on whisper-base
|
||||||
|
- **Frame Resolution**: 20ms per frame for precise temporal detection
|
||||||
|
- **Input Duration**: Processes 30-second audio chunks
|
||||||
|
- **Output**: Frame-level speech/non-speech predictions
|
||||||
|
- **Optimized**: ONNX format for cross-platform deployment
|
||||||
|
- **Real-time capable**: Fast non-autoregressive inference
|
||||||
|
|
||||||
|
### Model Architecture Details
|
||||||
|
|
||||||
|
- **Base Model**: OpenAI whisper-base encoder (frozen during training)
|
||||||
|
- **Decoder**: 2-layer transformer decoder with 8 attention heads
|
||||||
|
- **Processing**:
|
||||||
|
- Input: 30-second audio chunks (480,000 samples @ 16kHz)
|
||||||
|
- Features: 80-channel log-mel spectrogram
|
||||||
|
- Output: 1500 frame predictions (20ms per frame)
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
|
||||||
|
- **Frame Duration**: 20ms per frame for precise temporal detection
|
||||||
|
- **Processing Speed**: ~100x real-time on CPU (single-sample processing)
|
||||||
|
- **Batch Processing**: Currently limited to batch size of 1 due to ONNX export constraints, but single-sample inference is extremely fast
|
||||||
|
- **Specialized Training**: Japanese ASMR and whispered speech
|
||||||
|
- **Generalization**: Despite being fine-tuned on Japanese ASMR, the model inherits Whisper's strong multilingual capabilities and can effectively detect speech in various languages and acoustic environments
|
||||||
|
|
||||||
|
### Advantages over Native Whisper VAD
|
||||||
|
|
||||||
|
- **No hallucinations**: Discriminative model cannot generate spurious text
|
||||||
|
- **Much faster**: Single forward pass, non-autoregressive inference
|
||||||
|
- **Higher precision**: 20ms frame-level temporal resolution vs Whisper's 30s chunks
|
||||||
|
- **Robust**: Focal loss training handles speech/silence imbalance effectively
|
||||||
|
- **Lightweight**: Decoder adds minimal parameters to base Whisper encoder
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Quick Start with ONNX Runtime
|
||||||
|
|
||||||
|
```python
|
||||||
|
import numpy as np
|
||||||
|
import onnxruntime as ort
|
||||||
|
from transformers import WhisperFeatureExtractor
|
||||||
|
import librosa
|
||||||
|
|
||||||
|
# Load model
|
||||||
|
session = ort.InferenceSession("model.onnx")
|
||||||
|
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
|
||||||
|
|
||||||
|
# Load and preprocess audio
|
||||||
|
audio, sr = librosa.load("audio.wav", sr=16000)
|
||||||
|
audio_chunk = audio[:480000] # 30 seconds
|
||||||
|
|
||||||
|
# Extract features
|
||||||
|
inputs = feature_extractor(
|
||||||
|
audio_chunk,
|
||||||
|
sampling_rate=16000,
|
||||||
|
return_tensors="np"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run inference
|
||||||
|
outputs = session.run(None, {session.get_inputs()[0].name: inputs.input_features})
|
||||||
|
predictions = outputs[0] # Shape: [1, 1500] - 1500 frames of 20ms each
|
||||||
|
|
||||||
|
# Apply threshold
|
||||||
|
speech_frames = predictions[0] > 0.5
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using the Provided Inference Script
|
||||||
|
|
||||||
|
The model repository includes a comprehensive `inference.py` script with advanced features:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from inference import WhisperVADInference
|
||||||
|
|
||||||
|
# Initialize model
|
||||||
|
vad = WhisperVADInference(
|
||||||
|
model_path="model.onnx",
|
||||||
|
threshold=0.5, # Speech detection threshold
|
||||||
|
min_speech_duration=0.25, # Minimum speech segment duration
|
||||||
|
min_silence_duration=0.1 # Minimum silence between segments
|
||||||
|
)
|
||||||
|
|
||||||
|
# Process audio file
|
||||||
|
segments = vad.process_audio("audio.wav")
|
||||||
|
|
||||||
|
# Segments format: List of (start_time, end_time) tuples
|
||||||
|
for start, end in segments:
|
||||||
|
print(f"Speech detected: {start:.2f}s - {end:.2f}s")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Streaming/Real-time Processing
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Process audio stream in chunks
|
||||||
|
vad = WhisperVADInference("model.onnx", streaming=True)
|
||||||
|
|
||||||
|
for audio_chunk in audio_stream:
|
||||||
|
speech_active = vad.process_chunk(audio_chunk)
|
||||||
|
if speech_active:
|
||||||
|
# Handle speech detection
|
||||||
|
pass
|
||||||
|
```
|
||||||
|
|
||||||
|
## Input/Output Specifications
|
||||||
|
|
||||||
|
### Input
|
||||||
|
- **Audio Format**: 16kHz mono audio
|
||||||
|
- **Chunk Size**: 30 seconds (480,000 samples)
|
||||||
|
- **Feature Type**: 80-channel log-mel spectrogram
|
||||||
|
- **Shape**: `[1, 80, 3000]` (batch size fixed to 1 - see note below)
|
||||||
|
|
||||||
|
### Output
|
||||||
|
- **Type**: Frame-level probabilities
|
||||||
|
- **Shape**: `[1, 1500]` (batch size fixed to 1)
|
||||||
|
- **Frame Duration**: 20ms per frame
|
||||||
|
- **Range**: [0, 1] probability of speech presence
|
||||||
|
|
||||||
|
**Note on Batch Processing**: Currently, the ONNX model only supports batch size of 1 due to export limitations between PyTorch transformers and ONNX. However, single-sample inference is highly optimized and runs extremely fast (~100x real-time on CPU), making sequential processing still very efficient for most use cases.
|
||||||
|
|
||||||
|
## Training Details
|
||||||
|
|
||||||
|
### Training Configuration
|
||||||
|
- **Dataset**: ~500 hours Japanese ASMR audio recordings with accurate speech timestamps
|
||||||
|
- **Loss Function**: Focal Loss (α=0.25, γ=2.0) for class imbalance
|
||||||
|
- **Optimizer**: AdamW with learning rate 1.5e-3
|
||||||
|
- **Batch Size**: 128
|
||||||
|
- **Training Duration**: 5 epochs
|
||||||
|
- **Hardware**: Single GPU training with mixed precision (bf16)
|
||||||
|
|
||||||
|
### Data Processing
|
||||||
|
- Audio segmented into 30-second chunks
|
||||||
|
- Frame-level labels generated from word-level timestamps
|
||||||
|
- Augmentation: None (relying on Whisper's pre-training robustness)
|
||||||
|
|
||||||
|
## Limitations and Considerations
|
||||||
|
|
||||||
|
1. **Fixed Duration**: Model expects 30-second chunks; shorter audio needs padding
|
||||||
|
2. **Training Specialization**: While the model performs well across languages and environments due to Whisper's strong multilingual foundation, it excels particularly at:
|
||||||
|
- Japanese ASMR content (primary training data)
|
||||||
|
- Whispered and soft speech detection
|
||||||
|
- Quiet, intimate audio environments
|
||||||
|
3. **Generalization**: The model can effectively handle various languages and normal speech volumes, though performance may be slightly better on content similar to the training data
|
||||||
|
4. **Background Noise**: Performance may degrade in very noisy conditions
|
||||||
|
5. **Music/Singing**: Primarily trained on speech; may have variable performance on singing
|
||||||
|
|
||||||
|
## Model Files
|
||||||
|
|
||||||
|
- `model.onnx`: ONNX model file
|
||||||
|
- `model_metadata.json`: Model configuration and parameters
|
||||||
|
- `inference.py`: Ready-to-use inference script with post-processing
|
||||||
|
- `requirements.txt`: Python dependencies
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install onnxruntime # or onnxruntime-gpu for GPU support
|
||||||
|
pip install librosa transformers numpy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Applications
|
||||||
|
|
||||||
|
- **ASMR Content Processing**: Detect whispered speech and subtle vocalizations in ASMR recordings
|
||||||
|
- **Japanese Audio Processing**: Optimized for Japanese language content, especially soft speech
|
||||||
|
- **Transcription Pre-processing**: Filter out silence before ASR, particularly effective for whispered content
|
||||||
|
- **Audio Indexing**: Identify speech segments in long recordings
|
||||||
|
- **Real-time Communication**: Detect active speech in calls/meetings
|
||||||
|
- **Audio Analytics**: Speech/silence ratio analysis for ASMR and meditation content
|
||||||
|
- **Subtitle Alignment**: Accurate timing for subtitles, including whispered dialogue
|
||||||
|
|
||||||
|
## Citation
|
||||||
|
|
||||||
|
If you use this model, please cite:
|
||||||
|
|
||||||
|
```bibtex
|
||||||
|
@misc{whisper-vad,
|
||||||
|
title={Whisper-VAD: Whisper-based Voice Activity Detection},
|
||||||
|
author={Grider},
|
||||||
|
year={2025},
|
||||||
|
publisher={Hugging Face},
|
||||||
|
howpublished={\url{https://huggingface.co/TransWithAI/Whisper-Vad-EncDec-ASMR-onnx}}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Original Whisper Paper: [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356)
|
||||||
|
- WhisperSeg: [Positive Transfer of the Whisper Speech Transformer to Human and Animal Voice Activity Detection](https://doi.org/10.1101/2023.09.30.560270)
|
||||||
|
- GitHub: [https://github.com/nianlonggu/WhisperSeg](https://github.com/nianlonggu/WhisperSeg)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
## Acknowledgments
|
||||||
|
|
||||||
|
This model builds upon OpenAI's Whisper model and implements architectural refinements for efficient voice activity detection.
|
||||||
690
vad_models/Whisper-Vad-EncDec-ASMR-onnx/inference.py
Normal file
690
vad_models/Whisper-Vad-EncDec-ASMR-onnx/inference.py
Normal file
|
|
@ -0,0 +1,690 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
"""ONNX inference script for encoder_only_decoder VAD model - Silero-style implementation.
|
||||||
|
|
||||||
|
This implementation follows Silero VAD's architecture for cleaner, more efficient processing:
|
||||||
|
- Fixed-size chunk processing for consistent behavior
|
||||||
|
- State management for streaming capability
|
||||||
|
- Hysteresis-based speech detection (dual threshold)
|
||||||
|
- Simplified segment extraction with proper padding
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Callable, Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import librosa
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
from transformers import WhisperFeatureExtractor
|
||||||
|
|
||||||
|
|
||||||
|
class WhisperVADOnnxWrapper:
    """ONNX wrapper for Whisper-based VAD model following Silero's architecture.

    Wraps an exported ONNX VAD model plus the matching WhisperFeatureExtractor,
    and exposes chunk-level (`__call__`) and whole-file (`audio_forward`)
    frame-probability inference.
    """

    def __init__(
        self,
        model_path: str,
        metadata_path: Optional[str] = None,
        force_cpu: bool = False,
        num_threads: int = 1,
    ):
        """Initialize ONNX model wrapper.

        Args:
            model_path: Path to ONNX model file
            metadata_path: Path to metadata JSON file (optional; defaults to
                ``<model>_metadata.json`` next to the model)
            force_cpu: Force CPU execution even if GPU is available
            num_threads: Number of CPU threads for inference

        Raises:
            ImportError: If onnxruntime is not installed.
        """
        try:
            import onnxruntime as ort
        except ImportError:
            raise ImportError(
                "onnxruntime not installed. Install with:\n"
                " pip install onnxruntime # For CPU\n"
                " pip install onnxruntime-gpu # For GPU"
            )

        self.model_path = model_path

        # Load metadata describing the exported model (frame/chunk geometry).
        if metadata_path is None:
            metadata_path = model_path.replace('.onnx', '_metadata.json')

        if os.path.exists(metadata_path):
            with open(metadata_path, 'r') as f:
                self.metadata = json.load(f)
        else:
            warnings.warn("No metadata file found. Using default values.")
            self.metadata = {
                'whisper_model_name': 'openai/whisper-base',
                'frame_duration_ms': 20,
                'total_duration_ms': 30000,
            }

        # Feature extractor converts raw waveform to Whisper log-mel features.
        self.feature_extractor = WhisperFeatureExtractor.from_pretrained(
            self.metadata['whisper_model_name']
        )

        # Set up ONNX Runtime session
        opts = ort.SessionOptions()
        opts.inter_op_num_threads = num_threads
        opts.intra_op_num_threads = num_threads

        providers = ['CPUExecutionProvider']
        if not force_cpu and 'CUDAExecutionProvider' in ort.get_available_providers():
            providers.insert(0, 'CUDAExecutionProvider')

        self.session = ort.InferenceSession(model_path, providers=providers, sess_options=opts)

        # Get input/output info
        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [out.name for out in self.session.get_outputs()]

        # Model parameters
        self.sample_rate = 16000  # Whisper uses 16kHz
        self.frame_duration_ms = self.metadata.get('frame_duration_ms', 20)
        self.chunk_duration_ms = self.metadata.get('total_duration_ms', 30000)
        self.chunk_samples = int(self.chunk_duration_ms * self.sample_rate / 1000)
        self.frames_per_chunk = int(self.chunk_duration_ms / self.frame_duration_ms)

        # Initialize state
        self.reset_states()

        print(f"Model loaded: {model_path}")
        print(f" Providers: {providers}")
        print(f" Chunk duration: {self.chunk_duration_ms}ms")
        print(f" Frame duration: {self.frame_duration_ms}ms")

    def reset_states(self):
        """Reset internal states for new audio stream."""
        self._context = None
        self._last_chunk = None

    def _validate_input(self, audio: np.ndarray, sr: int) -> np.ndarray:
        """Validate and preprocess input audio.

        Args:
            audio: Input audio array, 1-D mono or 2-D multi-channel
                (either (channels, samples) or (samples, channels))
            sr: Sample rate of ``audio``

        Returns:
            Mono audio at 16kHz
        """
        if audio.ndim > 1:
            # Down-mix to mono by averaging over the CHANNEL axis, which is
            # the smaller dimension regardless of layout.
            # BUGFIX: the original inverted this condition and averaged over
            # the larger (time) axis, collapsing the waveform to per-channel
            # mean values.
            channel_axis = 0 if audio.shape[0] < audio.shape[1] else 1
            audio = audio.mean(axis=channel_axis)

        # Resample if needed (librosa is already imported at module level)
        if sr != self.sample_rate:
            audio = librosa.resample(audio, orig_sr=sr, target_sr=self.sample_rate)

        return audio

    def __call__(self, audio_chunk: np.ndarray, sr: int = 16000) -> np.ndarray:
        """Process a single audio chunk.

        Args:
            audio_chunk: Audio chunk to process (padded/truncated to one
                model chunk internally)
            sr: Sample rate

        Returns:
            Frame-level speech probabilities (sigmoid of model logits)
        """
        # Validate input
        audio_chunk = self._validate_input(audio_chunk, sr)

        # Ensure chunk is exactly one model chunk long.
        if len(audio_chunk) < self.chunk_samples:
            audio_chunk = np.pad(
                audio_chunk,
                (0, self.chunk_samples - len(audio_chunk)),
                mode='constant'
            )
        elif len(audio_chunk) > self.chunk_samples:
            audio_chunk = audio_chunk[:self.chunk_samples]

        # Extract log-mel features
        inputs = self.feature_extractor(
            audio_chunk,
            sampling_rate=self.sample_rate,
            return_tensors="np"
        )

        # Run inference
        outputs = self.session.run(
            self.output_names,
            {self.input_name: inputs.input_features}
        )

        # Apply sigmoid to get probabilities
        frame_logits = outputs[0][0]  # Remove batch dimension
        frame_probs = 1 / (1 + np.exp(-frame_logits))

        return frame_probs

    def audio_forward(self, audio: np.ndarray, sr: int = 16000) -> np.ndarray:
        """Process full audio file in chunks (Silero-style).

        Args:
            audio: Full audio array
            sr: Sample rate

        Returns:
            Concatenated frame probabilities for entire audio
        """
        audio = self._validate_input(audio, sr)
        self.reset_states()

        all_probs = []

        # Process in non-overlapping fixed-size chunks; the final partial
        # chunk is zero-padded so every chunk has the same length.
        for i in range(0, len(audio), self.chunk_samples):
            chunk = audio[i:i + self.chunk_samples]

            if len(chunk) < self.chunk_samples:
                chunk = np.pad(chunk, (0, self.chunk_samples - len(chunk)), mode='constant')

            chunk_probs = self.__call__(chunk, self.sample_rate)
            all_probs.append(chunk_probs)

        if all_probs:
            return np.concatenate(all_probs)
        return np.array([])
|
||||||
|
|
||||||
|
|
||||||
|
def get_speech_timestamps(
    audio: np.ndarray,
    model,
    threshold: float = 0.5,
    sampling_rate: int = 16000,
    min_speech_duration_ms: int = 250,
    max_speech_duration_s: float = float('inf'),
    min_silence_duration_ms: int = 100,
    speech_pad_ms: int = 30,
    return_seconds: bool = False,
    neg_threshold: Optional[float] = None,
    progress_tracking_callback: Optional[Callable[[float], None]] = None,
) -> List[Dict[str, float]]:
    """Extract speech timestamps from audio using Silero-style processing.

    This function implements Silero VAD's approach with:
    - Dual threshold (positive and negative) for hysteresis
    - Proper segment padding
    - Minimum duration filtering
    - Maximum duration handling with intelligent splitting

    Args:
        audio: Input audio array (torch tensor or numpy, mono or multi-channel)
        model: VAD model (WhisperVADOnnxWrapper instance)
        threshold: Speech threshold (default: 0.5)
        sampling_rate: Audio sample rate
        min_speech_duration_ms: Minimum speech segment duration
        max_speech_duration_s: Maximum speech segment duration
        min_silence_duration_ms: Minimum silence to split segments
        speech_pad_ms: Padding to add to speech segments
        return_seconds: Return times in seconds vs samples
        neg_threshold: Negative threshold for hysteresis (default: threshold - 0.15)
        progress_tracking_callback: Progress callback function (receives percent)

    Returns:
        List of speech segments with start/end times, plus avg/min/max
        frame probability per segment.
    """
    # Convert to numpy if torch tensor
    if torch.is_tensor(audio):
        audio = audio.numpy()

    if audio.ndim > 1:
        # Down-mix multi-channel audio over the CHANNEL axis (the smaller
        # dimension). BUGFIX: the original averaged over the larger (time)
        # axis, destroying the waveform.
        channel_axis = 0 if audio.shape[0] < audio.shape[1] else 1
        audio = audio.mean(axis=channel_axis)

    # Get frame probabilities for entire audio
    model.reset_states()
    speech_probs = model.audio_forward(audio, sampling_rate)

    # Calculate frame parameters
    frame_duration_ms = model.frame_duration_ms
    frame_samples = int(sampling_rate * frame_duration_ms / 1000)

    # Convert durations to frames
    min_speech_frames = int(min_speech_duration_ms / frame_duration_ms)
    min_silence_frames = int(min_silence_duration_ms / frame_duration_ms)
    speech_pad_frames = int(speech_pad_ms / frame_duration_ms)
    max_speech_frames = int(max_speech_duration_s * 1000 / frame_duration_ms) if max_speech_duration_s != float('inf') else len(speech_probs)

    # Negative threshold gives hysteresis: speech must drop clearly below
    # the onset threshold before the segment is allowed to end.
    if neg_threshold is None:
        neg_threshold = max(threshold - 0.15, 0.01)

    # Track speech segments
    triggered = False
    speeches = []
    current_speech = {}
    current_probs = []  # Frame probabilities of the current segment
    temp_end = 0  # Candidate end frame while inside a possible silence

    for i, speech_prob in enumerate(speech_probs):
        if progress_tracking_callback:
            progress = (i + 1) / len(speech_probs) * 100
            progress_tracking_callback(progress)

        if triggered:
            current_probs.append(float(speech_prob))

        # Speech onset detection
        if speech_prob >= threshold and not triggered:
            triggered = True
            current_speech['start'] = i
            current_probs = [float(speech_prob)]
            continue

        # Force a split once a segment exceeds the allowed maximum length.
        if triggered and 'start' in current_speech:
            duration = i - current_speech['start']
            if duration > max_speech_frames:
                current_speech['end'] = current_speech['start'] + max_speech_frames
                if current_probs:
                    current_speech['avg_prob'] = np.mean(current_probs)
                    current_speech['min_prob'] = np.min(current_probs)
                    current_speech['max_prob'] = np.max(current_probs)
                speeches.append(current_speech)
                current_speech = {}
                current_probs = []
                triggered = False
                temp_end = 0
                continue

        # Speech offset detection with hysteresis
        if speech_prob < neg_threshold and triggered:
            if not temp_end:
                temp_end = i

            # End the segment only after enough consecutive silence.
            if i - temp_end >= min_silence_frames:
                current_speech['end'] = temp_end

                # Drop segments shorter than the minimum duration.
                if current_speech['end'] - current_speech['start'] >= min_speech_frames:
                    if current_probs:
                        # Statistics only over frames up to the detected end.
                        seg_probs = current_probs[:temp_end - current_speech['start']]
                        current_speech['avg_prob'] = np.mean(seg_probs)
                        current_speech['min_prob'] = np.min(seg_probs)
                        current_speech['max_prob'] = np.max(seg_probs)
                    speeches.append(current_speech)

                current_speech = {}
                current_probs = []
                triggered = False
                temp_end = 0

        # Reset the silence counter if speech resumes before it elapses.
        elif speech_prob >= threshold and temp_end:
            temp_end = 0

    # Handle speech that continues to the end
    if triggered and 'start' in current_speech:
        current_speech['end'] = len(speech_probs)
        if current_speech['end'] - current_speech['start'] >= min_speech_frames:
            if current_probs:
                current_speech['avg_prob'] = np.mean(current_probs)
                current_speech['min_prob'] = np.min(current_probs)
                current_speech['max_prob'] = np.max(current_probs)
            speeches.append(current_speech)

    # Apply padding to segments, clamped so neighbours never overlap.
    for i, speech in enumerate(speeches):
        if i == 0:
            speech['start'] = max(0, speech['start'] - speech_pad_frames)
        else:
            speech['start'] = max(speeches[i-1]['end'], speech['start'] - speech_pad_frames)

        if i < len(speeches) - 1:
            speech['end'] = min(speeches[i+1]['start'], speech['end'] + speech_pad_frames)
        else:
            speech['end'] = min(len(speech_probs), speech['end'] + speech_pad_frames)

    # Convert frame indices to the requested time units.
    if return_seconds:
        for speech in speeches:
            speech['start'] = speech['start'] * frame_duration_ms / 1000
            speech['end'] = speech['end'] * frame_duration_ms / 1000
    else:
        for speech in speeches:
            speech['start'] = speech['start'] * frame_samples
            speech['end'] = speech['end'] * frame_samples

    return speeches
|
||||||
|
|
||||||
|
|
||||||
|
class VADIterator:
    """Stream iterator for real-time VAD processing (Silero-style).

    Buffers incoming audio until a full model chunk is available, then scans
    the chunk's frame probabilities for speech onset/offset boundaries.
    """

    def __init__(
        self,
        model,
        threshold: float = 0.5,
        sampling_rate: int = 16000,
        min_silence_duration_ms: int = 100,
        speech_pad_ms: int = 30,
    ):
        """Initialize VAD iterator for streaming.

        Args:
            model: WhisperVADOnnxWrapper instance
            threshold: Speech detection threshold
            sampling_rate: Audio sample rate
            min_silence_duration_ms: Minimum silence duration
            speech_pad_ms: Speech padding in milliseconds
        """
        self.model = model
        self.threshold = threshold
        # Hysteresis: offset requires dropping below a lower threshold.
        self.neg_threshold = max(threshold - 0.15, 0.01)
        self.sampling_rate = sampling_rate

        # Calculate frame-based parameters
        self.frame_duration_ms = model.frame_duration_ms
        self.min_silence_frames = min_silence_duration_ms / self.frame_duration_ms
        self.speech_pad_frames = speech_pad_ms / self.frame_duration_ms

        self.reset_states()

    def reset_states(self):
        """Reset iterator state."""
        self.model.reset_states()
        self.triggered = False
        self.temp_end = 0
        self.current_frame = 0
        self.buffer = np.array([])
        self.speech_start = 0

    def __call__(self, audio_chunk: np.ndarray, return_seconds: bool = False) -> Optional[Dict]:
        """Process audio chunk and detect speech boundaries.

        Args:
            audio_chunk: Audio chunk to process
            return_seconds: Return times in seconds vs samples

        Returns:
            Dict with 'start' or 'end' key when speech boundary detected,
            otherwise None (including while the buffer is still filling).
        """
        # Accumulate audio until a full model chunk is available.
        self.buffer = np.concatenate([self.buffer, audio_chunk]) if len(self.buffer) > 0 else audio_chunk

        if len(self.buffer) < self.model.chunk_samples:
            return None

        # Consume exactly one chunk from the front of the buffer.
        chunk = self.buffer[:self.model.chunk_samples]
        self.buffer = self.buffer[self.model.chunk_samples:]

        # Get frame predictions
        frame_probs = self.model(chunk, self.sampling_rate)

        # Scan frames; return on the first boundary found in this chunk.
        for prob in frame_probs:
            self.current_frame += 1

            # Speech onset
            if prob >= self.threshold and not self.triggered:
                self.triggered = True
                self.speech_start = self.current_frame - self.speech_pad_frames
                start_time = max(0, self.speech_start * self.frame_duration_ms / 1000) if return_seconds else \
                             max(0, self.speech_start * self.frame_duration_ms * 16)
                return {'start': start_time}

            # Speech offset
            if prob < self.neg_threshold and self.triggered:
                if not self.temp_end:
                    self.temp_end = self.current_frame
                elif self.current_frame - self.temp_end >= self.min_silence_frames:
                    # End speech
                    end_frame = self.temp_end + self.speech_pad_frames
                    end_time = end_frame * self.frame_duration_ms / 1000 if return_seconds else \
                               end_frame * self.frame_duration_ms * 16
                    self.triggered = False
                    self.temp_end = 0
                    return {'end': end_time}
            elif prob >= self.threshold and self.temp_end:
                self.temp_end = 0

        return None
|
||||||
|
|
||||||
|
|
||||||
|
def load_audio(audio_path: str, sampling_rate: int = 16000) -> np.ndarray:
    """Load an audio file, resampled to the target rate.

    Args:
        audio_path: Path to audio file
        sampling_rate: Target sample rate

    Returns:
        Mono audio array at the target sample rate
    """
    waveform, _ = librosa.load(audio_path, sr=sampling_rate)
    return waveform
|
||||||
|
|
||||||
|
|
||||||
|
def save_segments(segments: List[Dict], output_path: str, format: str = 'json'):
    """Save speech segments to file.

    Args:
        segments: List of speech segments (dicts with 'start'/'end' and
            optionally 'avg_prob'/'min_prob'/'max_prob')
        output_path: Output file path
        format: Output format (json, txt, csv, srt)
    """
    if format == 'json':
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump({'segments': segments}, f, indent=2)

    elif format == 'txt':
        with open(output_path, 'w', encoding='utf-8') as f:
            for i, seg in enumerate(segments, 1):
                start = seg['start']
                end = seg['end']
                duration = end - start
                f.write(f"{i:3d}. {start:8.3f}s - {end:8.3f}s (duration: {duration:6.3f}s)\n")

    elif format == 'csv':
        import csv
        with open(output_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=['start', 'end', 'duration'])
            writer.writeheader()
            for seg in segments:
                row = {
                    'start': seg['start'],
                    'end': seg['end'],
                    'duration': seg['end'] - seg['start']
                }
                writer.writerow(row)

    elif format == 'srt':
        # Hoisted out of the loop: the original re-defined this helper for
        # every segment.
        def seconds_to_srt(seconds):
            """Format seconds as an SRT timestamp (HH:MM:SS,mmm)."""
            hours = int(seconds // 3600)
            minutes = int((seconds % 3600) // 60)
            secs = int(seconds % 60)
            millis = int((seconds % 1) * 1000)
            return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"

        with open(output_path, 'w', encoding='utf-8') as f:
            for i, seg in enumerate(segments, 1):
                start_s = seg['start']
                end_s = seg['end']

                f.write(f"{i}\n")
                f.write(f"{seconds_to_srt(start_s)} --> {seconds_to_srt(end_s)}\n")

                # Write speech probability information if available
                if 'avg_prob' in seg:
                    f.write(f"Speech [Avg: {seg['avg_prob']:.2%}, Min: {seg['min_prob']:.2%}, Max: {seg['max_prob']:.2%}]\n\n")
                else:
                    f.write("[Speech]\n\n")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: run VAD on one audio file and save the segments.

    Parses command-line options, loads the ONNX model and audio, then runs
    either streaming detection (--stream, via VADIterator) or batch
    Silero-style detection (get_speech_timestamps), prints a summary, and
    writes the segments in the chosen format.

    Returns:
        0 on success, 1 on missing files or any processing error.
    """
    parser = argparse.ArgumentParser(
        description='Silero-style ONNX inference for Whisper-based VAD model'
    )
    parser.add_argument('--model', required=True, help='Path to ONNX model file')
    parser.add_argument('--audio', required=True, help='Path to audio file')
    parser.add_argument('--output', help='Output file path (default: audio_path.vad.json)')
    parser.add_argument('--format', choices=['json', 'txt', 'csv', 'srt'],
                        default='json', help='Output format')
    parser.add_argument('--threshold', type=float, default=0.5,
                        help='Speech detection threshold (0.0-1.0)')
    parser.add_argument('--neg-threshold', type=float, default=None,
                        help='Negative threshold for hysteresis (default: threshold - 0.15)')
    parser.add_argument('--min-speech-duration', type=int, default=250,
                        help='Minimum speech duration in ms')
    parser.add_argument('--min-silence-duration', type=int, default=100,
                        help='Minimum silence duration in ms')
    parser.add_argument('--speech-pad', type=int, default=30,
                        help='Speech padding in ms')
    parser.add_argument('--max-speech-duration', type=float, default=float('inf'),
                        help='Maximum speech duration in seconds')
    parser.add_argument('--metadata', help='Path to metadata JSON file')
    parser.add_argument('--force-cpu', action='store_true',
                        help='Force CPU execution even if GPU is available')
    parser.add_argument('--threads', type=int, default=1,
                        help='Number of CPU threads')
    parser.add_argument('--stream', action='store_true',
                        help='Use streaming mode (demonstrate VADIterator)')

    args = parser.parse_args()

    # Check files exist before doing any heavy loading.
    if not os.path.exists(args.model):
        print(f"Error: Model file not found: {args.model}")
        return 1

    if not os.path.exists(args.audio):
        print(f"Error: Audio file not found: {args.audio}")
        return 1

    try:
        # Initialize model
        print("Loading model...")
        model = WhisperVADOnnxWrapper(
            model_path=args.model,
            metadata_path=args.metadata,
            force_cpu=args.force_cpu,
            num_threads=args.threads,
        )

        # Load audio (load_audio resamples to 16 kHz)
        print(f"Loading audio: {args.audio}")
        audio = load_audio(args.audio)
        duration = len(audio) / 16000
        print(f"Audio duration: {duration:.2f}s")

        if args.stream:
            # Demonstrate streaming mode
            print("\nUsing streaming mode (VADIterator)...")
            vad_iterator = VADIterator(
                model=model,
                threshold=args.threshold,
                min_silence_duration_ms=args.min_silence_duration,
                speech_pad_ms=args.speech_pad,
            )

            # Simulate streaming by processing in small chunks
            chunk_size = 16000  # 1 second chunks
            segments = []
            current_segment = {}

            for i in range(0, len(audio), chunk_size):
                chunk = audio[i:i + chunk_size]
                result = vad_iterator(chunk, return_seconds=True)

                # NOTE(review): boundary times from the iterator are shifted
                # by the chunk offset i/16000 here — confirm this matches the
                # iterator's own time base.
                if result:
                    if 'start' in result:
                        current_segment = {'start': result['start'] + i/16000}
                        print(f" Speech started: {current_segment['start']:.2f}s")
                    elif 'end' in result and current_segment:
                        current_segment['end'] = result['end'] + i/16000
                        segments.append(current_segment)
                        print(f" Speech ended: {current_segment['end']:.2f}s")
                        current_segment = {}

            # Handle ongoing speech at end
            if current_segment and 'start' in current_segment:
                current_segment['end'] = duration
                segments.append(current_segment)
        else:
            # Use batch mode with Silero-style processing
            print("\nProcessing with Silero-style speech detection...")

            # Progress callback: overwrite one console line with the percent.
            def progress_callback(percent):
                print(f"\rProgress: {percent:.1f}%", end='', flush=True)

            # Get speech timestamps
            segments = get_speech_timestamps(
                audio=audio,
                model=model,
                threshold=args.threshold,
                sampling_rate=16000,
                min_speech_duration_ms=args.min_speech_duration,
                min_silence_duration_ms=args.min_silence_duration,
                speech_pad_ms=args.speech_pad,
                max_speech_duration_s=args.max_speech_duration,
                return_seconds=True,
                neg_threshold=args.neg_threshold,
                progress_tracking_callback=progress_callback,
            )
            print()  # New line after progress

        # Display results
        print(f"\nFound {len(segments)} speech segments:")
        total_speech = sum(seg['end'] - seg['start'] for seg in segments)
        print(f"Total speech: {total_speech:.2f}s ({total_speech/duration*100:.1f}%)")

        if segments:
            print("\nSegments:")
            for i, seg in enumerate(segments[:10], 1):  # Show first 10
                duration_seg = seg['end'] - seg['start']
                print(f" {i:2d}. {seg['start']:7.3f}s - {seg['end']:7.3f}s (duration: {duration_seg:5.3f}s)")
            if len(segments) > 10:
                print(f" ... and {len(segments) - 10} more segments")

        # Save results; default output name derives from the audio filename.
        output_path = args.output
        if not output_path:
            base = os.path.splitext(args.audio)[0]
            output_path = f"{base}.vad.{args.format}"

        save_segments(segments, output_path, format=args.format)
        print(f"\nResults saved to: {output_path}")

    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        return 1

    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Propagate main()'s status code to the shell. Use SystemExit directly:
    # the bare exit() builtin is an interactive helper injected by site.py
    # and is not guaranteed to exist in every runtime.
    raise SystemExit(main())
|
||||||
BIN
vad_models/Whisper-Vad-EncDec-ASMR-onnx/model.onnx
(Stored with Git LFS)
Normal file
BIN
vad_models/Whisper-Vad-EncDec-ASMR-onnx/model.onnx
(Stored with Git LFS)
Normal file
Binary file not shown.
21
vad_models/Whisper-Vad-EncDec-ASMR-onnx/model_metadata.json
Normal file
21
vad_models/Whisper-Vad-EncDec-ASMR-onnx/model_metadata.json
Normal file
|
|
@ -0,0 +1,21 @@
|
||||||
|
{
|
||||||
|
"model_type": "encoder_decoder",
|
||||||
|
"whisper_model_name": "openai/whisper-base",
|
||||||
|
"decoder_layers": 2,
|
||||||
|
"decoder_heads": 8,
|
||||||
|
"input_shape": [
|
||||||
|
1,
|
||||||
|
80,
|
||||||
|
3000
|
||||||
|
],
|
||||||
|
"output_shape": [
|
||||||
|
1,
|
||||||
|
1500
|
||||||
|
],
|
||||||
|
"frame_duration_ms": 20,
|
||||||
|
"total_duration_ms": 30000,
|
||||||
|
"opset_version": 17,
|
||||||
|
"export_batch_size": 1,
|
||||||
|
"config_path": "",
|
||||||
|
"checkpoint_path": ""
|
||||||
|
}
|
||||||
5
vad_models/Whisper-Vad-EncDec-ASMR-onnx/requirements.txt
Normal file
5
vad_models/Whisper-Vad-EncDec-ASMR-onnx/requirements.txt
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
onnxruntime>=1.16.0 # or onnxruntime-gpu for GPU support
|
||||||
|
transformers>=4.30.0 # For WhisperFeatureExtractor
|
||||||
|
librosa>=0.10.0 # Audio processing
|
||||||
|
soundfile>=0.12.0 # Audio I/O (required by librosa)
|
||||||
|
numpy>=1.24.0 # Array operations
|
||||||
Loading…
Add table
Add a link
Reference in a new issue