Automatic Speech Recognition
Transformers
Safetensors
Japanese
whisper
audio
hf-asr-leaderboard
Eval Results
kotoba-whisper-v1.0 / benchmark.py
asahi417's picture
Update benchmark.py
d41b5b8 verified
raw
history blame
736 Bytes
from time import time
from pprint import pprint
import torch
from transformers import pipeline
from datasets import load_dataset
# --- config ---------------------------------------------------------------
# Force Japanese transcription (no language auto-detect, no translation).
generate_kwargs = {"language": "japanese", "task": "transcribe"}
model_id = "kotoba-tech/kotoba-whisper-v1.0"

# --- load model -----------------------------------------------------------
# bfloat16 halves memory vs float32; assumes the host/GPU supports bf16 —
# TODO confirm on the target hardware.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model_id,
    torch_dtype=torch.bfloat16,
)

# Local evaluation audio files (paths relative to the working directory).
test_audio = [
    "kotoba-whisper-eval/audio/long_interview_1.wav",
    "kotoba-whisper-eval/audio/manzai1.wav",
    "kotoba-whisper-eval/audio/manzai2.wav",
    "kotoba-whisper-eval/audio/manzai3.wav",
]

# --- benchmark ------------------------------------------------------------
# Wall-clock transcription time per file; the transcription text itself is
# discarded — this script only measures latency.
elapsed = {}
for x in test_audio:
    start = time()
    transcription = pipe(x, generate_kwargs=generate_kwargs)
    elapsed[x] = time() - start
pprint(elapsed)