# Compare commits

4 commits: `fee52c9229...3b4a6aa1c2`

| Author | SHA1 | Date |
|---|---|---|
| | 3b4a6aa1c2 | |
| | c7cb2aa8d4 | |
| | c0d93d0829 | |
| | 19c294f978 | |
## README.md (+12)
````diff
@@ -161,6 +161,18 @@ ct2-transformers-converter --model openai/whisper-large-v2 --output_dir whisper-
 
+Models can also be converted from the code. See the [conversion API](https://opennmt.net/CTranslate2/python/ctranslate2.converters.TransformersConverter.html).
+
+### Load a converted model
+
+1. Directly load the model from a local directory:
+```python
+model = faster_whisper.WhisperModel('whisper-large-v2-ct2')
+```
+
+2. [Upload your model to the Hugging Face Hub](https://huggingface.co/docs/transformers/model_sharing#upload-with-the-web-interface) and load it from its name:
+```python
+model = faster_whisper.WhisperModel('username/whisper-large-v2-ct2')
+```
 
 ## Comparing performance against other implementations
 
 If you are comparing the performance against other Whisper implementations, you should make sure to run the comparison with similar settings. In particular:
````
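For reference, the conversion API mentioned above can be driven from Python as well. A minimal sketch, assuming the documented `ctranslate2.converters.TransformersConverter` interface (the output directory name is just an example):

```python
# Sketch: convert openai/whisper-large-v2 with the CTranslate2 converter API,
# mirroring the ct2-transformers-converter CLI invocation shown above.
from ctranslate2.converters import TransformersConverter

converter = TransformersConverter(
    "openai/whisper-large-v2",
    copy_files=["tokenizer.json"],  # ship the tokenizer with the converted weights
)
converter.convert("whisper-large-v2-ct2", quantization="float16")
```

The next hunks update the `WhisperModel` class: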
```diff
@@ -88,8 +88,9 @@ class WhisperModel:
 
         Args:
           model_size_or_path: Size of the model to use (tiny, tiny.en, base, base.en,
-            small, small.en, medium, medium.en, large-v1, or large-v2) or a path to a converted
-            model directory. When a size is configured, the converted model is downloaded
+            small, small.en, medium, medium.en, large-v1, or large-v2), a path to a converted
+            model directory, or a CTranslate2-converted Whisper model ID from the Hugging Face Hub.
+            When a size or a model ID is configured, the converted model is downloaded
             from the Hugging Face Hub.
           device: Device to use for computation ("cpu", "cuda", "auto").
           device_index: Device ID to use.
```
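In effect, `model_size_or_path` now accepts three spellings. A quick sketch (the local path is hypothetical, and each call would load or download a real model):

```python
import faster_whisper

# 1. A model size: the converted model is fetched from the Hugging Face Hub.
model = faster_whisper.WhisperModel("large-v2")

# 2. A path to a local converted model directory (hypothetical path).
model = faster_whisper.WhisperModel("/models/whisper-large-v2-ct2")

# 3. A CTranslate2-converted model ID on the Hugging Face Hub.
model = faster_whisper.WhisperModel("guillaumekln/faster-whisper-large-v2")
```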
```diff
@@ -369,6 +370,7 @@ class WhisperModel:
         else:
             all_tokens.extend(options.initial_prompt)
 
+        last_speech_timestamp = 0.0
         while seek < content_frames:
             time_offset = seek * self.feature_extractor.time_per_frame
             segment = features[:, seek : seek + self.feature_extractor.nb_max_frames]
```
```diff
@@ -510,12 +512,14 @@ class WhisperModel:
                         segment_size,
                         options.prepend_punctuations,
                         options.append_punctuations,
+                        last_speech_timestamp=last_speech_timestamp,
                     )
 
                     word_end_timestamps = [
                         w["end"] for s in current_segments for w in s["words"]
                     ]
-
+                    if len(word_end_timestamps) > 0:
+                        last_speech_timestamp = word_end_timestamps[-1]
                     if not single_timestamp_ending and len(word_end_timestamps) > 0:
                         seek_shift = round(
                             (word_end_timestamps[-1] - time_offset) * self.frames_per_second
```
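These two hunks thread a running `last_speech_timestamp` through the decode loop: it starts at 0.0, is handed to `add_word_timestamps`, and advances to the end of the last word each window produces. A standalone sketch of that bookkeeping, with invented data:

```python
# Toy sketch (not library code): carry the end of the last decoded word
# across audio windows so later windows can detect a long pause.
segments_per_window = [
    [{"words": [{"end": 1.2}, {"end": 2.4}]}],  # window 1: two words
    [{"words": []}],                            # window 2: no words
    [{"words": [{"end": 9.8}]}],                # window 3: one word
]

last_speech_timestamp = 0.0
for current_segments in segments_per_window:
    word_end_timestamps = [
        w["end"] for s in current_segments for w in s["words"]
    ]
    if len(word_end_timestamps) > 0:
        last_speech_timestamp = word_end_timestamps[-1]
    print(last_speech_timestamp)  # 2.4, then 2.4 (kept), then 9.8
```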
```diff
@@ -649,6 +653,12 @@ class WhisperModel:
                     options.log_prob_threshold,
                 )
 
+                if (
+                    options.no_speech_threshold is not None
+                    and result.no_speech_prob > options.no_speech_threshold
+                ):
+                    needs_fallback = False  # silence
+
                 if not needs_fallback:
                     break
```
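The inserted condition means a window that the model itself flags as silence no longer triggers a temperature-fallback retry, even if its compression ratio or average log-probability looks bad. A toy illustration with invented numbers:

```python
# Toy illustration (not library code): silence should not trigger retries.
no_speech_threshold = 0.6
no_speech_prob = 0.91  # the model is fairly confident this window is silence
needs_fallback = True  # e.g. the compression-ratio check failed

if no_speech_threshold is not None and no_speech_prob > no_speech_threshold:
    needs_fallback = False  # silence: keep the result, skip the retry

assert needs_fallback is False
```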
```diff
@@ -688,6 +698,7 @@ class WhisperModel:
         num_frames: int,
         prepend_punctuations: str,
         append_punctuations: str,
+        last_speech_timestamp: float,
     ):
         if len(segments) == 0:
             return
```
```diff
@@ -701,6 +712,26 @@ class WhisperModel:
         alignment = self.find_alignment(
             tokenizer, text_tokens, encoder_output, num_frames
         )
+        word_durations = np.array([word["end"] - word["start"] for word in alignment])
+        word_durations = word_durations[word_durations.nonzero()]
+        median_duration = np.median(word_durations) if len(word_durations) > 0 else 0.0
+        max_duration = median_duration * 2
+
+        # hack: truncate long words at sentence boundaries.
+        # a better segmentation algorithm based on VAD should be able to replace this.
+        if len(word_durations) > 0:
+            median_duration = np.median(word_durations)
+            max_duration = median_duration * 2
+            sentence_end_marks = ".。!!??"
+            # ensure words at sentence boundaries
+            # are not longer than twice the median word duration.
+            for i in range(1, len(alignment)):
+                if alignment[i]["end"] - alignment[i]["start"] > max_duration:
+                    if alignment[i]["word"] in sentence_end_marks:
+                        alignment[i]["end"] = alignment[i]["start"] + max_duration
+                    elif alignment[i - 1]["word"] in sentence_end_marks:
+                        alignment[i]["start"] = alignment[i]["end"] - max_duration
+
         merge_punctuations(alignment, prepend_punctuations, append_punctuations)
 
         time_offset = (
```
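Run in isolation on invented alignment data, the added sentence-boundary hack behaves like this (the dicts mimic `find_alignment()` entries; all numbers are made up):

```python
import numpy as np

# Hypothetical alignment entries mimicking find_alignment() output.
alignment = [
    {"word": "Hello", "start": 0.0, "end": 0.4},
    {"word": ".", "start": 0.4, "end": 5.0},  # pathologically long "."
    {"word": "World", "start": 5.0, "end": 5.5},
]

word_durations = np.array([w["end"] - w["start"] for w in alignment])
word_durations = word_durations[word_durations.nonzero()]

if len(word_durations) > 0:
    median_duration = np.median(word_durations)  # 0.5
    max_duration = median_duration * 2           # 1.0
    sentence_end_marks = ".。!!??"
    # truncate over-long words that sit on a sentence boundary
    for i in range(1, len(alignment)):
        if alignment[i]["end"] - alignment[i]["start"] > max_duration:
            if alignment[i]["word"] in sentence_end_marks:
                alignment[i]["end"] = alignment[i]["start"] + max_duration
            elif alignment[i - 1]["word"] in sentence_end_marks:
                alignment[i]["start"] = alignment[i]["end"] - max_duration

print(alignment[1])  # the "." now ends at 1.4 instead of 5.0
```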
```diff
@@ -731,10 +762,51 @@ class WhisperModel:
                 saved_tokens += len(timing["tokens"])
                 word_index += 1
 
+            # hack: truncate long words at segment boundaries.
+            # a better segmentation algorithm based on VAD should be able to replace this.
             if len(words) > 0:
-                # adjust the segment-level timestamps based on the word-level timestamps
-                segment["start"] = words[0]["start"]
-                segment["end"] = words[-1]["end"]
+                # ensure the first and second word after a pause is not longer than
+                # twice the median word duration.
+                if words[0]["end"] - last_speech_timestamp > median_duration * 4 and (
+                    words[0]["end"] - words[0]["start"] > max_duration
+                    or (
+                        len(words) > 1
+                        and words[1]["end"] - words[0]["start"] > max_duration * 2
+                    )
+                ):
+                    if (
+                        len(words) > 1
+                        and words[1]["end"] - words[1]["start"] > max_duration
+                    ):
+                        boundary = max(
+                            words[1]["end"] / 2, words[1]["end"] - max_duration
+                        )
+                        words[0]["end"] = words[1]["start"] = boundary
+                    words[0]["start"] = max(0, words[0]["end"] - max_duration)
+
+                # prefer the segment-level start timestamp if the first word is too long.
+                if (
+                    segment["start"] < words[0]["end"]
+                    and segment["start"] - 0.5 > words[0]["start"]
+                ):
+                    words[0]["start"] = max(
+                        0, min(words[0]["end"] - median_duration, segment["start"])
+                    )
+                else:
+                    segment["start"] = words[0]["start"]
+
+                # prefer the segment-level end timestamp if the last word is too long.
+                if (
+                    segment["end"] > words[-1]["start"]
+                    and segment["end"] + 0.5 < words[-1]["end"]
+                ):
+                    words[-1]["end"] = max(
+                        words[-1]["start"] + median_duration, segment["end"]
+                    )
+                else:
+                    segment["end"] = words[-1]["end"]
+
+                last_speech_timestamp = segment["end"]
 
             segment["words"] = words
 
```
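The deleted direct assignments are replaced by a reconciliation: segment-level and word-level timestamps each win only when the other looks implausible. A toy demo of the "prefer the segment-level start" branch (numbers invented):

```python
# Toy demo (not library code): reconcile segment and word boundaries.
segment = {"start": 7.0, "end": 9.0}
words = [{"start": 2.0, "end": 7.4}, {"start": 7.4, "end": 8.9}]
median_duration = 0.7

# The first word starts implausibly early (7.0 - 0.5 > 2.0),
# so the segment-level start wins over the word-level one.
if segment["start"] < words[0]["end"] and segment["start"] - 0.5 > words[0]["start"]:
    words[0]["start"] = max(0, min(words[0]["end"] - median_duration, segment["start"]))
else:
    segment["start"] = words[0]["start"]

print(words[0]["start"])  # 6.7 = min(7.4 - 0.7, 7.0)
```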
```diff
@@ -779,22 +851,6 @@ class WhisperModel:
             for i, j in zip(word_boundaries[:-1], word_boundaries[1:])
         ]
 
-        # hack: ensure the first and second word is not longer than twice the median word duration.
-        # a better segmentation algorithm based on VAD should be able to replace this.
-        word_durations = end_times - start_times
-        word_durations = word_durations[word_durations.nonzero()]
-        if len(word_durations) > 0:
-            median_duration = np.median(word_durations)
-            max_duration = median_duration * 2
-            if len(word_durations) >= 2 and word_durations[1] > max_duration:
-                boundary = max(end_times[2] / 2, end_times[2] - max_duration)
-                end_times[0] = start_times[1] = boundary
-            if (
-                len(word_durations) >= 1
-                and end_times[0] - start_times[0] > max_duration
-            ):
-                start_times[0] = max(0, end_times[0] - max_duration)
-
         return [
             dict(
                 word=word, tokens=tokens, start=start, end=end, probability=probability
```
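This block is removed rather than relocated verbatim: the first/second-word truncation now lives in `add_word_timestamps` (the `@@ -731,10 +762,51` hunk above), where it can also take `last_speech_timestamp` and the pause heuristic into account.

The final hunks update the model download helper: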
```diff
@@ -1,5 +1,6 @@
 import logging
 import os
+import re
 
 from typing import Optional
 
```
```diff
@@ -33,7 +34,7 @@ def get_logger():
 
 
 def download_model(
-    size: str,
+    size_or_id: str,
    output_dir: Optional[str] = None,
    local_files_only: bool = False,
    cache_dir: Optional[str] = None,
```
```diff
@@ -43,8 +44,9 @@ def download_model(
     The model is downloaded from https://huggingface.co/guillaumekln.
 
     Args:
-      size: Size of the model to download (tiny, tiny.en, base, base.en, small, small.en,
-        medium, medium.en, large-v1, or large-v2).
+      size_or_id: Size of the model to download (tiny, tiny.en, base, base.en, small, small.en,
+        medium, medium.en, large-v1, or large-v2), or a CTranslate2-converted model ID
+        from the Hugging Face Hub (e.g. guillaumekln/faster-whisper-large-v2).
       output_dir: Directory where the model should be saved. If not set, the model is saved in
         the cache directory.
       local_files_only: If True, avoid downloading the file and return the path to the local
```
```diff
@@ -57,12 +59,16 @@ def download_model(
     Raises:
       ValueError: if the model size is invalid.
     """
-    if size not in _MODELS:
-        raise ValueError(
-            "Invalid model size '%s', expected one of: %s" % (size, ", ".join(_MODELS))
-        )
+    if re.match(r".*/.*", size_or_id):
+        repo_id = size_or_id
+    else:
+        if size_or_id not in _MODELS:
+            raise ValueError(
+                "Invalid model size '%s', expected one of: %s"
+                % (size_or_id, ", ".join(_MODELS))
+            )
 
-    repo_id = "guillaumekln/faster-whisper-%s" % size
+        repo_id = "guillaumekln/faster-whisper-%s" % size_or_id
 
     allow_patterns = [
         "config.json",
```
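The rewritten validation encodes a simple dispatch rule: anything containing a slash is treated as a Hub repo ID and used as-is; anything else must be a known size and is mapped to the corresponding `guillaumekln/faster-whisper-*` repository. A standalone sketch (`resolve_repo_id` is a hypothetical helper, and `_MODELS` is abbreviated here):

```python
import re

# Abbreviated stand-in for faster_whisper.utils._MODELS.
_MODELS = ("tiny", "base", "small", "medium", "large-v1", "large-v2")

def resolve_repo_id(size_or_id: str) -> str:
    # "user/repo" style strings are already Hugging Face Hub repo IDs.
    if re.match(r".*/.*", size_or_id):
        return size_or_id
    if size_or_id not in _MODELS:
        raise ValueError(
            "Invalid model size '%s', expected one of: %s"
            % (size_or_id, ", ".join(_MODELS))
        )
    return "guillaumekln/faster-whisper-%s" % size_or_id

print(resolve_repo_id("large-v2"))               # guillaumekln/faster-whisper-large-v2
print(resolve_repo_id("username/my-model-ct2"))  # returned unchanged
```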