fix where the tokens are reset (#175)

Anthony authored on 2023-04-24 16:28:47 +02:00, committed by GitHub
parent f893113759
commit 338a725ff8

@@ -483,9 +483,6 @@ class WhisperModel:
             seek += segment_size
 
-            if not options.condition_on_previous_text or temperature > 0.5:
-                prompt_reset_since = len(all_tokens)
-
             if options.word_timestamps:
                 self.add_word_timestamps(
                     current_segments,
@@ -538,6 +535,9 @@ class WhisperModel:
                         ),
                     )
 
+            if not options.condition_on_previous_text or temperature > 0.5:
+                prompt_reset_since = len(all_tokens)
+
     def encode(self, features: np.ndarray) -> ctranslate2.StorageView:
         # When the model is running on multiple GPUs, the encoder output should be moved
         # to the CPU since we don't know which GPU will handle the next job.
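The change itself only moves the reset: prompt_reset_since = len(all_tokens) now runs after the current window's segments have been handled (and their tokens appended to all_tokens) rather than right after seek is advanced. Below is a minimal sketch of why the order matters, assuming, as in the surrounding code, that the prompt for the next window is taken from all_tokens[prompt_reset_since:]; next_prompt and max_prompt_tokens are hypothetical stand-ins for that logic, not the faster-whisper API.

# Sketch only: next_prompt mimics how the next window's prompt is sliced
# from the tokens decoded so far.
def next_prompt(all_tokens, prompt_reset_since, max_prompt_tokens=224):
    # Only tokens decoded after the last reset point are fed back as context.
    return all_tokens[prompt_reset_since:][-max_prompt_tokens:]

segment_tokens = [101, 102, 103]  # tokens decoded for the current window

# Order before this commit: reset first, then append.
all_tokens = [1, 2, 3]
prompt_reset_since = len(all_tokens)   # reset point is before the current tokens
all_tokens.extend(segment_tokens)
assert next_prompt(all_tokens, prompt_reset_since) == [101, 102, 103]  # still fed to the next window

# Order after this commit: append first, then reset.
all_tokens = [1, 2, 3]
all_tokens.extend(segment_tokens)
prompt_reset_since = len(all_tokens)   # reset point is past the current tokens
assert next_prompt(all_tokens, prompt_reset_since) == []  # prompt is actually reset

In other words, with the old order the just-decoded tokens still leaked into the next prompt even when condition_on_previous_text was disabled or the temperature fallback had kicked in; with the new order the reset really clears the prompt.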