diff --git a/STT/whisper_stt_handler.py b/STT/whisper_stt_handler.py
index ea37b8ba2aa86cac52534a7e843d9d2705270cfd..b1596a9b701cbed35640e69989bca15fd6639e39 100644
--- a/STT/whisper_stt_handler.py
+++ b/STT/whisper_stt_handler.py
@@ -67,7 +67,7 @@ class WhisperSTTHandler(BaseHandler):
             # generating more tokens than previously will trigger CUDA graphs capture
             # one should warmup with a number of generated tokens above max tokens targeted for subsequent generation
             warmup_gen_kwargs = {
-                "min_new_tokens": self.gen_kwargs["min_new_tokens"],
+                "min_new_tokens": self.gen_kwargs["max_new_tokens"],  # Yes, assign max_new_tokens to min_new_tokens
                 "max_new_tokens": self.gen_kwargs["max_new_tokens"],
                 **self.gen_kwargs,
             }
diff --git a/arguments_classes/whisper_stt_arguments.py b/arguments_classes/whisper_stt_arguments.py
index 2edb4c24e7d75bd8a204edf1a82a8dd79b0df457..bed382dda754da36965b4d86e68a7f8b4d9c322c 100644
--- a/arguments_classes/whisper_stt_arguments.py
+++ b/arguments_classes/whisper_stt_arguments.py
@@ -33,12 +33,6 @@ class WhisperSTTHandlerArguments:
             "help": "The maximum number of new tokens to generate. Default is 128."
         },
     )
-    stt_gen_min_new_tokens: int = field(
-        default=0,
-        metadata={
-            "help": "The minimum number of new tokens to generate. Default is 0."
-        },
-    )
     stt_gen_num_beams: int = field(
         default=1,
         metadata={