diff --git a/ludwig/schema/llms/generation.py b/ludwig/schema/llms/generation.py
index a12c9990ee3..e1d96e21f94 100644
--- a/ludwig/schema/llms/generation.py
+++ b/ludwig/schema/llms/generation.py
@@ -19,7 +19,10 @@ class LLMGenerationConfig(schema_utils.BaseMarshmallowConfig):
     max_new_tokens: Optional[int] = schema_utils.PositiveInteger(
         default=32,
         allow_none=True,
-        description="The maximum number of new tokens to generate, ignoring the number of tokens in the input prompt.",
+        description="The maximum number of new tokens to generate, ignoring the number of tokens in the input prompt. "
+        "If not set, this is dynamically determined by Ludwig based on the `max_sequence_length` of the output "
+        "feature, the `global_max_sequence_length` specified in preprocessing (if set), or the "
+        "maximum context length supported by the model, in that order of precedence.",
         parameter_metadata=LLM_METADATA["generation"]["max_new_tokens"],
     )
 
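For context, the fallback behavior described in the new docstring amounts to a simple precedence chain. The sketch below is illustrative only; the function name, signature, and call sites are hypothetical and not Ludwig's actual internal API:

```python
from typing import Optional


def resolve_max_new_tokens(
    max_new_tokens: Optional[int],
    output_feature_max_sequence_length: Optional[int],
    global_max_sequence_length: Optional[int],
    model_max_context_length: int,
) -> int:
    """Hypothetical sketch of the precedence chain described in the docstring.

    Uses the explicitly configured value if set; otherwise falls back to the
    output feature's max_sequence_length, then the preprocessing-level
    global_max_sequence_length, then the model's maximum context length.
    """
    if max_new_tokens is not None:
        return max_new_tokens
    if output_feature_max_sequence_length is not None:
        return output_feature_max_sequence_length
    if global_max_sequence_length is not None:
        return global_max_sequence_length
    return model_max_context_length


# Example: nothing configured except a 2048-token model context window,
# so the model's maximum context length wins.
assert resolve_max_new_tokens(None, None, None, 2048) == 2048
# Example: an explicit max_new_tokens always takes precedence.
assert resolve_max_new_tokens(64, 512, 1024, 2048) == 64
```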