sagemaker-train/README.rst (1 addition, 1 deletion)

@@ -47,7 +47,7 @@ Table of Contents


 Installing the SageMaker Python SDK Train
------------------------------------
+-----------------------------------------
 
 You can install from source by cloning this repository and running a pip install command in the root directory of the repository:

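In reStructuredText a section underline must be at least as long as its title, so the old 35-dash underline triggers a docutils "Title underline too short" warning; the replacement's 41 dashes exactly match the heading text.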
@@ -301,7 +301,7 @@ class BenchMarkEvaluator(BaseEvaluator):

     benchmark: _Benchmark
     subtasks: Optional[Union[str, List[str]]] = None
-    evaluate_base_model: bool = True
+    evaluate_base_model: bool = False
     _hyperparameters: Optional[Any] = None


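This default flip makes base-model evaluation opt-in rather than opt-out. A minimal sketch of the behavior change, assuming only pydantic; EvaluatorSketch is a hypothetical stand-in for the SDK's evaluator classes, which carry many more fields:

from pydantic import BaseModel

class EvaluatorSketch(BaseModel):
    # Was True before this change; callers must now opt in explicitly.
    evaluate_base_model: bool = False

print(EvaluatorSketch().evaluate_base_model)                          # False
print(EvaluatorSketch(evaluate_base_model=True).evaluate_base_model)  # True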
@@ -137,7 +137,7 @@ class CustomScorerEvaluator(BaseEvaluator):
     _hyperparameters: Optional[Any] = None

     # Template-required fields
-    evaluate_base_model: bool = True
+    evaluate_base_model: bool = False

     @validator('dataset', pre=True)
     def _resolve_dataset(cls, v):
@@ -123,7 +123,7 @@ class LLMAsJudgeEvaluator(BaseEvaluator):
     custom_metrics: Optional[str] = None

     # Template-required fields
-    evaluate_base_model: bool = True
+    evaluate_base_model: bool = False

     @validator('dataset', pre=True)
     def _resolve_dataset(cls, v):
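Both classes also expose a @validator('dataset', pre=True) hook named _resolve_dataset, whose body the diff does not show. The following is a generic sketch of that pydantic v1 pre-validator pattern; DatasetHolder and the dict-unwrapping logic are illustrative assumptions, not the SDK's actual resolver:

from pydantic import BaseModel, validator

class DatasetHolder(BaseModel):
    dataset: str

    @validator('dataset', pre=True)
    def _resolve_dataset(cls, v):
        # pre=True runs before type coercion, so v is the raw input value.
        if isinstance(v, dict):  # illustrative: unwrap {"name": "..."} inputs
            return v.get("name", "")
        return v

print(DatasetHolder(dataset={"name": "gsm8k"}).dataset)  # gsm8k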
@@ -130,7 +130,7 @@ def test_benchmark_evaluator_initialization_minimal(mock_artifact, mock_resolve)

     assert evaluator.benchmark == _Benchmark.MMLU
     assert evaluator.model == DEFAULT_MODEL
-    assert evaluator.evaluate_base_model is True
+    assert evaluator.evaluate_base_model is False
     assert evaluator.subtasks == "ALL"


@@ -525,7 +525,7 @@ def test_benchmark_evaluator_get_benchmark_template_additions(mock_artifact, moc
     assert additions['strategy'] == 'zs_cot'
     assert additions['evaluation_metric'] == 'accuracy'
     assert additions['subtask'] == 'abstract_algebra'
-    assert additions['evaluate_base_model'] is True
+    assert additions['evaluate_base_model'] is False


 @patch('sagemaker.train.common_utils.recipe_utils._is_nova_model')
@@ -85,7 +85,7 @@ def test_custom_scorer_evaluator_initialization_minimal(mock_artifact, mock_reso
     assert evaluator.evaluator == _BuiltInMetric.PRIME_MATH
     assert evaluator.dataset == DEFAULT_DATASET
     assert evaluator.model == DEFAULT_MODEL
-    assert evaluator.evaluate_base_model is True
+    assert evaluator.evaluate_base_model is False


 @patch('sagemaker.train.common_utils.model_resolution._resolve_base_model')
@@ -952,7 +952,7 @@ def test_custom_scorer_evaluator_get_custom_scorer_template_additions_builtin(
     assert additions['task'] == 'gen_qa'
     assert additions['strategy'] == 'gen_qa'
     assert additions['evaluation_metric'] == 'all'
-    assert additions['evaluate_base_model'] is True
+    assert additions['evaluate_base_model'] is False
     assert additions['evaluator_arn'] is None
     assert additions['preset_reward_function'] == 'prime_math'
     assert 'temperature' in additions
@@ -67,7 +67,7 @@ def test_llm_as_judge_evaluator_initialization_minimal(mock_artifact, mock_resol
     assert evaluator.evaluator_model == DEFAULT_EVALUATOR_MODEL
     assert evaluator.dataset == DEFAULT_DATASET
     assert evaluator.model == DEFAULT_MODEL
-    assert evaluator.evaluate_base_model is True
+    assert evaluator.evaluate_base_model is False
     assert evaluator.builtin_metrics is None
     assert evaluator.custom_metrics is None

@@ -472,7 +472,7 @@ def test_llm_as_judge_evaluator_get_llmaj_template_additions(mock_artifact, mock
     assert additions['top_p'] == '1.0'
     # pipeline_name is no longer in template additions - it's resolved dynamically in execution.py
     assert 'pipeline_name' not in additions
-    assert additions['evaluate_base_model'] is True
+    assert additions['evaluate_base_model'] is False

     # Verify S3 upload was called
     mock_s3_upload.assert_called_once()
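All three test modules flip the same assertion. A condensed pytest sketch of the new expectation, reusing the hypothetical EvaluatorSketch model from the earlier sketch rather than the SDK classes:

from pydantic import BaseModel

class EvaluatorSketch(BaseModel):
    evaluate_base_model: bool = False  # the new default exercised by these tests

def test_default_is_false():
    assert EvaluatorSketch().evaluate_base_model is False

def test_opt_in_is_explicit():
    assert EvaluatorSketch(evaluate_base_model=True).evaluate_base_model is True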