Merged
flaml/automl/model.py: 35 changes (25 additions, 10 deletions)
@@ -1196,16 +1196,31 @@ def on_epoch_end(self, args, state, control, **callback_kwargs):
                     control.should_save = True
                     control.should_evaluate = True

-        self._trainer = TrainerForAuto(
-            args=self._training_args,
-            model_init=self._model_init,
-            train_dataset=train_dataset,
-            eval_dataset=eval_dataset,
-            tokenizer=self.tokenizer,
-            data_collator=self.data_collator,
-            compute_metrics=self._compute_metrics_by_dataset_name,
-            callbacks=[EarlyStoppingCallbackForAuto],
-        )
+        # Use processing_class for transformers >= 4.44.0, tokenizer for older versions
+        trainer_kwargs = {
+            "args": self._training_args,
+            "model_init": self._model_init,
+            "train_dataset": train_dataset,
+            "eval_dataset": eval_dataset,
+            "data_collator": self.data_collator,
+            "compute_metrics": self._compute_metrics_by_dataset_name,
+            "callbacks": [EarlyStoppingCallbackForAuto],
+        }
+
+        # Check if the processing_class parameter is supported (transformers >= 4.44.0)
+        try:
+            import transformers
+            from packaging import version
+
+            if version.parse(transformers.__version__) >= version.parse("4.44.0"):
+                trainer_kwargs["processing_class"] = self.tokenizer
+            else:
+                trainer_kwargs["tokenizer"] = self.tokenizer
+        except (ImportError, AttributeError, ValueError):
+            # Fall back to tokenizer if the version check fails
+            trainer_kwargs["tokenizer"] = self.tokenizer
+
+        self._trainer = TrainerForAuto(**trainer_kwargs)

         if self._task in NLG_TASKS:
             setattr(self._trainer, "_is_seq2seq", True)
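Side note on the hunk above: the same compatibility can be detected without parsing version strings, by checking whether the installed Trainer accepts the newer keyword at all. The following is a minimal sketch of that alternative, not part of this PR; `build_trainer` is a hypothetical helper, and the only assumption is that transformers is installed.

```python
# Sketch: pick the tokenizer keyword by inspecting Trainer's signature
# instead of comparing release numbers. `build_trainer` is hypothetical.
import inspect

from transformers import Trainer


def build_trainer(tokenizer, **trainer_kwargs):
    """Pass the tokenizer under whichever keyword this Trainer supports."""
    params = inspect.signature(Trainer.__init__).parameters
    if "processing_class" in params:
        # Releases that accept the newer keyword (per this PR, >= 4.44.0)
        trainer_kwargs["processing_class"] = tokenizer
    else:
        # Older releases still expect `tokenizer`
        trainer_kwargs["tokenizer"] = tokenizer
    return Trainer(**trainer_kwargs)
```

Inspecting the signature sidesteps edge cases in version-string parsing (pre-release tags, dev builds), at the cost of keying on a parameter name rather than a documented release number.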
flaml/automl/nlp/huggingface/utils.py: 37 changes (18 additions, 19 deletions)
@@ -211,29 +211,28 @@ def tokenize_onedataframe(
     hf_args=None,
     prefix_str=None,
 ):
-    with tokenizer.as_target_tokenizer():
-        _, tokenized_column_names = tokenize_row(
-            dict(X.iloc[0]),
+    _, tokenized_column_names = tokenize_row(
+        dict(X.iloc[0]),
+        tokenizer,
+        prefix=(prefix_str,) if task is SUMMARIZATION else None,
+        task=task,
+        hf_args=hf_args,
+        return_column_name=True,
+    )
+    d = X.apply(
+        lambda x: tokenize_row(
+            x,
             tokenizer,
             prefix=(prefix_str,) if task is SUMMARIZATION else None,
             task=task,
             hf_args=hf_args,
-            return_column_name=True,
-        )
-        d = X.apply(
-            lambda x: tokenize_row(
-                x,
-                tokenizer,
-                prefix=(prefix_str,) if task is SUMMARIZATION else None,
-                task=task,
-                hf_args=hf_args,
-            ),
-            axis=1,
-            result_type="expand",
-        )
-        X_tokenized = pd.DataFrame(columns=tokenized_column_names)
-        X_tokenized[tokenized_column_names] = d
-        return X_tokenized
+        ),
+        axis=1,
+        result_type="expand",
+    )
+    X_tokenized = pd.DataFrame(columns=tokenized_column_names)
+    X_tokenized[tokenized_column_names] = d
+    return X_tokenized


 def tokenize_row(
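Context for this hunk: the `with tokenizer.as_target_tokenizer():` wrapper is dropped because that context manager is deprecated in newer transformers releases, which tokenize targets via the `text_target` argument instead. The sketch below illustrates the replacement API; the "t5-small" checkpoint and the example strings are illustrative assumptions, not taken from FLAML.

```python
# Sketch of the API that replaced as_target_tokenizer(); checkpoint and
# strings here are illustrative assumptions only.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")

# Deprecated pattern removed by this PR:
#     with tokenizer.as_target_tokenizer():
#         labels = tokenizer("a short summary")
# Current pattern: tokenize inputs and targets in one call via text_target.
batch = tokenizer(
    "summarize: a long input document",
    text_target="a short summary",
    truncation=True,
)
print(batch["input_ids"])
print(batch["labels"])  # target ids produced by text_target
```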