In a standard training loop, you should have code like this at the end:
.....
def train_and_evaluate(output_dir, hparams):
    get_train = read_dataset(hparams['train_data_path'],
                             tf.estimator.ModeKeys.TRAIN,
                             hparams['train_batch_size'])
    get_valid = read_dataset(hparams['eval_data_path'],
                             tf.estimator.ModeKeys.EVAL,
                             1000)
    estimator = tf.estimator.Estimator(model_fn=sequence_regressor,
                                       params=hparams,
                                       config=tf.estimator.RunConfig(
                                           save_checkpoints_steps=hparams['save_checkpoint_steps']),
                                       model_dir=output_dir)
    train_spec = tf.estimator.TrainSpec(input_fn=get_train,
                                        max_steps=hparams['train_steps'])
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(input_fn=get_valid,
                                      steps=None,
                                      exporters=exporter,
                                      start_delay_secs=hparams['eval_delay_secs'],
                                      throttle_secs=hparams['min_eval_frequency'])
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
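The snippet above hands a serving_input_fn to the LatestExporter but doesn't show it. As a minimal sketch of what it could look like, assuming a single float feature called 'values' with N_INPUTS columns (both the name and the shape are placeholders, replace them with whatever your model_fn actually expects):

import tensorflow as tf

N_INPUTS = 10  # assumed input width, set this to your model's real value

def serving_input_fn():
    # Placeholders that the exported SavedModel will expose as its inputs.
    feature_placeholders = {
        'values': tf.placeholder(tf.float32, [None, N_INPUTS])
    }
    # In this simple case the features fed to the model are the placeholders themselves.
    features = {key: tensor for key, tensor in feature_placeholders.items()}
    return tf.estimator.export.ServingInputReceiver(features,
                                                    feature_placeholders)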
especially this part:
estimator = tf.estimator.Estimator(model_fn=sequence_regressor,
                                   params=hparams,
                                   config=tf.estimator.RunConfig(
                                       save_checkpoints_steps=hparams['save_checkpoint_steps']),
                                   model_dir=output_dir)
Here you specify after how many training steps (save_checkpoints_steps) a checkpoint is written to output_dir. With train_and_evaluate, each new checkpoint also triggers an evaluation (subject to throttle_secs), and the LatestExporter then writes a SavedModel under output_dir/export/exporter/.
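If you want to verify that checkpoints and exports are actually being produced, a quick check from Python looks like this (assuming output_dir is the same directory you passed to the Estimator):

import os
import tensorflow as tf

# Path of the newest checkpoint in output_dir, or None if nothing has been saved yet.
print(tf.train.latest_checkpoint(output_dir))

# The LatestExporter named 'exporter' writes SavedModels into timestamped
# subdirectories under output_dir/export/exporter/ once evaluations have run.
export_dir = os.path.join(output_dir, 'export', 'exporter')
if os.path.isdir(export_dir):
    print(os.listdir(export_dir))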
Do you have something like this in your code?