# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for saving and loading experiment checkpoints."""
import os.path
import re
from typing import Any, List, Tuple, Union
from absl import logging
import tensorflow as tf


class FileCheckpointManager():
  """A checkpoint manager backed by a file system.

  This checkpoint manager is a utility to save and load checkpoints. While it
  is compatible with any nested structure supported by `tf.convert_to_tensor`,
  checkpoints often represent the output of a `tff.templates.IterativeProcess`.
  For example, one possible use case is to save the `ServerState` output of an
  iterative process created via `tff.learning`. This is comparable to
  periodically saving model weights and optimizer states during non-federated
  training.

  This implementation differs slightly from `tf.train.CheckpointManager`: it
  yields nested structures that are immutable, whereas
  `tf.train.CheckpointManager` manages `tf.train.Checkpoint` objects, which are
  mutable collections. Additionally, this implementation allows retaining the
  initial checkpoint as part of the total number of checkpoints that are kept.

  The checkpoint manager is intended only for allowing simulations to be
  resumed after interruption. In particular, it is intended to only restart the
  same simulation, run with the same version of TensorFlow Federated.
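
  Example (an illustrative sketch; `initial_state` and `run_one_round` are
  hypothetical stand-ins for a simulation's state and one training round,
  e.g. the `ServerState` and `next` call of a `tff.learning` process):

    ckpt_manager = FileCheckpointManager('/tmp/ckpts', step=10)
    state, round_num = ckpt_manager.load_latest_checkpoint_or_default(
        initial_state)
    for round_num in range(round_num + 1, 100):
      state = run_one_round(state)
      ckpt_manager.save_checkpoint(state, round_num)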
"""

  def __init__(self,
               root_dir: str,
               prefix: str = 'ckpt_',
               step: int = 1,
               keep_total: int = 5,
               keep_first: bool = True):
    """Returns an initialized `FileCheckpointManager`.

    Args:
      root_dir: A path on the filesystem to store checkpoints.
      prefix: A string to use as the prefix for checkpoint names.
      step: How often the checkpoint manager should save a checkpoint. When
        calling `FileCheckpointManager.save_checkpoint`, a checkpoint will only
        be written for round numbers divisible by `step`.
      keep_total: An integer representing the total number of checkpoints to
        keep. If the value is zero or smaller, all checkpoints will be kept.
      keep_first: A boolean indicating whether the first checkpoint should be
        kept, irrespective of whether it is in the last `keep_total`
        checkpoints. This is desirable when you would like to ensure full
        reproducibility of the simulation, especially when model weights or
        optimizer states are initialized randomly. By loading from the initial
        checkpoint, one can avoid re-initializing and obtaining different
        results.
    """
    self._root_dir = root_dir
    self._prefix = prefix
    self._step = step
    self._keep_total = keep_total
    self._keep_first = keep_first
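    # Matches checkpoint paths of the form '<root_dir>/<prefix><round_num>',
    # e.g. '<root_dir>/ckpt_42', and captures the round number.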
    path = re.escape(os.path.join(root_dir, prefix))
    self._round_num_expression = re.compile(r'{}([0-9]+)$'.format(path))

  def load_latest_checkpoint_or_default(self, default: Any) -> Tuple[Any, int]:
    """Loads the latest checkpoint, or returns `default` if none exists.

    Saves `default` as the checkpoint for round 0 if no checkpoints exist.

    Args:
      default: A nested structure which `tf.convert_to_tensor` supports, used
        as a template when reconstructing the loaded state. This structure will
        be saved as the checkpoint for round number 0 and returned if there are
        no pre-existing saved checkpoints.

    Returns:
      A `tuple` of `(state, round_num)` where `state` matches the Python
      structure of `default`, and `round_num` is an integer. If no checkpoints
      have been written, returns `(default, 0)`.
    """
    state, round_num = self.load_latest_checkpoint(default)
    if state is None:
      state = default
      round_num = 0
      self.save_checkpoint(state, round_num)
    return state, round_num

  def load_latest_checkpoint(self,
                             structure: Any) -> Tuple[Any, Union[int, None]]:
    """Loads the latest state and round number.

    Args:
      structure: A nested structure which `tf.convert_to_tensor` supports, used
        as a template when reconstructing the loaded state.

    Returns:
      A `tuple` of `(state, round_num)` where `state` matches the Python
      structure of `structure`, and `round_num` is an integer. If no
      checkpoints have been previously saved, returns the tuple `(None, None)`.
    """
    checkpoint_paths = self._get_all_checkpoint_paths()
    if checkpoint_paths:
      checkpoint_path = max(checkpoint_paths, key=self._round_num)
      return self._load_checkpoint_from_path(structure, checkpoint_path)
    return None, None

  def load_checkpoint(self, structure: Any, round_num: int) -> Any:
    """Returns the checkpointed state for the given `round_num`.

    Args:
      structure: A nested structure which `tf.convert_to_tensor` supports, used
        as a template when reconstructing the loaded state.
      round_num: An integer representing the round to load from.
    """
    basename = '{}{}'.format(self._prefix, round_num)
    checkpoint_path = os.path.join(self._root_dir, basename)
    state, _ = self._load_checkpoint_from_path(structure, checkpoint_path)
    return state

  def _load_checkpoint_from_path(self, structure: Any,
                                 checkpoint_path: str) -> Tuple[Any, int]:
    """Returns the state and round number for the given `checkpoint_path`.

    Args:
      structure: A nested structure which `tf.convert_to_tensor` supports, used
        as a template when reconstructing the loaded state.
      checkpoint_path: A path on the filesystem to load.

    Raises:
      FileNotFoundError: If no checkpoint exists at the given
        `checkpoint_path`.
    """
    if not tf.io.gfile.exists(checkpoint_path):
      raise FileNotFoundError(
          'No such file or directory: {}'.format(checkpoint_path))
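    # Each checkpoint is a SavedModel whose `build_obj_fn` returns the flat
    # list of tensors written by `_save_checkpoint`; pack that list back into
    # the caller-provided `structure`.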
    model = tf.saved_model.load(checkpoint_path)
    flat_obj = model.build_obj_fn()
    state = tf.nest.pack_sequence_as(structure, flat_obj)
    round_num = self._round_num(checkpoint_path)
    logging.info('Checkpoint loaded: %s', checkpoint_path)
    return state, round_num

  def _save_checkpoint(self, state: Any, round_num: int) -> None:
    """Internal function to save a new checkpoint.

    Args:
      state: A nested structure which `tf.convert_to_tensor` supports.
      round_num: An integer representing the current training round.
    """
    basename = '{}{}'.format(self._prefix, round_num)
    checkpoint_path = os.path.join(self._root_dir, basename)
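    # Flatten the state and attach it to a `tf.Module` so the whole structure
    # can be exported as a SavedModel; `build_obj_fn` exposes the flat list of
    # tensors to the loader.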
    flat_obj = tf.nest.flatten(state)
    model = tf.Module()
    model.obj = flat_obj
    model.build_obj_fn = tf.function(lambda: model.obj, input_signature=())
    # First write to a temporary directory.
    temp_basename = '.temp_{}'.format(basename)
    temp_path = os.path.join(self._root_dir, temp_basename)
    try:
      tf.io.gfile.rmtree(temp_path)
    except tf.errors.NotFoundError:
      pass
    tf.io.gfile.makedirs(temp_path)
    tf.saved_model.save(model, temp_path, signatures={})
    # Rename the temp directory to the final location atomically.
    tf.io.gfile.rename(temp_path, checkpoint_path)
    logging.info('Checkpoint saved: %s', checkpoint_path)
    self._clear_old_checkpoints()

  def save_checkpoint(self, state: Any, round_num: int) -> None:
    """Saves a new checkpointed `state` for the given `round_num`.

    Note that a checkpoint is only written if `round_num` is divisible by the
    `step` initialization argument of the manager.

    Args:
      state: A nested structure which `tf.convert_to_tensor` supports.
      round_num: An integer representing the current training round.
    """
    if round_num % self._step == 0:
      self._save_checkpoint(state, round_num)

  def _clear_old_checkpoints(self) -> None:
    """Removes old checkpoints."""
    if self._keep_total <= 0:
      return
    checkpoint_paths = self._get_all_checkpoint_paths()
    if len(checkpoint_paths) > self._keep_total:
      checkpoint_paths = sorted(checkpoint_paths, key=self._round_num)
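      # Keep the first checkpoint (when `keep_first=True`) plus the most recent
      # checkpoints so that at most `keep_total` remain; the negative `stop`
      # index below slices out everything in between for deletion.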
      start = 1 if self._keep_first else 0
      stop = start - self._keep_total
      for checkpoint_path in checkpoint_paths[start:stop]:
        tf.io.gfile.rmtree(checkpoint_path)
        logging.info('Checkpoint removed: %s', checkpoint_path)

  def _round_num(self, checkpoint_path: str) -> int:
    """Returns the round number for the given `checkpoint_path`, or `-1`."""
    match = self._round_num_expression.match(checkpoint_path)
    if match is None:
      logging.debug(
          'Could not extract round number from: \'%s\' using the following '
          'pattern: \'%s\'', checkpoint_path,
          self._round_num_expression.pattern)
      return -1
    return int(match.group(1))

  def _get_all_checkpoint_paths(self) -> List[str]:
    """Returns all the checkpoint paths managed by the instance."""
    # Due to tensorflow/issues/19378, we cannot use `tf.io.gfile.glob` here
    # because it returns directory contents recursively on Windows.
    if tf.io.gfile.exists(self._root_dir):
      root_dir_entries = tf.io.gfile.listdir(self._root_dir)
      return [
          os.path.join(self._root_dir, e)
          for e in root_dir_entries
          if e.startswith(self._prefix)
      ]
    else:
      return []
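

if __name__ == '__main__':
  # Illustrative round-trip sketch (assumes TensorFlow 2.x eager execution):
  # save a toy nested state for a few rounds and reload the latest checkpoint.
  # The toy dict below is a stand-in for real simulation state such as a
  # `tff.learning` `ServerState`.
  import tempfile

  root = tempfile.mkdtemp()
  manager = FileCheckpointManager(root, step=1, keep_total=3)
  toy_state = {'round': tf.constant(0), 'weights': tf.constant([1.0, 2.0])}
  state, round_num = manager.load_latest_checkpoint_or_default(toy_state)
  for r in range(round_num + 1, 6):
    state = {'round': tf.constant(r), 'weights': state['weights'] + 1.0}
    manager.save_checkpoint(state, r)
  restored, latest_round = manager.load_latest_checkpoint(toy_state)
  print('Restored round', latest_round, 'weights:',
        restored['weights'].numpy())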