Use items instead of iteritems
Garrett Smith committed Jul 30, 2018
1 parent 142aea4 commit a26c3d6
Showing 7 changed files with 17 additions and 17 deletions.
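Note: dict.iteritems() exists only in Python 2 and was removed in Python 3, so these call sites would raise AttributeError under Python 3. dict.items() works in both versions (it returns a list in Python 2 and a lightweight view in Python 3), so the substitution is safe when the dictionary is only iterated, as in all of the hunks below. A minimal sketch of the pattern, using a hypothetical label map rather than code from the changed files:

    labelmap_dict = {'/m/01g317': 1, '/m/0199g': 2}  # hypothetical class name -> id map

    # Python 2 only -- raises AttributeError on Python 3:
    #   for name, class_id in labelmap_dict.iteritems():
    #       ...

    # Works on both Python 2 and Python 3:
    for name, class_id in labelmap_dict.items():
        print(name, class_id)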
@@ -35,7 +35,7 @@ def _update_dict(initial_dict, update):
update: updated dictionary.
"""

- for key, value_list in update.iteritems():
+ for key, value_list in update.items():
if key in initial_dict:
initial_dict[key].extend(value_list)
else:
@@ -70,7 +70,7 @@ def _build_plain_hierarchy(hierarchy, skip_root=False):
if not skip_root:
all_keyed_parent[hierarchy['LabelName']] = all_children
all_children = [hierarchy['LabelName']] + all_children
- for child, _ in all_keyed_child.iteritems():
+ for child, _ in all_keyed_child.items():
all_keyed_child[child].append(hierarchy['LabelName'])
all_keyed_child[hierarchy['LabelName']] = []

6 changes: 3 additions & 3 deletions research/object_detection/eval_util_test.py
@@ -73,7 +73,7 @@ def test_get_eval_metric_ops_for_coco_detections(self):

with self.test_session() as sess:
metrics = {}
- for key, (value_op, _) in metric_ops.iteritems():
+ for key, (value_op, _) in metric_ops.items():
metrics[key] = value_op
sess.run(update_op)
metrics = sess.run(metrics)
@@ -93,7 +93,7 @@ def test_get_eval_metric_ops_for_coco_detections_and_masks(self):

with self.test_session() as sess:
metrics = {}
- for key, (value_op, _) in metric_ops.iteritems():
+ for key, (value_op, _) in metric_ops.items():
metrics[key] = value_op
sess.run(update_op_boxes)
sess.run(update_op_masks)
@@ -113,7 +113,7 @@ def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(self):

with self.test_session() as sess:
metrics = {}
- for key, (value_op, _) in metric_ops.iteritems():
+ for key, (value_op, _) in metric_ops.items():
metrics[key] = value_op
sess.run(update_op_boxes)
sess.run(update_op_masks)
4 changes: 2 additions & 2 deletions research/object_detection/metrics/coco_evaluation.py
@@ -536,7 +536,7 @@ def evaluate(self):
'annotations': self._groundtruth_list,
'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
for image_id, shape in self._image_id_to_mask_shape_map.
- iteritems()],
+ items()],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(
@@ -550,7 +550,7 @@ def evaluate(self):
include_metrics_per_category=self._include_metrics_per_category)
mask_metrics.update(mask_per_category_ap)
mask_metrics = {'DetectionMasks_'+ key: value
- for key, value in mask_metrics.iteritems()}
+ for key, value in mask_metrics.items()}
return mask_metrics

def get_estimator_eval_metric_ops(self, image_id, groundtruth_boxes,
10 changes: 5 additions & 5 deletions research/object_detection/metrics/coco_evaluation_test.py
@@ -296,7 +296,7 @@ def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
detection_classes: np.array([2])
})
metrics = {}
- for key, (value_op, _) in eval_metric_ops.iteritems():
+ for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
@@ -392,7 +392,7 @@ def testGetOneMAPWithMatchingGroundtruthAndDetectionsPadded(self):
np.array([2, 2])
})
metrics = {}
- for key, (value_op, _) in eval_metric_ops.iteritems():
+ for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
@@ -450,7 +450,7 @@ def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
detection_classes: np.array([[1], [3], [2]])
})
metrics = {}
- for key, (value_op, _) in eval_metric_ops.iteritems():
+ for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
@@ -520,7 +520,7 @@ def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self):
num_det_boxes_per_image: np.array([1, 1, 2]),
})
metrics = {}
- for key, (value_op, _) in eval_metric_ops.iteritems():
+ for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
@@ -701,7 +701,7 @@ def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
mode='constant')
})
metrics = {}
- for key, (value_op, _) in eval_metric_ops.iteritems():
+ for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
@@ -74,7 +74,7 @@ def _swap_labelmap_dict(labelmap_dict):
Returns:
A dictionary mapping class name to class numerical id.
"""
- return dict((v, k) for k, v in labelmap_dict.iteritems())
+ return dict((v, k) for k, v in labelmap_dict.items())


def main(parsed_args):
2 changes: 1 addition & 1 deletion research/object_detection/model_lib.py
@@ -388,7 +388,7 @@ def tpu_scaffold():
if img_summary is not None:
eval_metric_ops['Detections_Left_Groundtruth_Right'] = (
img_summary, tf.no_op())
- eval_metric_ops = {str(k): v for k, v in eval_metric_ops.iteritems()}
+ eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}

if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
6 changes: 3 additions & 3 deletions research/object_detection/utils/vrd_evaluation.py
@@ -252,12 +252,12 @@ def evaluate(self, relationships=None):
recall_100,
}
if relationships:
- for key, average_precision in average_precisions.iteritems():
+ for key, average_precision in average_precisions.items():
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold,
relationships[key])] = average_precision
else:
- for key, average_precision in average_precisions.iteritems():
+ for key, average_precision in average_precisions.items():
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold, key)] = average_precision

@@ -547,7 +547,7 @@ def evaluate(self):
relation_field_values = np.concatenate(self._relation_field_values)

for relation_field_value, _ in (
- self._num_gt_instances_per_relationship.iteritems()):
+ self._num_gt_instances_per_relationship.items()):
precisions, recalls = metrics.compute_precision_recall(
scores[relation_field_values == relation_field_value],
tp_fp_labels[relation_field_values == relation_field_value],
