diff --git a/research/object_detection/model_lib.py b/research/object_detection/model_lib.py
index 5ba6f9b6..2239bb8b 100644
--- a/research/object_detection/model_lib.py
+++ b/research/object_detection/model_lib.py
@@ -411,7 +411,7 @@ def tpu_scaffold():
 
       # Eval metrics on a single example.
       eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
-          eval_config, category_index.values(), eval_dict)
+          eval_config, list(category_index.values()), eval_dict)
       for loss_key, loss_tensor in iter(losses_dict.items()):
         eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
       for var in optimizer_summary_vars:
diff --git a/research/object_detection/utils/object_detection_evaluation.py b/research/object_detection/utils/object_detection_evaluation.py
index 5826c581..1c155c2d 100644
--- a/research/object_detection/utils/object_detection_evaluation.py
+++ b/research/object_detection/utils/object_detection_evaluation.py
@@ -304,7 +304,7 @@ def evaluate(self):
       if idx + self._label_id_offset in category_index:
         category_name = category_index[idx + self._label_id_offset]['name']
         try:
-          category_name = unicode(category_name, 'utf-8')
+          category_name = str(category_name, 'utf-8')
         except TypeError:
           pass
         category_name = unicodedata.normalize(
@@ -795,8 +795,7 @@ def add_single_detected_image_info(self, image_key, detected_boxes,
       if scores[i].shape[0] > 0:
         self.scores_per_class[i].append(scores[i])
         self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
-    (self.num_images_correctly_detected_per_class
-    ) += is_class_correctly_detected_in_image
+    self.num_images_correctly_detected_per_class += is_class_correctly_detected_in_image
 
 def _update_ground_truth_statistics(self, groundtruth_class_labels,
                                     groundtruth_is_difficult_list,