From d17e055ec6ff6bf9eb4fffd86a40dd6217a1e155 Mon Sep 17 00:00:00 2001
From: Krzysztof Kowalczyk
Date: Wed, 3 Apr 2019 22:58:16 +0200
Subject: [PATCH] Add return_score option to evaluate

---
 spacy/cli/evaluate.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/spacy/cli/evaluate.py b/spacy/cli/evaluate.py
index 68a7eca2cee..123e5c536c4 100644
--- a/spacy/cli/evaluate.py
+++ b/spacy/cli/evaluate.py
@@ -4,6 +4,7 @@
 import plac
 from timeit import default_timer as timer
 from wasabi import Printer
+import srsly
 
 from ..gold import GoldCorpus
 from .. import util
@@ -17,6 +18,7 @@
     gpu_id=("Use GPU", "option", "g", int),
     displacy_path=("Directory to output rendered parses as HTML", "option", "dp", str),
     displacy_limit=("Limit of parses to render as HTML", "option", "dl", int),
+    return_scores=("Return dict containing model scores", "flag", "r", bool),
 )
 def evaluate(
     model,
@@ -25,6 +27,7 @@ def evaluate(
     gold_preproc=False,
     displacy_path=None,
     displacy_limit=25,
+    return_scores=False,
 ):
     """
     Evaluate a model. To render a sample of parses in a HTML file, set an
@@ -75,6 +78,8 @@ def evaluate(
             ents=render_ents,
         )
         msg.good("Generated {} parses as HTML".format(displacy_limit), displacy_path)
+    if return_scores:
+        return scorer.scores
 
 
 def render_parses(docs, output_path, model_name="", limit=250, deps=True, ents=True):