Skip to content

Commit 95fe8da

Browse files
committed
review comment
1 parent 562bc8b commit 95fe8da

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

rasa/nlu/tokenizers/spacy_tokenizer.py

+7-4
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,8 @@ def train(
4040

4141
if attribute_doc is not None:
4242
example.set(
43-
MESSAGE_TOKENS_NAMES[attribute], self.tokenize(attribute_doc)
43+
MESSAGE_TOKENS_NAMES[attribute],
44+
self.tokenize(attribute_doc, attribute),
4445
)
4546

4647
def get_doc(self, message: Message, attribute: Text) -> "Doc":
@@ -49,10 +50,12 @@ def get_doc(self, message: Message, attribute: Text) -> "Doc":
4950
def process(self, message: Message, **kwargs: Any) -> None:
5051
message.set(
5152
MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE],
52-
self.tokenize(self.get_doc(message, MESSAGE_TEXT_ATTRIBUTE)),
53+
self.tokenize(
54+
self.get_doc(message, MESSAGE_TEXT_ATTRIBUTE), MESSAGE_TEXT_ATTRIBUTE
55+
),
5356
)
5457

55-
def tokenize(self, doc: "Doc") -> List[Token]:
58+
def tokenize(self, doc: "Doc", attribute: Text) -> List[Token]:
5659
tokens = [Token(t.text, t.idx) for t in doc]
57-
self.add_cls_token(tokens)
60+
self.add_cls_token(tokens, attribute)
5861
return tokens

0 commit comments

Comments (0)