Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update NEL emerson config options #207

Merged
merged 7 commits into from
Apr 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ gpu_allocator = null

[nlp]
lang = "en"
pipeline = ["sentencizer","entity_ruler","ner","entity_linker"]
pipeline = ["sentencizer","entity_ruler","entity_linker"]
disabled = []
before_creation = null
after_creation = null
Expand All @@ -30,10 +30,6 @@ punct_chars = null
source = "${paths.base_nlp}"
component = "entity_ruler"

[components.ner]
source = "${paths.base_nlp}"
component = "ner"

[components.entity_linker]
factory = "entity_linker"
entity_vector_length = 64
Expand Down Expand Up @@ -94,7 +90,7 @@ eval_frequency = 200
accumulate_gradient = 2
max_epochs = 0
max_steps = 600
frozen_components = ["sentencizer","ner"]
frozen_components = []
before_to_disk = null

[training.logger]
Expand Down Expand Up @@ -130,6 +126,12 @@ learn_rate = 0.001
nel_micro_p = 0.0
nel_micro_r = 0.0
nel_micro_f = 1.0
ents_f = 0.0
ents_p = 0.0
ents_r = 0.0
sents_f = null
sents_p = null
sents_r = null

[pretraining]

Expand Down
148 changes: 148 additions & 0 deletions tutorials/nel_emerson/configs/nel_ner.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
# spaCy training config (Thinc config format — INI-like, not strict TOML) for
# the NEL Emerson tutorial. Pipeline: sentencizer + NER sourced from a base
# model + a trainable entity_linker. ${...} values are Thinc variable
# interpolation, resolved when the config is loaded.

[paths]
# Empty strings / nulls are placeholders.
# NOTE(review): presumably filled in via `spacy train` CLI overrides or the
# project.yml commands — confirm against the project file.
train = ""
dev = ""
raw = null
init_tok2vec = null
kb = ""
base_nlp = ""
# Reuse the base pipeline's word vectors instead of a separate vectors package.
vectors = "${paths.base_nlp}"

[system]
seed = 342
# null = default (CPU) allocator; no GPU assumed.
gpu_allocator = null

[nlp]
lang = "en"
# Component order matters: sentence boundaries and entity spans must exist
# before the entity_linker runs.
pipeline = ["sentencizer","ner","entity_linker"]
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.sentencizer]
# Rule-based sentence segmenter; null punct_chars = library defaults.
factory = "sentencizer"
punct_chars = null

[components.ner]
# NER is not built from a factory here — it is sourced (copied with its
# trained weights) from the pipeline at ${paths.base_nlp}.
source = "${paths.base_nlp}"
component = "ner"

[components.entity_linker]
# Trainable NEL component — the only component actually trained by this config.
factory = "entity_linker"
entity_vector_length = 64
get_candidates = {"@misc":"spacy.CandidateGenerator.v1"}
incl_context = true
incl_prior = true
labels_discard = []
# Train on gold entity annotations rather than NER predictions.
use_gold_ents = true

[components.entity_linker.model]
@architectures = "spacy.EntityLinker.v2"
nO = null

[components.entity_linker.model.tok2vec]
# Small embedding CNN for the linker's own context encoding.
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true

[initialize]
# Unquoted ${...} references resolve to the referenced value (not a string).
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null

[initialize.components]

[initialize.components.entity_linker]

[initialize.components.entity_linker.kb_loader]
# Load a pre-built knowledge base from disk at initialization time.
@misc = "spacy.KBFromFile.v1"
kb_path = ${paths.kb}

[initialize.tokenizer]


[corpora]

[corpora.train]
# Custom registered corpus reader.
# NOTE(review): "MyCorpus.v1" must be registered in code made available to
# spaCy (e.g. via --code) — confirm where it is defined.
@readers = "MyCorpus.v1"
file = ${paths.train}

[corpora.dev]
@readers = "MyCorpus.v1"
file = ${paths.dev}

[training]
train_corpus = "corpora.train"
dev_corpus = "corpora.dev"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.2
patience = 10000
eval_frequency = 200
accumulate_gradient = 2
# max_epochs = 0 means epochs are not the stopping criterion; max_steps caps
# training instead.
max_epochs = 0
max_steps = 600
# The sourced NER keeps predicting entities but its weights are not updated.
frozen_components = ["ner"]
before_to_disk = null

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false


[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
get_length = null

[training.batcher.size]
# Batch size compounds from 100 toward 1000 words over training.
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
t = 0.0

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.001

[training.score_weights]
# Only NEL micro-F (weight 1.0) drives best-model selection; entity scores
# are reported with zero weight. NOTE(review): null presumably excludes the
# sents_* scores from the weighted total — confirm against spaCy docs.
nel_micro_p = 0.0
nel_micro_r = 0.0
nel_micro_f = 1.0
ents_f = 0.0
ents_p = 0.0
ents_r = 0.0
sents_f = null
sents_p = null
sents_r = null

[pretraining]

# NOTE(review): this root-level [optimizer] block is separate from
# [training.optimizer] and appears unused by `spacy train` — confirm whether
# it is leftover boilerplate.
[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999
L2 = 0.0
eps = 0.00000001
grad_clip = 1.0
L2_is_weight_decay = true
use_averages = true
141 changes: 141 additions & 0 deletions tutorials/nel_emerson/configs/nel_only.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
# spaCy training config (Thinc config format — INI-like, not strict TOML) for
# the NEL Emerson tutorial. Pipeline: sentencizer + trainable entity_linker,
# with NO entity recognizer — the linker is trained on gold entity spans only
# (use_gold_ents below). ${...} values are Thinc variable interpolation.

[paths]
# Empty strings / nulls are placeholders.
# NOTE(review): presumably filled in via `spacy train` CLI overrides or the
# project.yml commands — confirm against the project file.
train = ""
dev = ""
raw = null
init_tok2vec = null
kb = ""
base_nlp = ""
# Reuse the base pipeline's word vectors instead of a separate vectors package.
vectors = "${paths.base_nlp}"

[system]
seed = 342
# null = default (CPU) allocator; no GPU assumed.
gpu_allocator = null

[nlp]
lang = "en"
# No "ner" component here — entities must come from gold annotations.
pipeline = ["sentencizer","entity_linker"]
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.sentencizer]
# Rule-based sentence segmenter; null punct_chars = library defaults.
factory = "sentencizer"
punct_chars = null

[components.entity_linker]
# Trainable NEL component — the only component actually trained by this config.
factory = "entity_linker"
entity_vector_length = 64
get_candidates = {"@misc":"spacy.CandidateGenerator.v1"}
incl_context = true
incl_prior = true
labels_discard = []
# Train on gold entity annotations (required here, since the pipeline has no
# component that predicts entities).
use_gold_ents = true

[components.entity_linker.model]
@architectures = "spacy.EntityLinker.v2"
nO = null

[components.entity_linker.model.tok2vec]
# Small embedding CNN for the linker's own context encoding.
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true

[initialize]
# Unquoted ${...} references resolve to the referenced value (not a string).
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null

[initialize.components]

[initialize.components.entity_linker]

[initialize.components.entity_linker.kb_loader]
# Load a pre-built knowledge base from disk at initialization time.
@misc = "spacy.KBFromFile.v1"
kb_path = ${paths.kb}

[initialize.tokenizer]


[corpora]

[corpora.train]
# Custom registered corpus reader.
# NOTE(review): "MyCorpus.v1" must be registered in code made available to
# spaCy (e.g. via --code) — confirm where it is defined.
@readers = "MyCorpus.v1"
file = ${paths.train}

[corpora.dev]
@readers = "MyCorpus.v1"
file = ${paths.dev}

[training]
train_corpus = "corpora.train"
dev_corpus = "corpora.dev"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.2
patience = 10000
eval_frequency = 200
accumulate_gradient = 2
# max_epochs = 0 means epochs are not the stopping criterion; max_steps caps
# training instead.
max_epochs = 0
max_steps = 600
# Nothing to freeze: the only trainable component is the entity_linker itself.
frozen_components = []
before_to_disk = null

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false


[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
get_length = null

[training.batcher.size]
# Batch size compounds from 100 toward 1000 words over training.
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
t = 0.0

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.001

[training.score_weights]
# Only NEL micro-F (weight 1.0) drives best-model selection.
# NOTE(review): null presumably excludes the sents_* scores from the weighted
# total — confirm against spaCy docs.
nel_micro_p = 0.0
nel_micro_r = 0.0
nel_micro_f = 1.0
sents_f = null
sents_p = null
sents_r = null

[pretraining]

# NOTE(review): this root-level [optimizer] block is separate from
# [training.optimizer] and appears unused by `spacy train` — confirm whether
# it is leftover boilerplate.
[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999
L2 = 0.0
eps = 0.00000001
grad_clip = 1.0
L2_is_weight_decay = true
use_averages = true
4 changes: 2 additions & 2 deletions tutorials/nel_emerson/project.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,15 @@ description: "**This project was created as part of a [step-by-step video tutori
# Variables can be referenced across the project.yml using ${vars.var_name}
vars:
name: "nel_emerson"
config: "nel.cfg"
config: "nel_entityruler.cfg"
vectors_model: "en_core_web_md"
annotations: "emerson_annotated_text.jsonl"
entities: "entities.csv"
kb: "my_kb"
nlp: "my_nlp"
train: "train"
dev: "dev"
version: "0.0.3"
version: "0.0.4"

# These are the directories that the project needs. The project CLI will make
# sure that they always exist.
Expand Down
Loading