Catastrophic forgetting problem with spaCy custom Named Entity Recognition (NER)

The model cannot remember the previous labels it was trained on. I know this is "catastrophic forgetting", but no example or blog post seems to solve the problem. The most common answer points to this blog post, https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting, but it is outdated by now and does not help.

Here is my code:

from __future__ import unicode_literals, print_function
import json

# Read the Doccano JSONL export: one labeled record per line
labeled_data = []
with open(r"/content/emails_labeled.jsonl", "r") as read_file:
    for line in read_file:
        data = json.loads(line)
        labeled_data.append(data)

# Convert each record to spaCy's (text, {"entities": [(start, end, label), ...]}) format
TRAIN_DATA = []
for entry in labeled_data:
    entities = []
    for e in entry['labels']:
        entities.append((e[0], e[1], e[2]))
    spacy_entry = (entry['text'], {"entities": entities})
    TRAIN_DATA.append(spacy_entry)
import plac
import random
import warnings
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding


# new entity label
LABEL = "OIL"

# training data
# Note: If you're using an existing model, make sure to mix in examples of
# other entity types that spaCy correctly recognized before. Otherwise, your
# model might learn the new type, but "forget" what it previously knew.
# https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting
'''
TRAIN_DATA = [
    (
        "Horses are too tall and they pretend to care about your feelings",
        {"entities": [(0, 6, LABEL)]},
    ),
    ("Do they bite?", {"entities": []}),
    (
        "horses are too tall and they pretend to care about your feelings",
        {"entities": [(0, 6, LABEL)]},
    ),
    ("horses pretend to care about your feelings", {"entities": [(0, 6, LABEL)]}),
    (
        "they pretend to care about your feelings, those horses",
        {"entities": [(48, 54, LABEL)]},
    ),
    ("horses?", {"entities": [(0, 6, LABEL)]}),
]
'''

@plac.annotations(
    model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
    new_model_name=("New model name for model meta.", "option", "nm", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int),
)
def main(model='/content/LinkModelOutput', new_model_name="Oil21", output_dir='/content/Last', n_iter=30):
    """Set up the pipeline and entity recognizer, and train the new entity."""
    random.seed(0)
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")
    # Add entity recognizer to model if it's not in the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if "ner" not in nlp.pipe_names:
        ner = nlp.create_pipe("ner")
        nlp.add_pipe(ner)
    # otherwise, get it, so we can add labels to it
    else:
        ner = nlp.get_pipe("ner")

    ner.add_label(LABEL)  # add new entity label to entity recognizer
    # Adding extraneous labels shouldn't mess anything up
    #ner.add_label("VEGETABLE")
    if model is None:
        optimizer = nlp.begin_training()
    else:
        optimizer = nlp.resume_training()
    move_names = list(ner.move_names)
    # get names of other pipes to disable them during training
    pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    # only train NER
    with nlp.disable_pipes(*other_pipes), warnings.catch_warnings():
        # show warnings for misaligned entity spans once
        warnings.filterwarnings("once", category=UserWarning, module='spacy')

        sizes = compounding(1.0, 4.0, 1.001)
        # batch up the examples using spaCy's minibatch
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)
            batches = minibatch(TRAIN_DATA, size=sizes)
            losses = {}
            for batch in batches:
                texts, annotations = zip(*batch)
                # update the model; only the "ner" pipe is enabled at this point
                nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)
            print("Losses", losses)

    # test the trained model
    test_text = "Here is Hindustan petroleum's oil reserves coup in Australia. Details can be found at https://www.textfixer.com/tools/remove-line-breaks.php?"
    doc = nlp(test_text)
    print("Entities in '%s'" % test_text)
    for ent in doc.ents:
        print(ent.label_, ent.text)

    # save model to output directory
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.meta["name"] = new_model_name  # rename model
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)

        # test the saved model
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        # Check the classes have loaded back consistently
        assert nlp2.get_pipe("ner").move_names == move_names
        doc2 = nlp2(test_text)
        for ent in doc2.ents:
            print(ent.label_, ent.text)


if __name__ == "__main__":
    plac.call(main)

The data annotation was done with Doccano. Here is a look at the data:

{"id": 174, "text": "service\tmarathon petroleum reduces service postings marathon petroleum co said it reduced the contract price it will pay for all grades of service oil one dlr a barrel effective today the decrease brings marathon s posted price for both west texas intermediate and west texas sour to dlrs a bbl the south louisiana sweet grade of service was reduced to dlrs a bbl the company last changed its service postings on jan reuter", "meta": {}, "annotation_approver": null, "labels": [[61, 70, "OIL"], [147, 150, "OIL"]]}
{"id": 175, "text": "mutual funds\tmunsingwear inc mun th qtr jan loss shr loss cts vs loss seven cts net loss vs loss revs mln vs mln year shr profit cts vs profit cts net profit vs profit revs mln vs mln avg shrs vs note per shr adjusted for for stock split july and for split may reuter", "meta": {}, "annotation_approver": null, "labels": []}

aravind kamarsu, 28.01.2021


Answers (1)


I am not a spaCy specialist, but I had the same problem. There are a few important points: the annotation tool, the amount of training data, and mixing in correctly predicted entities. First, make sure your training data is labeled correctly by the tool of your choice, i.e. you get no user warnings about misaligned spans. The model also needs a lot of data for good predictions: at least 200 examples for each entity type you want to train; I personally label as much data as possible. Finally, the spaCy developers recommend mixing in entities that your model already predicts correctly (sketched below).
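
For that last point, a rough sketch of the pseudo-rehearsal idea from the blog post linked in the question, adapted to the v2-style loop above: run the currently loaded model over some unlabelled text, keep its own predictions as extra "revision" examples, and shuffle them in with the new OIL data. The raw_texts sentences and the simple 1:1 mixing below are placeholders, not something from the original code:

import random
import spacy

nlp = spacy.load("/content/LinkModelOutput")  # the existing model being fine-tuned

# unlabelled sentences the old model still handles well (placeholder examples)
raw_texts = [
    "Net income was $9.4 million compared to the prior year of $2.7 million.",
    "John Smith flew to London last Tuesday.",
]

# build "revision data" from the model's own predictions so the old labels
# keep showing up in the training signal (pseudo-rehearsal)
revision_data = []
for doc in nlp.pipe(raw_texts):
    entities = [(ent.start_char, ent.end_char, ent.label_) for ent in doc.ents]
    revision_data.append((doc.text, {"entities": entities}))

# mix the revision examples with the new OIL examples before every epoch,
# then feed mixed_data to minibatch()/nlp.update() instead of TRAIN_DATA
mixed_data = TRAIN_DATA + revision_data
random.shuffle(mixed_data)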

gzkhv, 02.02.2021
Comment: Thanks for the reply. I used Doccano as the annotation tool. What do you think is wrong with it? - aravind kamarsu; 03.02.2021