rel_precision: 0.0001, rel_recall: 0.0269, rel_f1: 0.0001, rel_span_recall: 0.0370, loss: 0.4834 ||: 100%|##########|
1022/1022 [00:57<00:00, 17.70it/s]
2020-11-17 15:06:06,074 - INFO - allennlp.training.trainer - Ran out of patience. Stopping training.
2020-11-17 15:06:06,076 - INFO - allennlp.training.checkpointer - loading best weights
2020-11-17 15:06:06,349 - INFO - allennlp.commands.train - To evaluate on the test set after training, pass the 'evaluate_on_test' flag, or use the 'allennlp evaluate' command.
2020-11-17 15:06:06,350 - INFO - allennlp.models.archival - archiving weights and vocabulary to ./models/kn/model.tar.gz
2020-11-17 15:06:30,631 - INFO - allennlp.common.util - Metrics: {
"best_epoch": 8,
"peak_cpu_memory_MB": 5457.956,
"peak_gpu_0_memory_MB": 7333,
"peak_gpu_1_memory_MB": 9653,
"peak_gpu_2_memory_MB": 10289,
"peak_gpu_3_memory_MB": 9653,
"peak_gpu_4_memory_MB": 10439,
"peak_gpu_5_memory_MB": 1079,
"peak_gpu_6_memory_MB": 10289,
"peak_gpu_7_memory_MB": 7649,
"training_duration": "3:17:07.685956",
"training_start_epoch": 0,
"training_epochs": 22,
"epoch": 22,
"training__coref_precision": 0.0,
"training__coref_recall": 0.0,
"training__coref_f1": 0.0,
"training__coref_mention_recall": 0.0,
"training__ner_precision": 0.0,
"training__ner_recall": 0.0,
"training__ner_f1": 0.0,
"training_rel_precision": 3.5733284070843595e-05,
"training_rel_recall": 0.013081574725410341,
"training_rel_f1": 7.127188374614434e-05,
"training_rel_span_recall": 0.016413673947920523,
"training__trig_id_precision": 0,
"training__trig_id_recall": 0,
"training__trig_id_f1": 0,
"training__trig_class_precision": 0,
"training__trig_class_recall": 0,
"training__trig_class_f1": 0,
"training__arg_id_precision": 0,
"training__arg_id_recall": 0,
"training__arg_id_f1": 0,
"training__arg_class_precision": 0,
"training__arg_class_recall": 0,
"training__arg_class_f1": 0,
"training__args_multiple": 0,
"training_loss": 0.010883575075255758,
"training_cpu_memory_MB": 5334.428,
"training_gpu_0_memory_MB": 7333,
"training_gpu_1_memory_MB": 9653,
"training_gpu_2_memory_MB": 10289,
"training_gpu_3_memory_MB": 9653,
"training_gpu_4_memory_MB": 10439,
"training_gpu_5_memory_MB": 1079,
"training_gpu_6_memory_MB": 10289,
"training_gpu_7_memory_MB": 7649,
"validation__coref_precision": 0.0,
"validation__coref_recall": 0.0,
"validation__coref_f1": 0.0,
"validation__coref_mention_recall": 0.0,
"validation__ner_precision": 0.0,
"validation__ner_recall": 0.0,
"validation__ner_f1": 0.0,
"validation_rel_precision": 5.676047081316843e-05,
"validation_rel_recall": 0.02132435465768799,
"validation_rel_f1": 0.0001132195774526191,
"validation_rel_span_recall": 0.0306771417882529,
"validation__trig_id_precision": 0,
"validation__trig_id_recall": 0,
"validation__trig_id_f1": 0,
"validation__trig_class_precision": 0,
"validation__trig_class_recall": 0,
"validation__trig_class_f1": 0,
"validation__arg_id_precision": 0,
"validation__arg_id_recall": 0,
"validation__arg_id_f1": 0,
"validation__arg_class_precision": 0,
"validation__arg_class_recall": 0,
"validation__arg_class_f1": 0,
"validation__args_multiple": 0,
"validation_loss": 0.3997326233545447,
"best_validation__coref_precision": 0.0,
"best_validation__coref_recall": 0.0,
"best_validation__coref_f1": 0.0,
"best_validation__coref_mention_recall": 0.0,
"best_validation__ner_precision": 0.0,
"best_validation__ner_recall": 0.0,
"best_validation__ner_f1": 0.0,
"best_validation_rel_precision": 0.00013013713035796913,
"best_validation_rel_recall": 0.037037037037037035,
"best_validation_rel_f1": 0.00025936293651240686,
"best_validation_rel_span_recall": 0.04526748971193416,
"best_validation__trig_id_precision": 0,
"best_validation__trig_id_recall": 0,
"best_validation__trig_id_f1": 0,
"best_validation__trig_class_precision": 0,
"best_validation__trig_class_recall": 0,
"best_validation__trig_class_f1": 0,
"best_validation__arg_id_precision": 0,
"best_validation__arg_id_recall": 0,
"best_validation__arg_id_f1": 0,
"best_validation__arg_class_precision": 0,
"best_validation__arg_class_recall": 0,
"best_validation__arg_class_f1": 0,
"best_validation__args_multiple": 0,
"best_validation_loss": 0.15924045607033888
}
Hi @schmidek
Thank you a lot for your contribution! We tried to launch your project on KnowledgeNet Dataset (this version: https://github.com/schmidek/dygiepp/tree/multitask/dygie) and encounter with the following issue: the training does not yield the expected high scores and a NaN or Inf warning is constantly raised during training.
The setup is:
We run the following command:
./scripts/train/train_kn.sh gpu_id. This NaN warning appears a couple of times in each epoch. The intermediate evaluation results also do not improve over the epochs.
Our guess is that the problem might be in the training configs, since the NaN issue might be caused by the older version of allennlp and its tensorboard logging (allenai/allennlp#3116). Did you use the same config as mentioned above for the training of Dygie++ for the KnowledgeNet leaderboard?
Could you please help us? Did we miss anything in the preprocessing that is required for successful training?
Evaluation after the first epoch
Evaluation after epoch 22 (early-stopping)