scoring_metrics.py Functions

scoring_metrics.accuracy(tp, fp, tn, fn)
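
Accuracy is conventionally the share of correct calls among all calls, (tp + tn) / (tp + fp + tn + fn). A minimal usage sketch, assuming the function follows that standard definition; the counts are illustrative:

    from scoring_metrics import accuracy

    # Standard definition: (tp + tn) / (tp + fp + tn + fn)
    acc = accuracy(tp=80, fp=10, tn=5, fn=5)   # 85 / 100 = 0.85 under that definition
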
scoring_metrics.add_missing_fields(score_summary)
scoring_metrics.document_level_annot_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, fuzzy_flag, scorable_attributes)
scoring_metrics.end_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.evaluate_doc_properties(reference_filename, confusion_matrix, score_card, reference_ss, test_ss, patterns, fuzzy_flag='doc-property', scorable_attributes=[], scorable_engines=[], norm_synonyms={})
scoring_metrics.evaluate_positions(reference_filename, confusion_matrix, score_card, reference_ss, test_ss, fuzzy_flag='exact', use_mapped_chars=False, scorable_attributes=[], scorable_engines=[], norm_synonyms={})
scoring_metrics.exact_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.f_score(p, r, beta=1)
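
f_score combines a precision value p and a recall value r; with the default beta=1 the conventional F-beta formula reduces to the harmonic mean 2pr / (p + r). A sketch assuming the standard F-beta definition, with illustrative inputs:

    from scoring_metrics import f_score

    # Conventional F-beta: (1 + beta**2) * p * r / (beta**2 * p + r)
    f1 = f_score(p=0.8, r=0.5)           # beta=1: 2 * 0.8 * 0.5 / 1.3 ~= 0.615
    f2 = f_score(p=0.8, r=0.5, beta=2)   # beta > 1 weights recall more heavily
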
scoring_metrics.flatten_ss_dictionary(ss_dictionary, category='(unknown)')
scoring_metrics.fully_contained_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.get_annotation_from_base_entry(annotation_entry, start_key, end_key)
scoring_metrics.get_unique_types(config)
scoring_metrics.new_score_card(fuzzy_flags=['exact'], normalization_engines=[])
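
A hedged sketch of creating a fresh score card. The extra fuzzy flag 'partial' is inferred from partial_comparison_runner below, and the normalization engine name is a placeholder; neither is a documented value, and the returned structure is whatever the module uses internally:

    from scoring_metrics import new_score_card

    # Defaults: track exact matches only, no normalization engines.
    score_card = new_score_card()

    # Illustrative call with an extra fuzzy flag and a placeholder engine name.
    score_card = new_score_card(fuzzy_flags=['exact', 'partial'],
                                normalization_engines=['my_norm_engine'])
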
scoring_metrics.norm_summary(score_summary, args)
scoring_metrics.output_metrics(class_data, fuzzy_flag, metrics, delimiter_prefix, delimiter, stdout_flag, csv_out_filename, pretty_print_flag)
scoring_metrics.partial_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.precision(tp, fp)
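
Precision is conventionally tp / (tp + fp). A one-line sketch assuming that definition:

    from scoring_metrics import precision

    p = precision(tp=8, fp=2)   # 8 / (8 + 2) = 0.8 under the conventional definition
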
scoring_metrics.print_2018_n2c2_track1(score_card, file_mapping, args)
scoring_metrics.print_confusion_matrix(confusion_matrix, file_mapping, reference_config, test_config, fuzzy_flag, args)
scoring_metrics.print_confusion_matrix_shell(confusion_matrix, file_mapping, reference_patterns, test_patterns, args)
scoring_metrics.print_counts_summary(score_card, file_list, config_patterns, args, set_type)
scoring_metrics.print_score_summary(score_card, file_mapping, reference_config, test_config, fuzzy_flag, args, norm_engine='')
scoring_metrics.print_score_summary_shell(score_card, file_mapping, reference_config, test_config, args)
scoring_metrics.recall(tp, fn)
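
Recall is conventionally tp / (tp + fn). A one-line sketch assuming that definition:

    from scoring_metrics import recall

    r = recall(tp=8, fn=8)   # 8 / (8 + 8) = 0.5 under the conventional definition
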
scoring_metrics.recursive_deep_key_value_pair(dictionary, path, key, value)
scoring_metrics.reference_annot_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.specificity(tn, fp, empty_value=None)
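
Specificity is conventionally tn / (tn + fp); empty_value presumably supplies the return value when the denominator is zero, though that behaviour is an assumption here:

    from scoring_metrics import specificity

    s = specificity(tn=90, fp=10)                  # 90 / (90 + 10) = 0.9 under the conventional definition
    s = specificity(tn=0, fp=0, empty_value=0.0)   # assumed fallback when there are no negatives to score
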
scoring_metrics.start_comparison_runner(reference_filename, confusion_matrix, score_card, reference_annot, test_entries, start_key, end_key, fuzzy_flag, scorable_attributes, scorable_engines, norm_synonyms)
scoring_metrics.update_confusion_matrix(confusion_matrix, fuzzy_flag, ref_type, test_type)
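
The signature suggests tallying one reference-type / test-type pairing under a given fuzzy flag. The stand-in below is purely illustrative and assumes a confusion matrix kept as a nested dict of counts; the module's actual structure and the type labels used here may differ:

    from collections import defaultdict

    # Illustrative stand-in only: one count per (fuzzy_flag, ref_type, test_type) triple.
    confusion_matrix = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

    def tally(confusion_matrix, fuzzy_flag, ref_type, test_type):
        confusion_matrix[fuzzy_flag][ref_type][test_type] += 1

    tally(confusion_matrix, 'exact', 'DateTime', 'DateTime')   # types agree
    tally(confusion_matrix, 'exact', 'DateTime', 'Age')        # a type confusion
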
scoring_metrics.update_csv_output(csv_out_filename, delimiter, row_content)
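
The name and arguments suggest appending one delimited row to a CSV output file. A minimal stand-in sketch under that assumption; the module's real implementation may differ:

    # Illustrative stand-in only.
    def append_csv_row(csv_out_filename, delimiter, row_content):
        with open(csv_out_filename, 'a') as fp:
            fp.write(delimiter.join(str(cell) for cell in row_content) + '\n')

    append_csv_row('metrics.csv', ',', ['exact', 'DateTime', 0.91, 0.87, 0.89])
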
scoring_metrics.update_output_dictionary(out_file, metric_type, metrics_keys, metrics_values)
scoring_metrics.update_score_card(condition, score_card, fuzzy_flag, filename, start_pos, end_pos, type, pivot_value=None, ref_annot=None, test_annot=None, scorable_attributes=None, scorable_engines=None, norm_synonyms={})
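
A hedged sketch of recording one scored annotation against a new score card. The condition string 'TP', the filename, the offsets, and the annotation type are illustrative guesses at the expected vocabulary, not documented values:

    from scoring_metrics import new_score_card, update_score_card

    score_card = new_score_card(fuzzy_flags=['exact'])

    # Illustrative values only: record one exact-match true positive for a DateTime span.
    update_score_card('TP', score_card, 'exact', 'note_001.xml', 87, 97, 'DateTime')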