@prefix dc: <http://purl.org/dc/terms/> .
@prefix this: <http://purl.org/np/RAhqNxfIdaMkNgPfUSApl6k0hebcUvBdBLnZSQ3Yk2KgU> .
@prefix sub: <http://purl.org/np/RAhqNxfIdaMkNgPfUSApl6k0hebcUvBdBLnZSQ3Yk2KgU#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
sub:assertion {
  sub:paragraph c4o:hasContent "Besides the fact datasets, we also keep track of confidence scores and generate additional datasets accordingly. Therefore, it is possible to filter out facts that are not considered confident by setting a suitable threshold. When processing a sentence, our pipeline outputs two different scores for each FE, stemming from the entity linker and the supervised classifier. We merge both signals by calculating the F-score between them, as if they represented precision and recall, in a fashion similar to the standard classification metrics. The final score can then be produced by aggregating the single FE scores in multiple ways, namely: (a) arithmetic mean; (b) weighted mean based on core FEs (i.e., core FEs have a higher weight than extra ones); (c) harmonic mean, also weighted on core FEs." ;
    a doco:Paragraph .
}
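# Illustrative sketch (not part of the nanopublication): the merging and aggregation
# described in the assertion above, assuming the entity-linker score l_i and the
# classifier score c_i of each FE both lie in [0, 1], and that the core-FE weight
# w_core is a free parameter (all symbol names are hypothetical):
#
#   per-FE merged score (F-score of the two signals):
#     f_i = 2 * l_i * c_i / (l_i + c_i)
#
#   sentence-level aggregation over the n FEs:
#     (a) arithmetic mean:        S = (1/n) * sum_i f_i
#     (b) weighted mean:          S = sum_i (w_i * f_i) / sum_i w_i,
#                                 with w_i = w_core for core FEs and w_i = 1 for extra FEs
#     (c) weighted harmonic mean: S = sum_i w_i / sum_i (w_i / f_i), same weights as (b)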
sub:provenance {
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-170269> ;
    prov:wasAttributedTo <https://orcid.org/0000-0002-5456-7964> .
}
sub:pubinfo {
  this: dc:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime ;
    pav:createdBy <https://orcid.org/0000-0002-7114-6459> .
}