@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix orcid: <https://orcid.org/> .
@prefix this: .
@prefix sub: .
@prefix po: <http://www.essepuntato.it/2008/12/pattern#> .
@prefix t4: .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix npx: <http://purl.org/nanopub/x/> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
sub:Head {
  this: np:hasAssertion sub:assertion;
    np:hasProvenance sub:provenance;
    np:hasPublicationInfo sub:pubinfo;
    a np:Nanopublication .
}
sub:assertion {
  t4: npx:introduces t4:\#table .
  sub:paragraph c4o:hasContent "We compared the common 1,073 triples assessed in each crowdsourcing approach against our gold standard and measured precision as well as inter-rater agreement values for each type of task (see Table 4). For the contest-based approach, the tool allowed two participants to evaluate a single resource. In total, there were 268 inter-evaluations for which we calculated the triple-based inter-agreement (adjusting the observed agreement with agreement by chance) to be 0.38. For the microtasks, we measured the inter-rater agreement values between a maximum of 5 workers for each type of task using Fleiss’ kappa measure [10]. While the inter-rater agreement between workers for the interlinking was high (0.7396), the ones for object values and datatypes were moderate to low with 0.5348 and 0.4960, respectively. Table 4 reports on the precision achieved by the LD experts and crowd in each stage. In the following we present further details on the results for each type of task.";
    po:contains t4:\#table;
    a doco:Paragraph .
}
sub:provenance {
  sub:assertion prov:hadPrimarySource ;
    prov:wasAttributedTo orcid:0000-0003-0530-4305 .
}
sub:pubinfo {
  this: dcterms:created "2019-09-20T18:05:11+01:00"^^xsd:dateTime;
    pav:createdBy orcid:0000-0002-7114-6459 .
}
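
The paragraph quoted in the assertion reports chance-corrected inter-rater agreement computed with Fleiss' kappa over the crowd workers' judgements. The sketch below is not part of the nanopublication itself; it only illustrates how such a kappa value could be computed. It assumes a fixed number of raters per triple and a hypothetical `ratings` count matrix, neither of which is given in the source.

# Minimal sketch of Fleiss' kappa (chance-corrected inter-rater agreement),
# as referenced for the microtask judgements in the quoted paragraph.
# Assumes every item (triple) was judged by the same number of raters;
# `ratings` is a hypothetical N x k matrix where ratings[i][j] counts the
# raters who assigned item i to category j (e.g. "correct" / "incorrect").

def fleiss_kappa(ratings):
    N = len(ratings)          # number of items (triples)
    n = sum(ratings[0])       # raters per item (assumed constant)
    k = len(ratings[0])       # number of categories

    # Proportion of all assignments that fall into each category.
    p = [sum(row[j] for row in ratings) / (N * n) for j in range(k)]

    # Observed agreement, averaged over items.
    P_bar = sum(
        (sum(c * c for c in row) - n) / (n * (n - 1)) for row in ratings
    ) / N

    # Agreement expected by chance.
    P_e = sum(pj * pj for pj in p)

    return (P_bar - P_e) / (1 - P_e)

# Toy usage: 4 triples, 5 raters each, two categories.
print(fleiss_kappa([[5, 0], [4, 1], [3, 2], [5, 0]]))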