@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix orcid: <https://orcid.org/> .
# NOTE(review): `this:` and `sub:` resolve to this nanopublication's own
# (trusty) URI, which was lost during extraction. The placeholders below keep
# the file parseable — TODO: restore the original URI from the source nanopub.
@prefix this: <http://example.org/np/UNKNOWN-TRUSTY-URI> .
@prefix sub: <http://example.org/np/UNKNOWN-TRUSTY-URI#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
# Head graph: wires together the three content graphs of this nanopublication
# (assertion, provenance, publication info) as required by the nanopub schema.
sub:Head {
this: np:hasAssertion sub:assertion;
np:hasProvenance sub:provenance;
np:hasPublicationInfo sub:pubinfo;
a np:Nanopublication .
}
# Assertion graph: quotes one paragraph of the cited article verbatim
# (c4o:hasContent) and types it as a doco:Paragraph.
# NOTE(review): the literal contains PDF-extraction hyphenation artifacts
# ("high- quality", "do- main", etc.); they are part of the published
# assertion content and must not be edited.
sub:assertion {
sub:paragraph c4o:hasContent "The Web of Data spans a network of data sources of varying quality. There are a large number of high- quality data sets, for instance, in the life-science do- main, which are the result of decades of thorough curation and have been recently made available as Linked Open Data 2 . Other data sets, however, have been (semi-)automatically translated into RDF from their primary sources, or via crowdsourcing in a decen- tralized process involving a large number of contrib- utors, for example DBpedia [23]. While the combina- tion of machine-driven extraction and crowdsourcing was a reasonable approach to produce a baseline ver- sion of a greatly useful resource, it was also the cause of a wide range of quality problems, in particular in the mappings between Wikipedia at";
a doco:Paragraph .
}
# Provenance graph: records where the assertion came from and who made it.
sub:provenance {
# NOTE(review): the object of prov:hadPrimarySource (presumably the DOI/IRI of
# the quoted article) was lost during extraction, leaving the statement
# unparseable. The placeholder below restores valid TriG syntax —
# TODO: replace with the original source IRI before republishing.
sub:assertion prov:hadPrimarySource <urn:todo:missing-primary-source>;
prov:wasAttributedTo orcid:0000-0003-0530-4305 .
}
# Publication-info graph: creation timestamp and creator (ORCID) of the
# nanopublication itself (the `this:` resource), per dcterms/pav conventions.
sub:pubinfo {
this: dcterms:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime;
pav:createdBy orcid:0000-0002-7114-6459 .
}