@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix orcid: <https://orcid.org/> .
@prefix this: <http://purl.org/np/RAvRE3wxcjsrqH3jLUj1uYBWwnz3hv32F-35s2vEyV6KQ> .
@prefix sub: <http://purl.org/np/RAvRE3wxcjsrqH3jLUj1uYBWwnz3hv32F-35s2vEyV6KQ#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
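# Head graph: links this nanopublication to its assertion, provenance, and publication-info graphs.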
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
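# Assertion graph: the quoted paragraph, typed as a DoCO paragraph, with its textual content given via c4o:hasContent.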
sub:assertion {
  sub:paragraph c4o:hasContent "The Web of Data spans a network of data sources of varying quality. There are a large number of high-quality data sets, for instance, in the life-science domain, which are the result of decades of thorough curation and have been recently made available as Linked Open Data. Other data sets, however, have been (semi-)automatically translated into RDF from their primary sources, or via crowdsourcing in a decentralized process involving a large number of contributors, for example DBpedia [23]. While the combination of machine-driven extraction and crowdsourcing was a reasonable approach to produce a baseline version of a greatly useful resource, it was also the cause of a wide range of quality problems, in particular in the mappings between Wikipedia at" ;
    a doco:Paragraph .
}
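# Provenance graph: records the source article (DOI) of the assertion and the agent (ORCID) it is attributed to.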
sub:provenance {
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-160239> ;
    prov:wasAttributedTo orcid:0000-0003-0530-4305 .
}
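# Publication-info graph: creation timestamp and creator (ORCID) of this nanopublication itself.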
sub:pubinfo {
  this: dcterms:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime ;
    pav:createdBy orcid:0000-0002-7114-6459 .
}