<owl:Class xmlns="https://folio.openlegalstandard.org/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:v1="http://www.loc.gov/mads/rdf/v1#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:xsd="http://www.w3.org/2001/XMLSchema#" xmlns:folio="https://folio.openlegalstandard.org/" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:skos="http://www.w3.org/2004/02/skos/core#" rdf:about="https://folio.openlegalstandard.org/RBMj5dbvLFgFGrPZ3MvFGYl">
  <!-- OWL class describing the "Data Poisoning" concept in the FOLIO
       (Federated Open Legal Information Ontology) namespace.
       NOTE(review): the subClassOf target IRI below is opaque in this chunk;
       presumably a broader attack/risk category - confirm against the full
       ontology. -->
  <rdfs:subClassOf rdf:resource="https://folio.openlegalstandard.org/RBHMad8oNmYXkYHOHZLCgqv"/>
  <!-- Preferred human-readable label for this class.
       NOTE(review): no xml:lang is set on any literal in this element;
       consumers that filter by language tag will not match these. -->
  <rdfs:label>Data Poisoning</rdfs:label>
  <!-- SKOS alternative labels: synonyms/aliases for search and display. -->
  <skos:altLabel>Adversarial Data Manipulation</skos:altLabel>
  <skos:altLabel>Dataset Compromise</skos:altLabel>
  <skos:altLabel>Malicious Data Tampering</skos:altLabel>
  <skos:altLabel>Poisoning Attack</skos:altLabel>
  <skos:altLabel>Training Data Corruption</skos:altLabel>
  <!-- Prose definition of the concept (SKOS documentation property). -->
  <skos:definition>Data Poisoning is a type of adversarial attack where malicious actors intentionally alter or manipulate the training data of a machine learning model to compromise its integrity and performance. This can lead to the model learning incorrect patterns or making erroneous predictions.</skos:definition>
</owl:Class>
