<!--
  FOLIO concept: Adversarial Machine Learning
  (https://folio.openlegalstandard.org/RDjGVtbkpmSvuHjMNBohZ8o).
  Asserted as a subclass of .../RBHMad8oNmYXkYHOHZLCgqv, with one rdfs:label,
  three skos:altLabel synonyms, and a skos:definition.
  Only the namespaces actually used by this element are declared here; the
  previously declared dc, v1, xsd, folio, and default bindings were unused.
  NOTE(review): the literals carry no xml:lang tag; consider xml:lang="en"
  per SKOS labeling practice - confirm against the rest of the ontology first.
-->
<owl:Class xmlns:owl="http://www.w3.org/2002/07/owl#"
           xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
           xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
           xmlns:skos="http://www.w3.org/2004/02/skos/core#"
           rdf:about="https://folio.openlegalstandard.org/RDjGVtbkpmSvuHjMNBohZ8o">
  <rdfs:subClassOf rdf:resource="https://folio.openlegalstandard.org/RBHMad8oNmYXkYHOHZLCgqv"/>
  <rdfs:label>Adversarial Machine Learning</rdfs:label>
  <skos:altLabel>Adversarial AI</skos:altLabel>
  <skos:altLabel>Adversarial Attack Techniques</skos:altLabel>
  <skos:altLabel>Defensive Machine Learning</skos:altLabel>
  <skos:definition>Adversarial Machine Learning refers to the study and design of algorithms that can withstand intentional manipulation or adversarial attacks. These attacks aim to deceive machine learning models by supplying deceptive inputs, highlighting vulnerabilities and the need for robust defenses in AI systems.</skos:definition>
</owl:Class>
