From 03ec89cd87d4fa5f11f5875ee2568836e401dd7a Mon Sep 17 00:00:00 2001 From: forrestmckee <43412563+forrestmckee@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:08:56 -0500 Subject: [PATCH] Update papers.bib --- _bibliography/papers.bib | 180 ++++++++++++++++++++++++--------------- 1 file changed, 110 insertions(+), 70 deletions(-) diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib index 53fd8fd..2b68e6a 100644 --- a/_bibliography/papers.bib +++ b/_bibliography/papers.bib @@ -1,125 +1,165 @@ --- --- +@inproceedings{McKee2024SafeguardingSPTM, + booktitle={Security, Privacy and Trust Management}, + organization={12th International Conference of Security, Privacy and Trust Management}, + doi={10.5121/csit.2024.141112}, + publisher={Academy & Industry Research Collaboration Center}, + title={Safeguarding Voice Privacy: Harnessing Near-Ultrasonic Interference to Protect Against Unauthorized Audio Recording}, + url={http://dx.doi.org/10.5121/csit.2024.141112}, + author={McKee, Forrest and Noever, David}, + date={2024-06-22}, + year={2024}, + month={June}, + day={22}, + abbr={SPTM}, +} + +@article{mckee2024transparency, + title={Transparency Attacks: How Imperceptible Image Layers Can Fool AI Perception}, + author={McKee, Forrest and Noever, David}, + journal={arXiv preprint arXiv:2401.15817}, + year={2024}, + abstract={This paper investigates a novel algorithmic vulnerability when imperceptible image layers confound multiple vision models into arbitrary label assignments and captions. We explore image preprocessing methods to introduce stealth transparency, which triggers AI misinterpretation of what the human eye perceives. The research compiles a broad attack surface to investigate the consequences ranging from traditional watermarking, steganography, and background-foreground miscues. 
We demonstrate dataset poisoning using the attack to mislabel a collection of grayscale landscapes and logos using either a single attack layer or randomly selected poisoning classes. For example, a military tank to the human eye is a mislabeled bridge to object classifiers based on convolutional networks (YOLO, etc.) and vision transformers (ViT, GPT-Vision, etc.). A notable attack limitation stems from its dependency on the background (hidden) layer in grayscale as a rough match to the transparent foreground image that the human eye perceives. This dependency limits the practical success rate without manual tuning and exposes the hidden layers when placed on the opposite display theme (e.g., light background, light transparent foreground visible, works best against a light theme image viewer or browser). The stealth transparency confounds established vision systems, including evading facial recognition and surveillance, digital watermarking, content filtering, dataset curating, automotive and drone autonomy, forensic evidence tampering, and retail product misclassifying. This method stands in contrast to traditional adversarial attacks that typically focus on modifying pixel values in ways that are either slightly perceptible or entirely imperceptible for both humans and machines.} +} + +@article{noever2024exploiting, + title={Exploiting Alpha Transparency In Language And Vision-Based AI Systems}, + author={Noever, David and McKee, Forrest}, + journal={arXiv preprint arXiv:2402.09671}, + year={2024}, + abstract={This investigation reveals a novel exploit derived from PNG image file formats, specifically their alpha transparency layer, and its potential to fool multiple AI vision systems. Our method uses this alpha layer as a clandestine channel invisible to human observers but fully actionable by AI image processors. 
The scope tested for the vulnerability spans representative vision systems from Apple, Microsoft, Google, Salesforce, Nvidia, and Facebook, highlighting the attack's potential breadth. This vulnerability challenges the security protocols of existing and fielded vision systems, from medical imaging to autonomous driving technologies. Our experiments demonstrate that the affected systems, which rely on convolutional neural networks or the latest multimodal language models, cannot quickly mitigate these vulnerabilities through simple patches or updates. Instead, they require retraining and architectural changes, indicating a persistent hole in multimodal technologies without some future adversarial hardening against such vision-language exploits} +} + +@article{mckee2024safeguarding, + title={Safeguarding Voice Privacy: Harnessing Near-Ultrasonic Interference To Protect Against Unauthorized Audio Recording}, + author={McKee, Forrest and Noever, David}, + journal={arXiv preprint arXiv:2404.04769}, + year={2024}, + abstract={The widespread adoption of voice-activated systems has modified routine human-machine interaction but has also introduced new vulnerabilities. This paper investigates the susceptibility of automatic speech recognition (ASR) algorithms in these systems to interference from near-ultrasonic noise. Building upon prior research that demonstrated the ability of near-ultrasonic frequencies (16 kHz - 22 kHz) to exploit the inherent properties of microelectromechanical systems (MEMS) microphones, our study explores alternative privacy enforcement means using this interference phenomenon. We expose a critical vulnerability in the most common microphones used in modern voice-activated devices, which inadvertently demodulate near-ultrasonic frequencies into the audible spectrum, disrupting the ASR process. 
Through a systematic analysis of the impact of near-ultrasonic noise on various ASR systems, we demonstrate that this vulnerability is consistent across different devices and under varying conditions, such as broadcast distance and specific phoneme structures. Our findings highlight the need to develop robust countermeasures to protect voice-activated systems from malicious exploitation of this vulnerability. Furthermore, we explore the potential applications of this phenomenon in enhancing privacy by disrupting unauthorized audio recording or eavesdropping. This research underscores the importance of a comprehensive approach to securing voice-activated systems, combining technological innovation, responsible development practices, and informed policy decisions to ensure the privacy and security of users in an increasingly connected world.} +} + @article{mckee2023adversarial, title={Adversarial Agents For Attacking Inaudible Voice Activated Devices}, author={McKee, Forrest and Noever, David}, journal={arXiv preprint arXiv:2307.12204}, - month = {July}, - year = {2023}, - abstract = {The paper applies reinforcement learning to novel Internet of Thing configurations. Our analysis of inaudible attacks on voice-activated devices confirms the alarming risk factor of 7.6 out of 10, underlining significant security vulnerabilities scored independently by NIST National Vulnerability Database (NVD). Our baseline network model showcases a scenario in which an attacker uses inaudible voice commands to gain unauthorized access to confidential information on a secured laptop. We simulated many attack scenarios on this baseline network model, revealing the potential for mass exploitation of interconnected devices to discover and own privileged information through physical access without adding new hardware or amplifying device skills. 
Using Microsoft's CyberBattleSim framework, we evaluated six reinforcement learning algorithms and found that Deep-Q learning with exploitation proved optimal, leading to rapid ownership of all nodes in fewer steps. Our findings underscore the critical need for understanding non-conventional networks and new cybersecurity measures in an ever-expanding digital landscape, particularly those characterized by mobile devices, voice activation, and non-linear microphones susceptible to malicious actors operating stealth attacks in the near-ultrasound or inaudible ranges. By 2024, this new attack surface might encompass more digital voice assistants than people on the planet yet offer fewer remedies than conventional patching or firmware fixes since the inaudible attacks arise inherently from the microphone design and digital signal processing.}, - arxiv = {2307.12204}, + month={July}, + year={2023}, + abstract={The paper applies reinforcement learning to novel Internet of Thing configurations. Our analysis of inaudible attacks on voice-activated devices confirms the alarming risk factor of 7.6 out of 10, underlining significant security vulnerabilities scored independently by NIST National Vulnerability Database (NVD). Our baseline network model showcases a scenario in which an attacker uses inaudible voice commands to gain unauthorized access to confidential information on a secured laptop. We simulated many attack scenarios on this baseline network model, revealing the potential for mass exploitation of interconnected devices to discover and own privileged information through physical access without adding new hardware or amplifying device skills. Using Microsoft's CyberBattleSim framework, we evaluated six reinforcement learning algorithms and found that Deep-Q learning with exploitation proved optimal, leading to rapid ownership of all nodes in fewer steps. 
Our findings underscore the critical need for understanding non-conventional networks and new cybersecurity measures in an ever-expanding digital landscape, particularly those characterized by mobile devices, voice activation, and non-linear microphones susceptible to malicious actors operating stealth attacks in the near-ultrasound or inaudible ranges. By 2024, this new attack surface might encompass more digital voice assistants than people on the planet yet offer fewer remedies than conventional patching or firmware fixes since the inaudible attacks arise inherently from the microphone design and digital signal processing.}, + arxiv={2307.12204}, } @article{mckee2022chatbots, title={Chatbots in a botnet world}, author={McKee, Forrest and Noever, David}, journal={arXiv preprint arXiv:2212.11126}, - month = {December}, - year = {2022}, - abstract = {Question-and-answer formats provide a novel experimental platform for investigating cybersecurity questions. Unlike previous chatbots, the latest ChatGPT model from OpenAI supports an advanced understanding of complex coding questions. The research demonstrates thirteen coding tasks that generally qualify as stages in the MITRE ATT&CK framework, ranging from credential access to defense evasion. With varying success, the experimental prompts generate examples of keyloggers, logic bombs, obfuscated worms, and payment-fulfilled ransomware. The empirical results illustrate cases that support the broad gain of functionality, including self-replication and self-modification, evasion, and strategic understanding of complex cybersecurity goals. One surprising feature of ChatGPT as a language-only model centers on its ability to spawn coding approaches that yield images that obfuscate or embed executable programming steps or links.}, - arxiv = {2212.11126}, + month={December}, + year={2022}, + abstract={Question-and-answer formats provide a novel experimental platform for investigating cybersecurity questions. 
Unlike previous chatbots, the latest ChatGPT model from OpenAI supports an advanced understanding of complex coding questions. The research demonstrates thirteen coding tasks that generally qualify as stages in the MITRE ATT&CK framework, ranging from credential access to defense evasion. With varying success, the experimental prompts generate examples of keyloggers, logic bombs, obfuscated worms, and payment-fulfilled ransomware. The empirical results illustrate cases that support the broad gain of functionality, including self-replication and self-modification, evasion, and strategic understanding of complex cybersecurity goals. One surprising feature of ChatGPT as a language-only model centers on its ability to spawn coding approaches that yield images that obfuscate or embed executable programming steps or links.}, + arxiv={2212.11126}, } @article{mckee2023acoustic, title={Acoustic Cybersecurity: Exploiting Voice-Activated Systems}, author={McKee, Forrest and Noever, David}, journal={arXiv preprint arXiv:2312.00039}, - month = {November}, - year = {2023}, - abstract = {In this study, we investigate the emerging threat of inaudible acoustic attacks targeting digital voice assistants, a critical concern given their projected prevalence to exceed the global population by 2024. Our research extends the feasibility of these attacks across various platforms like Amazon's Alexa, Android, iOS, and Cortana, revealing significant vulnerabilities in smart devices. The twelve attack vectors identified include successful manipulation of smart home devices and automotive systems, potential breaches in military communication, and challenges in critical infrastructure security. We quantitatively show that attack success rates hover around 60%, with the ability to activate devices remotely from over 100 feet away. 
Additionally, these attacks threaten critical infrastructure, emphasizing the need for multifaceted defensive strategies combining acoustic shielding, advanced signal processing, machine learning, and robust user authentication to mitigate these risks.}, - arxiv = {2312.00039}, + month={November}, + year={2023}, + abstract={In this study, we investigate the emerging threat of inaudible acoustic attacks targeting digital voice assistants, a critical concern given their projected prevalence to exceed the global population by 2024. Our research extends the feasibility of these attacks across various platforms like Amazon's Alexa, Android, iOS, and Cortana, revealing significant vulnerabilities in smart devices. The twelve attack vectors identified include successful manipulation of smart home devices and automotive systems, potential breaches in military communication, and challenges in critical infrastructure security. We quantitatively show that attack success rates hover around 60%, with the ability to activate devices remotely from over 100 feet away. Additionally, these attacks threaten critical infrastructure, emphasizing the need for multifaceted defensive strategies combining acoustic shielding, advanced signal processing, machine learning, and robust user authentication to mitigate these risks.}, + arxiv={2312.00039}, } @article{mckee2023nuance, title={NUANCE: Near Ultrasound Attack On Networked Communication Environments}, author={McKee, Forrest and Noever, David}, journal={arXiv preprint arXiv:2305.10358}, - month = {April}, - year = {2023}, - abstract = {This study investigates a primary inaudible attack vector on Amazon Alexa voice services using near ultrasound trojans and focuses on characterizing the attack surface and examining the practical implications of issuing inaudible voice commands. 
The research maps each attack vector to a tactic or technique from the MITRE ATT&CK matrix, covering enterprise, mobile, and Industrial Control System (ICS) frameworks. The experiment involved generating and surveying fifty near-ultrasonic audios to assess the attacks' effectiveness, with unprocessed commands having a 100% success rate and processed ones achieving a 58% overall success rate. This systematic approach stimulates previously unaddressed attack surfaces, ensuring comprehensive detection and attack design while pairing each ATT&CK Identifier with a tested defensive method, providing attack and defense tactics for prompt-response options. The main findings reveal that the attack method employs Single Upper Sideband Amplitude Modulation (SUSBAM) to generate near-ultrasonic audio from audible sources, transforming spoken commands into a frequency range beyond human-adult hearing. By eliminating the lower sideband, the design achieves a 6 kHz minimum from 16-22 kHz while remaining inaudible after transformation. The research investigates the one-to-many attack surface where a single device simultaneously triggers multiple actions or devices. Additionally, the study demonstrates the reversibility or demodulation of the inaudible signal, suggesting potential alerting methods and the possibility of embedding secret messages like audio steganography.}, - arxiv = {2305.10358}, + month={April}, + year={2023}, + abstract={This study investigates a primary inaudible attack vector on Amazon Alexa voice services using near ultrasound trojans and focuses on characterizing the attack surface and examining the practical implications of issuing inaudible voice commands. The research maps each attack vector to a tactic or technique from the MITRE ATT&CK matrix, covering enterprise, mobile, and Industrial Control System (ICS) frameworks. 
The experiment involved generating and surveying fifty near-ultrasonic audios to assess the attacks' effectiveness, with unprocessed commands having a 100% success rate and processed ones achieving a 58% overall success rate. This systematic approach stimulates previously unaddressed attack surfaces, ensuring comprehensive detection and attack design while pairing each ATT&CK Identifier with a tested defensive method, providing attack and defense tactics for prompt-response options. The main findings reveal that the attack method employs Single Upper Sideband Amplitude Modulation (SUSBAM) to generate near-ultrasonic audio from audible sources, transforming spoken commands into a frequency range beyond human-adult hearing. By eliminating the lower sideband, the design achieves a 6 kHz minimum from 16-22 kHz while remaining inaudible after transformation. The research investigates the one-to-many attack surface where a single device simultaneously triggers multiple actions or devices. Additionally, the study demonstrates the reversibility or demodulation of the inaudible signal, suggesting potential alerting methods and the possibility of embedding secret messages like audio steganography.}, + arxiv={2305.10358}, } @article{noever2023numeracy, title={Numeracy from Literacy: Data Science as an Emergent Skill from Large Language Models}, author={Noever, David and McKee, Forrest}, journal={arXiv preprint arXiv:2301.13382}, - month = {January}, - year = {2023}, - abstract = {Large language models (LLM) such as OpenAI's ChatGPT and GPT-3 offer unique testbeds for exploring the translation challenges of turning literacy into numeracy. Previous publicly-available transformer models from eighteen months prior and 1000 times smaller failed to provide basic arithmetic. The statistical analysis of four complex datasets described here combines arithmetic manipulations that cannot be memorized or encoded by simple rules. 
The work examines whether next-token prediction succeeds from sentence completion into the realm of actual numerical understanding. For example, the work highlights cases for descriptive statistics on in-memory datasets that the LLM initially loads from memory or generates randomly using python libraries. The resulting exploratory data analysis showcases the model's capabilities to group by or pivot categorical sums, infer feature importance, derive correlations, and predict unseen test cases using linear regression. To extend the model's testable range, the research deletes and appends random rows such that recall alone cannot explain emergent numeracy.}, - arxiv = {2301.13382}, + month={January}, + year={2023}, + abstract={Large language models (LLM) such as OpenAI's ChatGPT and GPT-3 offer unique testbeds for exploring the translation challenges of turning literacy into numeracy. Previous publicly-available transformer models from eighteen months prior and 1000 times smaller failed to provide basic arithmetic. The statistical analysis of four complex datasets described here combines arithmetic manipulations that cannot be memorized or encoded by simple rules. The work examines whether next-token prediction succeeds from sentence completion into the realm of actual numerical understanding. For example, the work highlights cases for descriptive statistics on in-memory datasets that the LLM initially loads from memory or generates randomly using python libraries. The resulting exploratory data analysis showcases the model's capabilities to group by or pivot categorical sums, infer feature importance, derive correlations, and predict unseen test cases using linear regression. 
To extend the model's testable range, the research deletes and appends random rows such that recall alone cannot explain emergent numeracy.}, + arxiv={2301.13382}, } @article{mckee2023chatbots, title={Chatbots in a honeypot world}, author={McKee, Forrest and Noever, David}, journal={arXiv preprint arXiv:2301.03771}, - month = {January}, - year = {2023}, - abstract = {Question-and-answer agents like ChatGPT offer a novel tool for use as a potential honeypot interface in cyber security. By imitating Linux, Mac, and Windows terminal commands and providing an interface for TeamViewer, nmap, and ping, it is possible to create a dynamic environment that can adapt to the actions of attackers and provide insight into their tactics, techniques, and procedures (TTPs). The paper illustrates ten diverse tasks that a conversational agent or large language model might answer appropriately to the effects of command-line attacker. The original result features feasibility studies for ten model tasks meant for defensive teams to mimic expected honeypot interfaces with minimal risks. Ultimately, the usefulness outside of forensic activities stems from whether the dynamic honeypot can extend the time-to-conquer or otherwise delay attacker timelines short of reaching key network assets like databases or confidential information. While ongoing maintenance and monitoring may be required, ChatGPT's ability to detect and deflect malicious activity makes it a valuable option for organizations seeking to enhance their cyber security posture. Future work will focus on cybersecurity layers, including perimeter security, host virus detection, and data security.}, - arxiv = {2301.03771}, + month={January}, + year={2023}, + abstract={Question-and-answer agents like ChatGPT offer a novel tool for use as a potential honeypot interface in cyber security. 
By imitating Linux, Mac, and Windows terminal commands and providing an interface for TeamViewer, nmap, and ping, it is possible to create a dynamic environment that can adapt to the actions of attackers and provide insight into their tactics, techniques, and procedures (TTPs). The paper illustrates ten diverse tasks that a conversational agent or large language model might answer appropriately to the effects of command-line attacker. The original result features feasibility studies for ten model tasks meant for defensive teams to mimic expected honeypot interfaces with minimal risks. Ultimately, the usefulness outside of forensic activities stems from whether the dynamic honeypot can extend the time-to-conquer or otherwise delay attacker timelines short of reaching key network assets like databases or confidential information. While ongoing maintenance and monitoring may be required, ChatGPT's ability to detect and deflect malicious activity makes it a valuable option for organizations seeking to enhance their cyber security posture. Future work will focus on cybersecurity layers, including perimeter security, host virus detection, and data security.}, + arxiv={2301.03771}, } @article{noever2023chatbots, title={Chatbots as Problem Solvers: Playing Twenty Questions with Role Reversals}, author={Noever, David and McKee, Forrest}, journal={arXiv preprint arXiv:2301.01743}, - month = {January}, - year = {2023}, - abstract = {New chat AI applications like ChatGPT offer an advanced understanding of question context and memory across multi-step tasks, such that experiments can test its deductive reasoning. This paper proposes a multi-role and multi-step challenge, where ChatGPT plays the classic twenty-questions game but innovatively switches roles from the questioner to the answerer. 
The main empirical result establishes that this generation of chat applications can guess random object names in fewer than twenty questions (average, 12) and correctly guess 94% of the time across sixteen different experimental setups. The research introduces four novel cases where the chatbot fields the questions, asks the questions, both question-answer roles, and finally tries to guess appropriate contextual emotions. One task that humans typically fail but trained chat applications complete involves playing bilingual games of twenty questions (English answers to Spanish questions). Future variations address direct problem-solving using a similar inquisitive format to arrive at novel outcomes deductively, such as patentable inventions or combination thinking. Featured applications of this dialogue format include complex protein designs, neuroscience metadata, and child development educational materials.}, - arxiv = {2301.01743}, + month={January}, + year={2023}, + abstract={New chat AI applications like ChatGPT offer an advanced understanding of question context and memory across multi-step tasks, such that experiments can test its deductive reasoning. This paper proposes a multi-role and multi-step challenge, where ChatGPT plays the classic twenty-questions game but innovatively switches roles from the questioner to the answerer. The main empirical result establishes that this generation of chat applications can guess random object names in fewer than twenty questions (average, 12) and correctly guess 94% of the time across sixteen different experimental setups. The research introduces four novel cases where the chatbot fields the questions, asks the questions, both question-answer roles, and finally tries to guess appropriate contextual emotions. One task that humans typically fail but trained chat applications complete involves playing bilingual games of twenty questions (English answers to Spanish questions). 
Future variations address direct problem-solving using a similar inquisitive format to arrive at novel outcomes deductively, such as patentable inventions or combination thinking. Featured applications of this dialogue format include complex protein designs, neuroscience metadata, and child development educational materials.}, + arxiv={2301.01743}, } + @inproceedings{McKee2023, -author={McKee, Forrest and Noever, D.}, -title={NEAR ULTRASONIC ATTACK AND DEFENSIVE COUNTERMEASURES}, -series={International Journal of Network Security & Its Applications}, -year={2023}, -month={May}, -day={01}, -volume={15}, -abstract={The practical implications of issuing inaudible voice commands. The research mapped each attack vector to a tactic or technique from the MITRE ATT{\&}CK matrix, covering enterprise, mobile, and Industrial Control System (ICS) frameworks. The experiment involved generating and surveying fifty near-ultrasonic audios to assess the attacks' effectiveness. Unprocessed commands achieved a 100{\%} success rate, while processed commands achieved an 86{\%} acknowledgment rate and a 58{\%} overall executed (successful) rate. The research systematically stimulated previously unaddressed attack surfaces, aiming for comprehensive detection and attack design. Each ATT{\&}CK identifier was paired with a tested defensive method, providing attack and defense tactics. The research findings revealed that the attack method employed Single Upper Sideband Amplitude Modulation (SUSBAM) to generate near-ultrasonic audio from audible sources. By eliminating the lower sideband, the design achieved a 6 kHz minimum from 16-22 kHz while remaining inaudible after transformation. The research also investigated the one-to-many attack surface, exploring scenarios where a single device triggers multiple actions or devices. 
Furthermore, the study demonstrated the reversibility or demodulation of the inaudible signal, suggesting potential alerting methods and the possibility of embedding secret messages like audio steganography. A critical methodological advance included tapping into the post-processed audio signal when the server demodulates the signal for comparison to both the audible and inaudible input signals to improve the actionable success rates.}, -doi={10.5121/ijnsa.2023.15301}, -url={https://doi.org/10.5121/ijnsa.2023.15301}, -pdf={https://aircconline.com/ijnsa/V15N3/15323ijnsa01.pdf}, -abbr={IJNSA} + author={McKee, Forrest and Noever, D.}, + title={NEAR ULTRASONIC ATTACK AND DEFENSIVE COUNTERMEASURES}, + series={International Journal of Network Security & Its Applications}, + year={2023}, + month={May}, + day={01}, + volume={15}, + abstract={The practical implications of issuing inaudible voice commands. The research mapped each attack vector to a tactic or technique from the MITRE ATT{\&}CK matrix, covering enterprise, mobile, and Industrial Control System (ICS) frameworks. The experiment involved generating and surveying fifty near-ultrasonic audios to assess the attacks' effectiveness. Unprocessed commands achieved a 100{\%} success rate, while processed commands achieved an 86{\%} acknowledgment rate and a 58{\%} overall executed (successful) rate. The research systematically stimulated previously unaddressed attack surfaces, aiming for comprehensive detection and attack design. Each ATT{\&}CK identifier was paired with a tested defensive method, providing attack and defense tactics. The research findings revealed that the attack method employed Single Upper Sideband Amplitude Modulation (SUSBAM) to generate near-ultrasonic audio from audible sources. By eliminating the lower sideband, the design achieved a 6 kHz minimum from 16-22 kHz while remaining inaudible after transformation. 
The research also investigated the one-to-many attack surface, exploring scenarios where a single device triggers multiple actions or devices. Furthermore, the study demonstrated the reversibility or demodulation of the inaudible signal, suggesting potential alerting methods and the possibility of embedding secret messages like audio steganography. A critical methodological advance included tapping into the post-processed audio signal when the server demodulates the signal for comparison to both the audible and inaudible input signals to improve the actionable success rates.}, + doi={10.5121/ijnsa.2023.15301}, + url={https://doi.org/10.5121/ijnsa.2023.15301}, + pdf={https://aircconline.com/ijnsa/V15N3/15323ijnsa01.pdf}, + abbr={IJNSA} } @article{article, -author = {McKee, Forrest and Noever, D.}, -year = {2023}, -month = {05}, -pages = {89-107}, -title = {Nuance: Near Ultrasound Attack on Networked Communication Environments}, -volume = {12}, -journal = {International Journal on Cybernetics & Informatics}, -doi = {10.5121/ijci.2023.120307}, -pdf = {https://ijcionline.com/paper/12/12323ijci07.pdf}, -abbr = {IJCI} + author={McKee, Forrest and Noever, D.}, + year={2023}, + month={05}, + pages={89-107}, + title={Nuance: Near Ultrasound Attack on Networked Communication Environments}, + volume={12}, + journal={International Journal on Cybernetics & Informatics}, + doi={10.5121/ijci.2023.120307}, + pdf={https://ijcionline.com/paper/12/12323ijci07.pdf}, + abbr={IJCI} } @article{article, -author = {McKee, Forrest and Noever, D.}, -year = {2023}, -month = {03}, -pages = {1-34}, -title = {The Evolving Landscape of Cybersecurity: Red Teams, Large Language Models, and the Emergence of New AI Attack Surfaces}, -volume = {13}, -journal = {International Journal on Cryptography and Information Security}, -doi = {10.5121/ijcis.2023.13101}, -pdf = {https://wireilla.com/papers/ijcis/V13N1/13123ijcis01.pdf}, -abbr = {IJCIS} + author={McKee, Forrest and Noever, D.}, + year={2023}, + 
month={03}, + pages={1-34}, + title={The Evolving Landscape of Cybersecurity: Red Teams, Large Language Models, and the Emergence of New AI Attack Surfaces}, + volume={13}, + journal={International Journal on Cryptography and Information Security}, + doi={10.5121/ijcis.2023.13101}, + pdf={https://wireilla.com/papers/ijcis/V13N1/13123ijcis01.pdf}, + abbr={IJCIS} } @article{article, -author = {McKee, Forrest and Noever, D.}, -year = {2023}, -month = {03}, -pages = {77-95}, -title = {Chatbots in a Botnet World}, -volume = {12}, -journal = {International Journal on Cybernetics & Informatics}, -doi = {10.5121/ijci.2023.120207}, -pdf = {https://ijcionline.com/paper/12/12223ijci07.pdf}, -abbr = {IJCI} + author={McKee, Forrest and Noever, D.}, + year={2023}, + month={03}, + pages={77-95}, + title={Chatbots in a Botnet World}, + volume={12}, + journal={International Journal on Cybernetics & Informatics}, + doi={10.5121/ijci.2023.120207}, + pdf={https://ijcionline.com/paper/12/12223ijci07.pdf}, + abbr={IJCI} }