@inproceedings{f0647f52ad53461fb88742ce8a875910,
title = "Deep Neural Exposure: You Can Run, but Not Hide Your Neural Network Architecture!",
abstract = "Deep Neural Networks (DNNs) are at the heart of many of today's most innovative technologies. With companies investing substantial resources to design, build, and optimize these networks for their custom products, DNNs are now integral to many companies' tightly guarded Intellectual Property. As is the case for every high-value product, one can expect bad actors to increasingly devise techniques aimed at uncovering the architectural designs of proprietary DNNs. This paper investigates whether the power draw patterns of a GPU on which a DNN runs can be leveraged to glean key details of its design architecture. Based on ten of the most well-known Convolutional Neural Network (CNN) architectures, we study this line of attack under varying assumptions about the kind of data available to the attacker. We show the attack to be highly effective, attaining accuracy in the 80% range for the best performing attack scenario.",
keywords = "GPU, deep neural networks, power attack, side channel",
author = "Arefin, {Sayed Erfan} and Abdul Serwadda",
note = "Publisher Copyright: {\textcopyright} 2021 ACM.; Conference date: 22-06-2021 Through 25-06-2021",
year = "2021",
month = jun,
day = "17",
doi = "10.1145/3437880.3460415",
language = "English",
series = "IH&MMSec '21: Proceedings of the 2021 ACM Workshop on Information Hiding and Multimedia Security",
publisher = "Association for Computing Machinery, Inc",
pages = "75--80",
booktitle = "IH&MMSec '21: Proceedings of the 2021 ACM Workshop on Information Hiding and Multimedia Security",
}