@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Sanjit Seshia's publication pages at http://www.eecs.berkeley.edu/~sseshia

@inproceedings{dreossi-vnn19,
  author    = {Tommaso Dreossi and Shromona Ghosh and Alberto L. Sangiovanni-Vincentelli and Sanjit A. Seshia},
  title     = {A Formalization of Robustness for Deep Neural Networks},
  booktitle = {Proceedings of the AAAI Spring Symposium Workshop on Verification of Neural Networks (VNN)},
  month     = {March},
  year      = {2019},
  abstract  = {Deep neural networks have been shown to lack robustness to small input perturbations. The process of generating the perturbations that expose the lack of robustness of neural networks is known as adversarial input generation. This process depends on the goals and capabilities of the adversary. In this paper, we propose a unifying formalization of the adversarial input generation process from a formal methods perspective. We provide a definition of robustness that is general enough to capture different formulations. The expressiveness of our formalization is shown by modeling and comparing a variety of adversarial attack techniques.},
}
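@COMMENT The abstract above refers to a general definition of robustness. As an
@COMMENT illustration only (not the paper's own definition), one standard local
@COMMENT robustness property for a network f at an input x, with an assumed
@COMMENT perturbation bound \epsilon and output tolerance \delta, can be written
@COMMENT in LaTeX as:
@COMMENT   \forall x'.\ \|x' - x\|_p \le \epsilon \implies \|f(x') - f(x)\| \le \delta
@COMMENT A classification-style variant instead requires label preservation:
@COMMENT   \forall x'.\ \|x' - x\|_p \le \epsilon \implies \arg\max f(x') = \arg\max f(x)
@COMMENT Adversarial input generation, as described in the abstract, searches for
@COMMENT a counterexample x' violating such a property.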