%%% (stray Web-badge text from an HTML capture: "Valid HTML 4.0! Valid CSS!")
%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.05",
%%%     date            = "15 February 2020",
%%%     time            = "07:51:24 MST",
%%%     filename        = "tops.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "26876 3958 23199 216779",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                       beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography, BibTeX, ACM Transactions
%%%                        on Privacy and Security (TOPS)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Privacy and
%%%                        Security (TOPS) (CODEN none, ISSN 2471-2566
%%%                        (print), 2471-2574 (electronic)), covering
%%%                        all journal issues from 2016--date.
%%%
%%%                        Publication began with volume 19, number 1,
%%%                        in 2016, as a continuation of the predecessor
%%%                        journal, ACM Transactions on Information and
%%%                        System Security.  The older journal is
%%%                        covered in a separate bibliography,
%%%                        tissec.bib.
%%%
%%%                        The journal has a Web site at
%%%
%%%                            http://dl.acm.org/citation.cfm?id=J1547
%%%
%%%                        At version 1.05, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2016 (   9)    2018 (  27)    2020 (   6)
%%%                             2017 (  17)    2019 (  17)
%%%
%%%                             Article:         76
%%%
%%%                             Total entries:   76
%%%
%%%                        The initial draft was extracted from the
%%%                        journal Web site.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.   Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TOPS                  = "ACM Transactions on Privacy and Security
                                  (TOPS)"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Eberz:2016:LLE,
  author =       "Simon Eberz and Kasper B. Rasmussen and Vincent
                 Lenders and Ivan Martinovic",
  title =        "Looks Like {Eve}: Exposing Insider Threats Using Eye
                 Movement Biometrics",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2904018",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We introduce a novel biometric based on distinctive
                 eye movement patterns. The biometric consists of 20
                 features that allow us to reliably distinguish users
                 based on differences in these patterns. We leverage
                 this distinguishing power along with the ability to
                 gauge the users' task familiarity, that is, level of
                 knowledge, to address insider threats. In a controlled
                 experiment, we test how both time and task familiarity
                 influence eye movements and feature stability, and how
                 different subsets of features affect the classifier
                 performance. These feature subsets can be used to
                 tailor the eye movement biometric to different
                 authentication methods and threat models. Our results
                 show that eye movement biometrics support reliable and
                 stable continuous authentication of users. We
                 investigate different approaches in which an attacker
                 could attempt to use inside knowledge to mimic the
                 legitimate user. Our results show that while this
                 advance knowledge is measurable, it does not increase
                 the likelihood of successful impersonation. In order to
                 determine the time stability of our features, we repeat
                 the experiment twice within 2 weeks. The results
                 indicate that we can reliably authenticate users over
                 the entire period. We show that lower sampling rates
                 provided by low-cost hardware pose a challenge, but
                 that reliable authentication is possible even at the
                 rate of 50Hz commonly available with consumer-level
                 devices. In a second set of experiments, we evaluate
                 how our authentication system performs across a variety
                 of real-world tasks, including reading, writing, and
                 web browsing. We discuss the advantages and limitations
                 of our approach in detail and give practical insights
                 on the use of this biometric in a real-world
                 environment.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Mitropoulos:2016:HTY,
  author =       "Dimitris Mitropoulos and Konstantinos Stroggylos and
                 Diomidis Spinellis and Angelos D. Keromytis",
  title =        "How to Train Your Browser: Preventing {XSS} Attacks
                 Using Contextual Script Fingerprints",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2939374",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Cross-Site Scripting (XSS) is one of the most common
                 web application vulnerabilities. It is therefore
                 sometimes referred to as the ``buffer overflow of the
                 web.'' Drawing a parallel from the current state of
                 practice in preventing unauthorized native code
                 execution (the typical goal in a code injection), we
                 propose a script whitelisting approach to tame
                 JavaScript-driven XSS attacks. Our scheme involves a
                 transparent script interception layer placed in the
                 browser's JavaScript engine. This layer is designed to
                 detect every script that reaches the browser, from
                 every possible route, and compare it to a list of valid
                 scripts for the site or page being accessed; scripts
                 not on the list are prevented from executing. To avoid
                 the false positives caused by minor syntactic changes
                 (e.g., due to dynamic code generation), our layer uses
                 the concept of contextual fingerprints when comparing
                 scripts. Contextual fingerprints are identifiers that
                 represent specific elements of a script and its
                 execution context. Fingerprints can be easily enriched
                 with new elements, if needed, to enhance the proposed
                 method's robustness. The list can be populated by the
                 website's administrators or a trusted third party. To
                 verify our approach, we have developed a prototype and
                 tested it successfully against an extensive array of
                 attacks that were performed on more than 50 real-world
                 vulnerable web applications. We measured the browsing
                 performance overhead of the proposed solution on eight
                 websites that make heavy use of JavaScript. Our
                 mechanism imposed an average overhead of 11.1\% on the
                 execution time of the JavaScript engine. When measured
                 as part of a full browsing session, and for all tested
                 websites, the overhead introduced by our layer was less
                 than 0.05\%. When script elements are altered or new
                 scripts are added on the server side, a new fingerprint
                 generation phase is required. To examine the temporal
                 aspect of contextual fingerprints, we performed a
                 short-term and a long-term experiment based on the same
                 websites. The former, showed that in a short period of
                 time (10 days), for seven of eight websites, the
                 majority of valid fingerprints stay the same (more than
                 92\% on average). The latter, though, indicated that,
                 in the long run, the number of fingerprints that do not
                 change is reduced. Both experiments can be seen as one
                 of the first attempts to study the feasibility of a
                 whitelisting approach for the web.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Author surname braced as {Mac Aonghusa} so BibTeX treats the
%%% compound surname as one unit instead of parsing ``Mac'' into the
%%% given names (First-Last form would yield Last = ``Aonghusa'' only).
@Article{Aonghusa:2016:DLG,
  author =       "P{\'o}l {Mac Aonghusa} and Douglas J. Leith",
  title =        "Don't Let {Google} Know {I}'m Lonely",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2937754",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "From buying books to finding the perfect partner, we
                 share our most intimate wants and needs with our
                 favourite online systems. But how far should we accept
                 promises of privacy in the face of personalized
                 profiling? In particular, we ask how we can improve
                 detection of sensitive topic profiling by online
                 systems. We propose a definition of privacy disclosure
                 that we call $ \epsilon $-indistinguishability, from
                 which we construct scalable, practical tools to assess
                 the learning potential from personalized content. We
                 demonstrate our results using openly available
                 resources, detecting a learning rate in excess of 98\%
                 for a range of sensitive topics during our
                 experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Rahbarinia:2016:EAB,
  author =       "Babak Rahbarinia and Roberto Perdisci and Manos
                 Antonakakis",
  title =        "Efficient and Accurate Behavior-Based Tracking of
                 Malware-Control Domains in Large {ISP} Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2960409",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we propose Segugio, a novel defense
                 system that allows for efficiently tracking the
                 occurrence of new malware-control domain names in very
                 large ISP networks. Segugio passively monitors the DNS
                 traffic to build a machine-domain bipartite graph
                 representing who is querying what. After labeling nodes
                 in this query behavior graph that are known to be
                 either benign or malware-related, we propose a novel
                 approach to accurately detect previously unknown
                 malware-control domains. We implemented a
                 proof-of-concept version of Segugio and deployed it in
                 large ISP networks that serve millions of users. Our
                 experimental results show that Segugio can track the
                 occurrence of new malware-control domains with up to
                 94\% true positives (TPs) at less than 0.1\% false
                 positives (FPs). In addition, we provide the following
                 results: (1) we show that Segugio can also detect
                 control domains related to new, previously unseen
                 malware families, with 85\% TPs at 0.1\% FPs; (2)
                 Segugio's detection models learned on traffic from a
                 given ISP network can be deployed into a different ISP
                 network and still achieve very high detection accuracy;
                 (3) new malware-control domains can be detected days or
                 even weeks before they appear in a large commercial
                 domain-name blacklist; (4) Segugio can be used to
                 detect previously unknown malware-infected machines in
                 ISP networks; and (5) we show that Segugio clearly
                 outperforms domain-reputation systems based on Belief
                 Propagation.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Dong:2016:DRC,
  author =       "Zheng Dong and Kevin Kane and L. Jean Camp",
  title =        "Detection of Rogue Certificates from Trusted
                 Certificate Authorities Using Deep Neural Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975591",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Rogue certificates are valid certificates issued by a
                 legitimate certificate authority (CA) that are
                 nonetheless untrustworthy; yet trusted by web browsers
                 and users. With the current public key infrastructure,
                 there exists a window of vulnerability between the time
                 a rogue certificate is issued and when it is detected.
                 Rogue certificates from recent compromises have been
                 trusted for as long as weeks before detection and
                 revocation. Previous proposals to close this window of
                 vulnerability require changes in the infrastructure,
                 Internet protocols, or end user experience. We present
                 a method for detecting rogue certificates from trusted
                 CAs developed from a large and timely collection of
                 certificates. This method automates classification by
                 building machine-learning models with Deep Neural
                 Networks (DNN). Despite the scarcity of rogue instances
                 in the dataset, DNN produced a classification method
                 that is proven both in simulation and in the July 2014
                 compromise of the India CCA. We report the details of
                 the classification method and illustrate that it is
                 repeatable, such as with datasets obtained from
                 crawling. We describe the classification performance
                 under our current research deployment.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): third author spelling corrected ``Mclellan'' ->
%%% ``McLellan'' per the ACM DL record for DOI 10.1145/2943780.
@Article{Garay:2016:MPA,
  author =       "Juan A. Garay and Vladimir Kolesnikov and Rae
                 McLellan",
  title =        "{MAC} Precomputation with Applications to Secure
                 Memory",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "6:1--6:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2943780",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We present Shallow MAC (ShMAC), a fixed-input-length
                 message authentication code that performs most of the
                 computation prior to the availability of the message.
                 Specifically, ShMAC's message-dependent computation is
                 much faster and smaller in hardware than the evaluation
                 of a pseudorandom permutation (PRP) and can be
                 implemented by a small shallow circuit, while its
                 precomputation consists of one PRP evaluation. A main
                 building block for ShMAC is the notion of strong
                 differential uniformity (SDU), which we introduce and
                 which may be of independent interest. We show an
                 efficient SDU construction built from previously
                 considered differentially uniform functions. Our main
                 motivating application is a system architecture where a
                 hardware-secured processor uses memory controlled by an
                 adversary. We also present in technical detail a novel,
                 efficient approach to encrypting and authenticating
                 memory and discuss the associated tradeoffs, while
                 paying special attention to minimizing hardware costs
                 and the reduction of Dynamic Random Access Memory
                 latency.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Abstract math repaired: extraction-garbled ``( k -anonymity and
%%% l-diversity)'' reset as ``($ k $-anonymity and $ \ell $-diversity)'',
%%% matching the $ \epsilon $ math convention used elsewhere in this file.
@Article{Ozalp:2016:PPP,
  author =       "Ismet Ozalp and Mehmet Emre Gursoy and Mehmet Ercan
                 Nergiz and Yucel Saygin",
  title =        "Privacy-Preserving Publishing of Hierarchical Data",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2976738",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Many applications today rely on storage and management
                 of semi-structured information, for example, XML
                 databases and document-oriented databases. These data
                 often have to be shared with untrusted third parties,
                 which makes individuals' privacy a fundamental problem.
                 In this article, we propose anonymization techniques
                 for privacy-preserving publishing of hierarchical data.
                 We show that the problem of anonymizing hierarchical
                 data poses unique challenges that cannot be readily
                 solved by existing mechanisms. We extend two standards
                 for privacy protection in tabular data ($ k $-anonymity
                 and $ \ell $-diversity) and apply them to hierarchical data.
                 We present utility-aware algorithms that enforce these
                 definitions of privacy using generalizations and
                 suppressions of data values. To evaluate our algorithms
                 and their heuristics, we experiment on synthetic and
                 real datasets obtained from two universities. Our
                 experiments show that we significantly outperform
                 related methods that provide comparable privacy
                 guarantees.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Crampton:2016:WSP,
  author =       "Jason Crampton and Andrei Gagarin and Gregory Gutin
                 and Mark Jones and Magnus Wahlstr{\"o}m",
  title =        "On the Workflow Satisfiability Problem with
                 Class-Independent Constraints for Hierarchical
                 Organizations",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2988239",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A workflow specification defines a set of steps, a set
                 of users, and an access control policy. The policy
                 determines which steps a user is authorized to perform
                 and imposes constraints on which sets of users can
                 perform which sets of steps. The workflow
                 satisfiability problem (WSP) is the problem of
                 determining whether there exists an assignment of users
                 to workflow steps that satisfies the policy. Given the
                 computational hardness of WSP and its importance in the
                 context of workflow management systems, it is important
                 to develop algorithms that are as efficient as possible
                 to solve WSP. In this article, we study the
                 fixed-parameter tractability of WSP in the presence of
                 class-independent constraints, which enable us to (1)
                 model security requirements based on the groups to
                 which users belong and (2) generalize the notion of a
                 user-independent constraint. Class-independent
                 constraints are defined in terms of equivalence
                 relations over the set of users. We consider sets of
                 nested equivalence relations because this enables us to
                 model security requirements in hierarchical
                 organizations. We prove that WSP is fixed-parameter
                 tractable (FPT) for class-independent constraints
                 defined over nested equivalence relations and develop
                 an FPT algorithm to solve WSP instances incorporating
                 such constraints. We perform experiments to evaluate
                 the performance of our algorithm and compare it with
                 that of SAT4J, an off-the-shelf pseudo-Boolean SAT
                 solver. The results of these experiments demonstrate
                 that our algorithm significantly outperforms SAT4J for
                 many instances of WSP.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Repairs to scraped abstract: leading slashes restored on /etc file
%%% paths (set with \path, per file convention), em dash restored before
%%% ``fake passwords'', and typo ``maybe acceptable'' -> ``may be
%%% acceptable''.
@Article{Gutierrez:2016:IDO,
  author =       "Christopher N. Gutierrez and Mohammed H. Almeshekah
                 and Eugene H. Spafford and Mikhail J. Atallah and Jeff
                 Avery",
  title =        "Inhibiting and Detecting Offline Password Cracking
                 Using {ErsatzPasswords}",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "9:1--9:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2996457",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this work, we present a simple, yet effective and
                 practical scheme to improve the security of stored
                 password hashes, increasing the difficulty to crack
                 passwords and exposing cracking attempts. We utilize a
                 hardware-dependent function (HDF), such as a physically
                 unclonable function (PUF) or a hardware security module
                 (HSM), at the authentication server to inhibit offline
                 password discovery. Additionally, a deception mechanism
                 is incorporated to alert administrators of cracking
                 attempts. Using an HDF to generate password hashes
                 hinders attackers from recovering the true passwords
                 without constant access to the HDF. Our scheme can
                 integrate with legacy systems without needing
                 additional servers, changing the structure of the
                 hashed password file, nor modifying client machines.
                 When using our scheme, the structure of the hashed
                 passwords file, e.g., \path|/etc/shadow| or
                 \path|/etc/master.passwd|, will appear no different
                 than traditional hashed password files.$^1$ However,
                 when attackers exfiltrate the hashed password file and
                 attempt to crack it, the passwords they will receive
                 are ErsatzPasswords---``fake passwords.'' The
                 ErsatzPasswords scheme is flexible by design, enabling
                 it to be integrated into existing authentication
                 systems without changes to user experience. The
                 proposed scheme is integrated into the pam\_unix module
                 as well as two client/server authentication schemes:
                 Lightweight Directory Access Protocol (LDAP)
                 authentication and the Pythia pseudorandom function
                 (PRF) Service [Everspaugh et al. 2015]. The core
                 library to support ErsatzPasswords written in C and
                 Python consists of 255 and 103 lines of code,
                 respectively. The integration of ErsatzPasswords into
                 each explored authentication system required less than
                 100 lines of additional code. Experimental evaluation
                 of ErsatzPasswords shows an increase in authentication
                 latency on the order of 100ms, which may be acceptable
                 for real world systems. We also describe a framework
                 for implementing ErsatzPasswords using a Trusted
                 Platform Module (TPM).",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``10:??'' and CODEN ``????'' are
%%% bibliographer placeholders in this entry --- confirm final page
%%% count and CODEN against the published issue (DOI 10.1145/2856820).
@Article{Meutzner:2017:TIA,
  author =       "Hendrik Meutzner and Santosh Gupta and Viet-Hung
                  Nguyen and Thorsten Holz and Dorothea Kolossa",
  title =        "Toward Improved Audio {CAPTCHAs} Based on Auditory
                  Perception and Language Understanding",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2856820",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A so-called completely automated public Turing test to
                  tell computers and humans apart (CAPTCHA) represents a
                  challenge-response test that is widely used on the
                  Internet to distinguish human users from fraudulent
                  computer programs, often referred to as bots. To enable
                  access for visually impaired users, most Web sites
                  utilize audio CAPTCHAs in addition to a conventional
                  image-based scheme. Recent research has shown that most
                  currently available audio CAPTCHAs are insecure, as
                  they can be broken by means of machine learning at
                  relatively low costs. Moreover, most audio CAPTCHAs
                  suffer from low human success rates that arise from
                  severe signal distortions. This article proposes two
                  different audio CAPTCHA schemes that systematically
                  exploit differences between humans and computers in
                  terms of auditory perception and language
                  understanding, yielding a better trade-off between
                  usability and security as compared to currently
                  available schemes. Furthermore, we provide an elaborate
                  analysis of Google's prominent reCAPTCHA that serves as
                  a baseline setting when evaluating our proposed CAPTCHA
                  designs.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``11:??'' and CODEN ``????'' are
%%% placeholders --- confirm against the published issue.  The
%%% lowercase word after the title colon follows this file's
%%% consistent house convention (cf.\ other entries herein).
@Article{Shokri:2017:PGA,
  author =       "Reza Shokri and George Theodorakopoulos and Carmela
                  Troncoso",
  title =        "Privacy Games Along Location Traces: a Game-Theoretic
                  Framework for Optimizing Location Privacy",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3009908",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                  http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The mainstream approach to protecting the privacy of
                  mobile users in location-based services (LBSs) is to
                  alter (e.g., perturb, hide, and so on) the users'
                  actual locations in order to reduce exposed sensitive
                  information. In order to be effective, a
                  location-privacy preserving mechanism must consider
                  both the privacy and utility requirements of each user,
                  as well as the user's overall exposed locations (which
                  contribute to the adversary's background knowledge). In
                  this article, we propose a methodology that enables the
                  design of optimal user-centric location obfuscation
                  mechanisms respecting each individual user's service
                  quality requirements, while maximizing the expected
                  error that the optimal adversary incurs in
                  reconstructing the user's actual trace. A key advantage
                  of a user-centric mechanism is that it does not depend
                  on third-party proxies or anonymizers; thus, it can be
                  directly integrated in the mobile devices that users
                  employ to access LBSs. Our methodology is based on the
                  mutual optimization of user/adversary objectives
                  (maximizing location privacy versus minimizing
                  localization error) formalized as a Stackelberg
                  Bayesian game. This formalization makes our solution
                  robust against any location inference attack, that is,
                  the adversary cannot decrease the user's privacy by
                  designing a better inference algorithm as long as the
                  obfuscation mechanism is designed according to our
                  privacy games. We develop two linear programs that
                  solve the location privacy game and output the optimal
                  obfuscation strategy and its corresponding optimal
                  inference attack. These linear programs are used to
                  design location privacy--preserving mechanisms that
                  consider the correlation between past, current, and
                  future locations of the user, thus can be tuned to
                  protect different privacy objectives along the user's
                  location trace. We illustrate the efficacy of the
                  optimal location privacy--preserving mechanisms
                  obtained with our approach against real location
                  traces, showing their performance in protecting users'
                  different location privacy objectives.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``12:??'' and CODEN ``????'' are
%%% placeholders --- confirm against the published issue
%%% (DOI 10.1145/3007209).
@Article{Argyros:2017:EPG,
  author =       "George Argyros and Theofilos Petsios and Suphannee
                  Sivakorn and Angelos D. Keromytis and Jason Polakis",
  title =        "Evaluating the Privacy Guarantees of Location
                  Proximity Services",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007209",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                  http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Location-based services have become an integral part
                  of everyday life. To address the privacy issues that
                  emerge from the use and sharing of location
                  information, social networks and smartphone
                  applications have adopted location proximity schemes as
                  a means of balancing user privacy with utility.
                  Unfortunately, despite the extensive academic
                  literature on this topic, the schemes that large
                  service providers have adopted are not always designed
                  or implemented correctly, rendering users vulnerable to
                  location-disclosure attacks. Such attacks have recently
                  received major publicity as, in some cases, they even
                  exposed citizens of oppressive regimes to
                  life-threatening risks. In this article, we
                  systematically assess the defenses that popular
                  location-based services and mobile applications deploy
                  to guard against adversaries seeking to identify a
                  user's location. We provide the theoretical foundations
                  for formalizing the privacy guarantees of currently
                  adopted proximity models, design practical attacks for
                  each case, and prove tight bounds on the number of
                  queries required for carrying out successful attacks in
                  practice. To evaluate the completeness of our approach,
                  we conduct extensive experiments against popular
                  services including Facebook, Foursquare, and Grindr.
                  Our results demonstrate that, even though the
                  aforementioned services implement various
                  privacy-preserving techniques to protect their users,
                  they are still vulnerable to attacks. In particular, we
                  are able to pinpoint Facebook users within 5m of their
                  exact location. For Foursquare and Grindr, users are
                  pinpointed within 15m of their location in 90\% of the
                  cases, even with the strictest privacy settings
                  enabled. Our attacks are highly efficient and complete
                  within a few seconds. The severity of our findings was
                  acknowledged by Facebook and Foursquare, both of which
                  have followed our recommendations and adopted our
                  design of a safe proximity scheme in their production
                  systems. As the number of mobile applications offering
                  location functionality will continue to increase,
                  service providers and software developers must be able
                  to assess the privacy guarantees that their services
                  offer. To that end, we discuss viable defenses that can
                  be currently adopted by all major services, and provide
                  an open-source testing framework to be used by
                  researchers and service providers who wish to evaluate
                  the privacy-preserving properties of applications
                  offering proximity functionality.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): first entry of volume 20; page-range end ``1:??''
%%% and CODEN ``????'' are placeholders --- confirm against the
%%% published issue (DOI 10.1145/3007208).
@Article{Matsumoto:2017:ACG,
  author =       "Stephanos Matsumoto and Raphael M. Reischuk and Pawel
                  Szalachowski and Tiffany Hyun-Jin Kim and Adrian
                  Perrig",
  title =        "Authentication Challenges in a Global Environment",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007208",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we address the problem of scaling
                  authentication for naming, routing, and end-entity (EE)
                  certification to a global environment in which
                  authentication policies and users' sets of trust roots
                  vary widely. The current mechanisms for authenticating
                  names (DNSSEC), routes (BGPSEC), and EE certificates
                  (TLS) do not support a coexistence of authentication
                  policies, affect the entire Internet when compromised,
                  cannot update trust root information efficiently, and
                  do not provide users with the ability to make flexible
                  trust decisions. We propose the Scalable Authentication
                  Infrastructure for Next-generation Trust (SAINT), which
                  partitions the Internet into groups with common, local
                  trust roots and isolates the effects of a compromised
                  trust root. SAINT requires groups with direct routing
                  connections to cross-sign each other for authentication
                  purposes, allowing diverse authentication policies
                  while keeping all entities' authentication information
                  globally discoverable. SAINT makes trust root
                  management a central part of the network architecture,
                  enabling trust root updates within seconds and allowing
                  users to make flexible trust decisions. SAINT operates
                  without a significant performance penalty and can be
                  deployed alongside existing infrastructures.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``2:??'' and CODEN ``????'' are
%%% placeholders --- confirm against the published issue
%%% (DOI 10.1145/3020003).
@Article{Wagner:2017:ESG,
  author =       "Isabel Wagner",
  title =        "Evaluating the Strength of Genomic Privacy Metrics",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3020003",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                  http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The genome is a unique identifier for human
                  individuals. The genome also contains highly sensitive
                  information, creating a high potential for misuse of
                  genomic data (for example, genetic discrimination). In
                  this article, we investigate how genomic privacy can be
                  measured in scenarios where an adversary aims to infer
                  a person's genomic markers by constructing probability
                  distributions on the values of genetic variations. We
                  measured the strength of privacy metrics by requiring
                  that metrics are monotonic with increasing adversary
                  strength and uncovered serious problems with several
                  existing metrics currently used to measure genomic
                  privacy. We provide suggestions on metric selection,
                  interpretation, and visualization and illustrate the
                  work flow using case studies for three real-world
                  diseases.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): fixed subject/verb disagreement in the transcribed
%%% abstract (``These data is'' --> ``These data are'').  Page-range
%%% end ``3:??'' and CODEN ``????'' remain placeholders --- confirm
%%% against the published issue (DOI 10.1145/3035538).
@Article{Humbert:2017:QIR,
  author =       "Mathias Humbert and Erman Ayday and Jean-Pierre Hubaux
                  and Amalio Telenti",
  title =        "Quantifying Interdependent Risks in Genomic Privacy",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3035538",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                  http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The rapid progress in human-genome sequencing is
                  leading to a high availability of genomic data. These
                  data are notoriously very sensitive and stable in time,
                  and highly correlated among relatives. In this article,
                  we study the implications of these familial
                  correlations on kin genomic privacy. We formalize the
                  problem and detail efficient reconstruction attacks
                  based on graphical models and belief propagation. With
                  our approach, an attacker can infer the genomes of the
                  relatives of an individual whose genome or phenotype
                  are observed by notably relying on Mendel's Laws,
                  statistical relationships between the genomic variants,
                  and between the genome and the phenotype. We evaluate
                  the effect of these dependencies on privacy with
                  respect to the amount of observed variants and the
                  relatives sharing them. We also study how the
                  algorithmic performance evolves when we take these
                  various relationships into account. Furthermore, to
                  quantify the level of genomic privacy as a result of
                  the proposed inference attack, we discuss possible
                  definitions of genomic privacy metrics, and compare
                  their values and evolution. Genomic data reveals
                  Mendelian disorders and the likelihood of developing
                  severe diseases, such as Alzheimer's. We also introduce
                  the quantification of health privacy, specifically, the
                  measure of how well the predisposition to a disease is
                  concealed from an attacker. We evaluate our approach on
                  actual genomic data from a pedigree and show the threat
                  extent by combining data gathered from a genome-sharing
                  website as well as an online social network.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``5:??'' and CODEN ``????'' are
%%% placeholders --- confirm against the published issue
%%% (DOI 10.1145/3041041).
@Article{Osterweil:2017:IAI,
  author =       "Leon J. Osterweil and Matt Bishop and Heather M.
                  Conboy and Huong Phan and Borislava I. Simidchieva and
                  George S. Avrunin and Lori A. Clarke and Sean Peisert",
  title =        "Iterative Analysis to Improve Key Properties of
                  Critical Human-Intensive Processes: an Election
                  Security Example",
  journal =      j-TOPS,
  volume =       "20",
  number =       "2",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3041041",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we present an approach for
                  systematically improving complex processes, especially
                  those involving human agents, hardware devices, and
                  software systems. We illustrate the utility of this
                  approach by applying it to part of an election process
                  and show how it can improve the security and
                  correctness of that subprocess. We use the Little-JIL
                  process definition language to create a precise and
                  detailed definition of the process. Given this process
                  definition, we use two forms of automated analysis to
                  explore whether specified key properties, such as
                  security and safety policies, can be undermined. First,
                  we use model checking to identify process execution
                  sequences that fail to conform to event-sequence
                  properties. After these are addressed, we apply fault
                  tree analysis to identify when the misperformance of
                  steps might allow undesirable outcomes, such as
                  security breaches. The results of these analyses can
                  provide assurance about the process; suggest areas for
                  improvement; and, when applied to a modified process
                  definition, evaluate proposed changes.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): restored the em-dash lost in transcription
%%% (``investigated-both'' --> ``investigated---both'').  Page-range
%%% end ``7:??'' and CODEN ``????'' remain placeholders --- confirm
%%% against the published issue (DOI 10.1145/3079763).
@Article{Noorman:2017:SLC,
  author =       "Job Noorman and Jo {Van Bulck} and Jan Tobias
                  M{\"u}hlberg and Frank Piessens and Pieter Maene and
                  Bart Preneel and Ingrid Verbauwhede and Johannes
                  G{\"o}tzfried and Tilo M{\"u}ller and Felix Freiling",
  title =        "{Sancus 2.0}: a Low-Cost Security Architecture for
                  {IoT} Devices",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3079763",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The Sancus security architecture for networked
                  embedded devices was proposed in 2013 at the USENIX
                  Security conference. It supports remote (even
                  third-party) software installation on devices while
                  maintaining strong security guarantees. More
                  specifically, Sancus can remotely attest to a software
                  provider that a specific software module is running
                  uncompromised and can provide a secure communication
                  channel between software modules and software
                  providers. Software modules can securely maintain local
                  state and can securely interact with other software
                  modules that they choose to trust. Over the past three
                  years, significant experience has been gained with
                  applications of Sancus, and several extensions of the
                  architecture have been investigated---both by the
                  original designers as well as by independent
                  researchers. Informed by these additional research
                  results, this journal version of the Sancus paper
                  describes an improved design and implementation,
                  supporting additional security guarantees (such as
                  confidential deployment) and a more efficient
                  cryptographic core. We describe the design of Sancus
                  2.0 (without relying on any prior knowledge of Sancus)
                  and develop and evaluate a prototype FPGA
                  implementation. The prototype extends an MSP430
                  processor with hardware support for the memory access
                  control and cryptographic functionality required to run
                  Sancus. We report on our experience using Sancus in a
                  variety of application scenarios and discuss some
                  important avenues of ongoing and future work.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): page-range end ``8:??'' and CODEN ``????'' are
%%% placeholders --- confirm against the published issue
%%% (DOI 10.1145/3079762).
@Article{Brandenburger:2017:DTC,
  author =       "Marcus Brandenburger and Christian Cachin and Nikola
                  Knezevi{\'c}",
  title =        "Don't Trust the Cloud, Verify: Integrity and
                  Consistency for Cloud Object Stores",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3079762",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Cloud services have turned remote computation into a
                  commodity and enable convenient online collaboration.
                  However, they require that clients fully trust the
                  service provider in terms of confidentiality,
                  integrity, and availability. Toward reducing this
                  dependency, this article introduces VICOS, a protocol
                  for verification of integrity and consistency for cloud
                  object storage that enables a group of mutually
                  trusting clients to detect data integrity and
                  consistency violations for a cloud object storage
                  service. It aims at services where multiple clients
                  cooperate on data stored remotely on a potentially
                  misbehaving service. VICOS enforces the consistency
                  notion of fork-linearizability, supports wait-free
                  client semantics for most operations, and reduces the
                  computation and communication overhead compared to
                  previous protocols. VICOS is based on a generic
                  authenticated data structure. Moreover, its operations
                  cover the hierarchical name space of a cloud object
                  store, supporting a real-world interface and not only a
                  simplistic abstraction. A prototype of VICOS that works
                  with the key-value store interface of commodity cloud
                  storage services has been implemented, and an
                  evaluation demonstrates its advantage compared to
                  existing systems.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): repaired mangled ampersand in the transcribed
%%% abstract: ``IEEE S8P 2009'' --> ``IEEE S\&P 2009'' (the `&' must
%%% be escaped as \& in BibTeX field text).  Page-range end ``9:??''
%%% and CODEN ``????'' remain placeholders.
@Article{Toreini:2017:TRP,
  author =       "Ehsan Toreini and Siamak F. Shahandashti and Feng
                  Hao",
  title =        "Texture to the Rescue: Practical Paper Fingerprinting
                  Based on Texture Patterns",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3092816",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we propose a novel paper
                  fingerprinting technique based on analyzing the
                  translucent patterns revealed when a light source
                  shines through the paper. These patterns represent the
                  inherent texture of paper, formed by the random
                  interleaving of wooden particles during the
                  manufacturing process. We show that these patterns can
                  be easily captured by a commodity camera and condensed
                  into a compact 2,048-bit fingerprint code. Prominent
                  works in this area (Nature 2005, IEEE S\&P 2009, CCS
                  2011) have all focused on fingerprinting paper based on
                  the paper ``surface.'' We are motivated by the
                  observation that capturing the surface alone misses
                  important distinctive features such as the noneven
                  thickness, random distribution of impurities, and
                  different materials in the paper with varying
                  opacities. Through experiments, we demonstrate that the
                  embedded paper texture provides a more reliable source
                  for fingerprinting than features on the surface. Based
                  on the collected datasets, we achieve 0\% false
                  rejection and 0\% false acceptance rates. We further
                  report that our extracted fingerprints contain 807
                  degrees of freedom (DoF), which is much higher than the
                  249 DoF with iris codes (that have the same size of
                  2,048 bits). The high amount of DoF for texture-based
                  fingerprints makes our method extremely scalable for
                  recognition among very large databases; it also allows
                  secure usage of the extracted fingerprint in
                  privacy-preserving authentication schemes based on
                  error correction techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% NOTE(review): restored the em-dashes lost in transcription
%%% (``Propagation-an ... technique-can'' --> ``---'' on both sides
%%% of the parenthetical).  Page-range end ``10:??'' and CODEN
%%% ``????'' remain placeholders (DOI 10.1145/3105760).
@Article{Munoz-Gonzalez:2017:EAG,
  author =       "Luis Mu{\~n}oz-Gonz{\'a}lez and Daniele Sgandurra and
                  Andrea Paudice and Emil C. Lupu",
  title =        "Efficient Attack Graph Analysis through Approximate
                  Inference",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105760",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Attack graphs provide compact representations of the
                  attack paths an attacker can follow to compromise
                  network resources from the analysis of network
                  vulnerabilities and topology. These representations are
                  a powerful tool for security risk assessment. Bayesian
                  inference on attack graphs enables the estimation of
                  the risk of compromise to the system's components given
                  their vulnerabilities and interconnections and accounts
                  for multi-step attacks spreading through the system.
                  While static analysis considers the risk posture at
                  rest, dynamic analysis also accounts for evidence of
                  compromise, for example, from Security Information and
                  Event Management software or forensic investigation.
                  However, in this context, exact Bayesian inference
                  techniques do not scale well. In this article, we show
                  how Loopy Belief Propagation---an approximate inference
                  technique---can be applied to attack graphs and that it
                  scales linearly in the number of nodes for both static
                  and dynamic analysis, making such analyses viable for
                  larger networks. We experiment with different
                  topologies and network clustering on synthetic Bayesian
                  attack graphs with thousands of nodes to show that the
                  algorithm's accuracy is acceptable and that it
                  converges to a stable solution. We compare sequential
                  and parallel versions of Loopy Belief Propagation with
                  exact inference techniques for both static and dynamic
                  analysis, showing the advantages and gains of
                  approximate inference techniques when scaling to larger
                  attack graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Reaves:2017:MBM,
  author =       "Bradley Reaves and Jasmine Bowers and Nolen Scaife and
                 Adam Bates and Arnav Bhartiya and Patrick Traynor and
                 Kevin R. B. Butler",
  title =        "Mo(bile) Money, Mo(bile) Problems: Analysis of
                 Branchless Banking Applications",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3092368",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Mobile money, also known as branchless banking,
                 leverages ubiquitous cellular networks to bring
                 much-needed financial services to the unbanked in the
                 developing world. These services are often deployed as
                 smartphone apps, and although marketed as secure, these
                 applications are often not regulated as strictly as
                 traditional banks, leaving doubt about the truth of
                 such claims. In this article, we evaluate these claims
                 and perform the first in-depth measurement analysis of
                 branchless banking applications. We first perform an
                 automated analysis of all 46 known Android mobile money
                 apps across the 246 known mobile money providers from
                 2015. We then perform a comprehensive manual teardown
                 of the registration, login, and transaction procedures
                 of a diverse 15\% of these apps. We uncover pervasive
                 vulnerabilities spanning botched certification
                 validation, do-it-yourself cryptography, and other
                 forms of information leakage that allow an attacker to
                 impersonate legitimate users, modify transactions, and
                 steal financial records. These findings show that the
                 majority of these apps fail to provide the protections
                 needed by financial services. In an expanded
                 re-evaluation one year later, we find that these
                 systems have only marginally improved their security.
                 Additionally, we document our experiences working in
                 this sector for future researchers and provide
                 recommendations to improve the security of this
                 critical ecosystem. Finally, through inspection of
                 providers' terms of service, we also discover that
                 liability for these problems unfairly rests on the
                 shoulders of the customer, threatening to erode trust
                 in branchless banking and hinder efforts for global
                 financial inclusion.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Shu:2017:LSP,
  author =       "Xiaokui Shu and Danfeng (Daphne) Yao and Naren
                 Ramakrishnan and Trent Jaeger",
  title =        "Long-Span Program Behavior Modeling and Attack
                 Detection",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105761",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Intertwined developments between program attacks and
                 defenses witness the evolution of program anomaly
                 detection methods. Emerging categories of program
                 attacks, e.g., non-control data attacks and
                 data-oriented programming, are able to comply with
                 normal trace patterns at local views. This article
                 points out the deficiency of existing program anomaly
                 detection models against new attacks and presents
                 long-span behavior anomaly detection (LAD), a model
                 based on mildly context-sensitive grammar verification.
                 The key feature of LAD is its reasoning of correlations
                 among arbitrary events that occurred in long program
                 traces. It extends existing correlation analysis
                 between events at a stack snapshot, e.g., paired call
                 and ret, to correlation analysis among events that
                 historically occurred during the execution. The
                 proposed method leverages specialized machine learning
                 techniques to probe normal program behavior boundaries
                 in vast high-dimensional detection space. Its two-stage
                 modeling/detection design analyzes event correlation at
                 both binary and quantitative levels. Our prototype
                 successfully detects all reproduced real-world attacks
                 against sshd, libpcre, and sendmail. The detection
                 procedure incurs 0.1 ms to 1.3 ms overhead to profile
                 and analyze a single behavior instance that consists of
                 tens of thousands of function call or system call
                 events.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Ikram:2017:MCD,
  author =       "Muhammad Ikram and Lucky Onwuzurike and Shehroze
                 Farooqi and Emiliano {De Cristofaro} and Arik Friedman
                 and Guillaume Jourjon and Mohammed Ali Kaafar and M.
                 Zubair Shafiq",
  title =        "Measuring, Characterizing, and Detecting {Facebook}
                 Like Farms",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3121134",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Online social networks offer convenient ways to reach
                 out to large audiences. In particular, Facebook pages
                 are increasingly used by businesses, brands, and
                 organizations to connect with multitudes of users
                 worldwide. As the number of likes of a page has become
                 a de-facto measure of its popularity and profitability,
                 an underground market of services artificially
                 inflating page likes (``like farms'') has emerged
                 alongside Facebook's official targeted advertising
                 platform. Nonetheless, besides a few media reports,
                 there is little work that systematically analyzes
                 Facebook pages' promotion methods. Aiming to fill this
                 gap, we present a honeypot-based comparative
                 measurement study of page likes garnered via Facebook
                 advertising and from popular like farms. First, we
                 analyze likes based on demographic, temporal, and
                 social characteristics and find that some farms seem to
                 be operated by bots and do not really try to hide the
                 nature of their operations, while others follow a
                 stealthier approach, mimicking regular users' behavior.
                 Next, we look at fraud detection algorithms currently
                 deployed by Facebook and show that they do not work
                 well to detect stealthy farms that spread likes over
                 longer timespans and like popular pages to mimic
                 regular users. To overcome their limitations, we
                 investigate the feasibility of timeline-based detection
                 of like farm accounts, focusing on characterizing
                 content generated by Facebook accounts on their
                 timelines as an indicator of genuine versus fake social
                 activity. We analyze a wide range of features extracted
                 from timeline posts, which we group into two main
                 categories: lexical and non-lexical. We find that like
                 farm accounts tend to re-share content more often, use
                 fewer words and poorer vocabulary, and more often
                 generate duplicate comments and likes compared to
                 normal users. Using relevant lexical and non-lexical
                 features, we build a classifier to detect like farms
                 accounts that achieves a precision higher than 99\% and
                 a 93\% recall.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Polyakov:2017:FPR,
  author =       "Yuriy Polyakov and Kurt Rohloff and Gyana Sahu and
                 Vinod Vaikuntanathan",
  title =        "Fast Proxy Re-Encryption for Publish\slash Subscribe
                 Systems",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3128607",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We develop two IND-CPA-secure multihop unidirectional
                 Proxy Re-Encryption (PRE) schemes by applying the
                 Ring-LWE (RLWE) key switching approach from the
                 homomorphic encryption literature. Unidirectional PRE
                 is ideal for secure publish-subscribe operations where
                 a publisher encrypts information using a public key
                 without knowing upfront who the subscriber will be and
                 what private key will be used for decryption. The
                 proposed PRE schemes provide a multihop capability,
                 meaning that when PRE-encrypted information is
                 published onto a PRE-enabled server, the server can
                 either delegate access to specific clients or enable
                 other servers the right to delegate access. Our first
                 scheme (which we call NTRU-ABD-PRE) is based on a
                 variant of the NTRU-RLWE homomorphic encryption scheme.
                 Our second and main PRE scheme (which we call BV-PRE)
                 is built on top of the Brakerski-Vaikuntanathan (BV)
                 homomorphic encryption scheme and relies solely on the
                 RLWE assumption. We present an open-source C++
                 implementation of both schemes and discuss several
                 algorithmic and software optimizations. We examine
                 parameter selection tradeoffs in the context of
                 security, runtime/latency, throughput, ciphertext
                 expansion, memory usage, and multihop capabilities. Our
                 experimental analysis demonstrates that BV-PRE
                 outperforms NTRU-ABD-PRE in both single-hop and
                 multihop settings. The BV-PRE scheme has a lower time
                 and space complexity than existing IND-CPA-secure
                 lattice-based PRE schemes and requires small concrete
                 parameters, making the scheme computationally efficient
                 for use on low-resource embedded systems while still
                 providing 100 bits of security. We present practical
                 recommendations for applying the PRE schemes to several
                 use cases of ad hoc information sharing for
                 publish-subscribe operations.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Rullo:2017:POS,
  author =       "Antonino Rullo and Daniele Midi and Edoardo Serra and
                 Elisa Bertino",
  title =        "{Pareto} Optimal Security Resource Allocation for
                 {Internet of Things}",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3139293",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In many Internet of Things (IoT) application domains
                 security is a critical requirement, because malicious
                 parties can undermine the effectiveness of IoT-based
                 systems by compromising single components and/or
                 communication channels. Thus, a security infrastructure
                 is needed to ensure the proper functioning of such
                 systems even under attack. However, it is also critical
                 that security be at a reasonable resource and energy
                 cost. In this article, we focus on the problem of
                 efficiently and effectively securing IoT networks by
                 carefully allocating security resources in the network
                 area. In particular, given a set of security resources
                 R and a set of attacks to be faced A, our method
                 chooses the subset of R that best addresses the attacks
                 in A, and the set of locations where to place them,
                 that ensure the security coverage of all IoT devices at
                 minimum cost and energy consumption. We model our
                 problem according to game theory and provide a
                 Pareto-optimal solution in which the cost of the
                 security infrastructure, its energy consumption, and
                 the probability of a successful attack are minimized.
                 Our experimental evaluation shows that our technique
                 improves the system robustness in terms of packet
                 delivery rate for different network topologies.
                 Furthermore, we also provide a method for handling the
                 computation of the resource allocation plan for
                 large-scale networks scenarios, where the optimization
                 problem may require an unreasonable amount of time to
                 be solved. We show how our proposed method drastically
                 reduces the computing time, while providing a
                 reasonable approximation of the optimal solution.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Su:2017:DPM,
  author =       "Dong Su and Jianneng Cao and Ninghui Li and Elisa
                 Bertino and Min Lyu and Hongxia Jin",
  title =        "Differentially Private {$K$}-Means Clustering and a
                 Hybrid Approach to Private Optimization",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3133201",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "k-means clustering is a widely used clustering
                 analysis technique in machine learning. In this
                 article, we study the problem of differentially private
                 k-means clustering. Several state-of-the-art methods
                 follow the single-workload approach, which adapts an
                 existing machine-learning algorithm by making each step
                 private. However, most of them do not have satisfactory
                 empirical performance. In this work, we develop
                 techniques to analyze the empirical error behaviors of
                 one of the state-of-the-art single-workload approaches,
                 DPLloyd, which is a differentially private version of
                 the Lloyd algorithm for k-means clustering. Based
                 on the analysis, we propose an improvement of DPLloyd.
                 We also propose a new algorithm for k-means clustering
                 from the perspective of the noninteractive approach,
                 which publishes a synopsis of the input dataset and
                 then runs k-means on synthetic data generated from the
                 synopsis. We denote this approach by EUGkM. After
                 analyzing the empirical error behaviors of EUGkM, we
                 further propose a hybrid approach that combines our
                 DPLloyd improvement and EUGkM. Results from extensive
                 and systematic experiments support our analysis and
                 demonstrate the effectiveness of the DPLloyd
                 improvement, EUGkM, and the hybrid approach.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Abdou:2018:SLV,
  author =       "Abdelrahman Abdou and P. C. {Van Oorschot}",
  title =        "Server Location Verification {(SLV)} and Server
                 Location Pinning: Augmenting {TLS} Authentication",
  journal =      j-TOPS,
  volume =       "21",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3139294",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3139294",
  abstract =     "We introduce the first known mechanism providing
                 realtime server location verification. Its uses include
                 enhancing server authentication by enabling browsers to
                 automatically interpret server location information. We
                 describe the design of this new measurement-based
                 technique, Server Location Verification (SLV), and
                 evaluate it using PlanetLab. We explain how SLV is
                 compatible with the increasing trends of geographically
                 distributed content dissemination over the Internet,
                 without causing any new interoperability conflicts.
                 Additionally, we introduce the notion of (verifiable)
                 server location pinning (conceptually similar to
                 certificate pinning) to support SLV, and evaluate their
                 combined impact using a server-authentication
                 evaluation framework. The results affirm the addition
                 of new security benefits to the existing TLS-based
                 authentication mechanisms. We implement SLV through a
                 location verification service, the simplest version of
                 which requires no server-side changes. We also
                 implement a simple browser extension that interacts
                 seamlessly with the verification infrastructure to
                 obtain realtime server location-verification results.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Shi:2018:HAV,
  author =       "Hao Shi and Jelena Mirkovic and Abdulla Alwabel",
  title =        "Handling Anti-Virtual Machine Techniques in Malicious
                 Software",
  journal =      j-TOPS,
  volume =       "21",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3139292",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib;
                 http://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3139292",
  abstract =     "Malware analysis relies heavily on the use of virtual
                 machines (VMs) for functionality and safety. There are
                 subtle differences in operation between virtual and
                 physical machines. Contemporary malware checks for
                 these differences and changes its behavior when it
                 detects a VM presence. These anti-VM techniques hinder
                 malware analysis. Existing research approaches to
                 uncover differences between VMs and physical machines
                 use randomized testing, and thus cannot guarantee
                 completeness. In this article, we propose a
                 detect-and-hide approach, which systematically
                 addresses anti-VM techniques in malware. First, we
                 propose cardinal pill testing---a modification of red
                 pill testing that aims to enumerate the differences
                 between a given VM and a physical machine through
                 carefully designed tests. Cardinal pill testing finds
                 five times more pills by running 15 times fewer tests
                 than red pill testing. We examine the causes of pills
                 and find that, while the majority of them stem from the
                 failure of VMs to follow CPU specifications, a small
                 number stem from under-specification of certain
                 instructions by the Intel manual. This leads to
                 divergent implementations in different CPU and VM
                 architectures. Cardinal pill testing successfully
                 enumerates the differences that stem from the first
                 cause. Finally, we propose VM Cloak---a WinDbg plug-in
                 which hides the presence of VMs from malware. VM Cloak
                 monitors each executed malware command, detects
                 potential pills, and at runtime modifies the command's
                 outcomes to match those that a physical machine would
                 generate. We implemented VM Cloak and verified that it
                 successfully hides VM presence from malware.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Gong:2018:AIA,
  author =       "Neil Zhenqiang Gong and Bin Liu",
  title =        "Attribute Inference Attacks in Online Social
                 Networks",
  journal =      j-TOPS,
  volume =       "21",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3154793",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3154793",
  abstract =     "We propose new privacy attacks to infer attributes
                 (e.g., locations, occupations, and interests) of online
                 social network users. Our attacks leverage seemingly
                 innocent user information that is publicly available in
                 online social networks to infer missing attributes of
                 targeted users. Given the increasing availability of
                 (seemingly innocent) user information online, our
                 results have serious implications for Internet
                 privacy---private attributes can be inferred from users'
                 publicly available data unless we take steps to protect
                 users from such inference attacks. To infer attributes
                 of a targeted user, existing inference attacks leverage
                 either the user's publicly available social friends or
                 the user's behavioral records (e.g., the web pages that
                 the user has liked on Facebook, the apps that the user
                 has reviewed on Google Play), but not both. As we will
                 show, such inference attacks achieve limited success
                 rates. However, the problem becomes qualitatively
                 different if we consider both social friends and
                 behavioral records. To address this challenge, we
                 develop a novel model to integrate social friends and
                 behavioral records, and design new attacks based on our
                 model. We theoretically and experimentally demonstrate
                 the effectiveness of our attacks. For instance, we
                 observe that, in a real-world large-scale dataset with
                 1.1 million users, our attack can correctly infer the
                 cities a user lived in for 57\% of the users; via
                 confidence estimation, we are able to increase the
                 attack success rate to over 90\% if the attacker
                 selectively attacks half of the users. Moreover, we
                 show that our attack can correctly infer attributes for
                 significantly more users than previous attacks.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Botacin:2018:EBM,
  author =       "Marcus Botacin and Paulo L{\'\i}cio {De Geus} and
                 Andr{\'e} Gr{\'e}gio",
  title =        "Enhancing Branch Monitoring for Security Purposes:
                 From Control Flow Integrity to Malware Analysis and
                 Debugging",
  journal =      j-TOPS,
  volume =       "21",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3152162",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3152162",
  abstract =     "Malware and code-reuse attacks are the most
                 significant threats to current systems operation.
                 Solutions developed to countermeasure them have their
                 weaknesses exploited by attackers through sandbox
                 evasion and antidebug crafting. To address such
                 weaknesses, we propose a framework that relies on the
                 modern processors' branch monitor feature to allow us
                 to analyze malware while reducing evasion effects. The
                 use of hardware assistance aids in increasing
                 stealthiness, a key feature for debuggers, as modern
                 software (malicious or benign) may be antianalysis
                 armored. We achieve stealthier code execution control
                 by using the branch monitor hardware's inherent
                 interrupt capabilities, keeping the code under
                 execution intact. Previous works on branch monitoring
                 have already addressed the ROP attack problem but
                 require code injection and/or are limited in their
                 capture window size. Therefore, we also propose a ROP
                 detector without these limitations.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Bhattacharya:2018:UPC,
  author =       "Sarani Bhattacharya and Debdeep Mukhopadhyay",
  title =        "Utilizing Performance Counters for Compromising Public
                 Key Ciphers",
  journal =      j-TOPS,
  volume =       "21",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3156015",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3156015",
  abstract =     "Hardware performance counters (HPCs) are useful
                 artifacts for evaluating the performance of software
                 implementations. Recently, HPCs have been made more
                 convenient to use without requiring explicit kernel
                 patches or superuser privileges. However, in this
                 article, we highlight that the information revealed by
                 HPCs can be also exploited to attack standard
                 implementations of public key algorithms. In
                 particular, we analyze the vulnerability due to the
                 event branch miss leaked via the HPCs during execution
                 of the target ciphers. We present an iterative attack
                 that targets the key bits of 1,024-bit RSA and 256-bit
                 ECC, whereas in the offline phase, the system's
                 underlying branch predictor is approximated by a
                 theoretical predictor in the literature. Subsimulations
                 are performed corresponding to each bit guess to
                 classify the message space into distinct partitions
                 based on the event branch misprediction and the target
                 key bit value. In the online phase, branch
                 mispredictions obtained from the hardware performance
                 monitors on the target system reveal the secret key
                 bits. We also theoretically prove that the probability
                 of success of the attack is equivalent to the accurate
                 modeling of the theoretical predictor to the underlying
                 system predictor. In addition, we propose an improved
                 version of the attack that requires fewer branch
                 misprediction traces from the HPCs to recover the
                 secret. Experimentations using both attack strategies
                 have been provided on Intel Core 2 Duo, Core i3, and
                 Core i5 platforms for 1,024-bit implementation of RSA
                 and 256-bit scalar multiplication over the secp256r1
                 curve followed by results on the effect of change of
                 parameters on the success rate. The attack can
                 successfully reveal the exponent bits and thus seeks
                 attention to model secure branch predictors such that
                 it inherently prevents information leakage.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Zhang:2018:ISP,
  author =       "Yihua Zhang and Marina Blanton and Ghada Almashaqbeh",
  title =        "Implementing Support for Pointers to Private Data in a
                 General-Purpose Secure Multi-Party Compiler",
  journal =      j-TOPS,
  volume =       "21",
  number =       "2",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3154600",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3154600",
  abstract =     "Recent compilers allow a general-purpose program
                 (written in a conventional programming language) that
                 handles private data to be translated into a secure
                 distributed implementation of the corresponding
                 functionality. The resulting program is then guaranteed
                 to provably protect private data using secure
                 multi-party computation techniques. The goals of such
                 compilers are generality, usability, and efficiency,
                 but the complete set of features of a modern
                 programming language has not been supported to date by
                 the existing compilers. In particular, recent compilers
                 PICCO and the two-party ANSI C compiler strive to
                 translate any C program into its secure multi-party
                 implementation, but they currently lack support for
                 pointers and dynamic memory allocation, which are
                 important components of many C programs. In this work,
                 we mitigate the limitation and add support for pointers
                 to private data and consequently dynamic memory
                 allocation to the PICCO compiler, enabling it to handle
                 a more diverse set of programs over private data.
                 Because doing so opens up a new design space, we
                 investigate the use of pointers to private data (with
                 known as well as private locations stored in them) in
                 programs and report our findings. Aside from dynamic
                 memory allocation, we examine other important topics
                 associated with common pointer use such as reference by
                 pointer/address, casting, and building various data
                 structures in the context of secure multi-party
                 computation. This results in enabling the compiler to
                 automatically translate a user program that uses
                 pointers to private data into its distributed
                 implementation that provably protects private data
                 throughout the computation. We empirically evaluate the
                 constructions and report on the performance of
                 representative programs.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Pinkas:2018:SPS,
  author =       "Benny Pinkas and Thomas Schneider and Michael Zohner",
  title =        "Scalable Private Set Intersection Based on {OT}
                 Extension",
  journal =      j-TOPS,
  volume =       "21",
  number =       "2",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3154794",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/hash.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3154794",
  abstract =     "Private set intersection (PSI) allows two parties to
                 compute the intersection of their sets without
                 revealing any information about items that are not in
                 the intersection. It is one of the best studied
                 applications of secure computation and many PSI
                 protocols have been proposed. However, the variety of
                 existing PSI protocols makes it difficult to identify
                 the solution that performs best in a respective
                 scenario, especially since they were not compared in
                 the same setting. In addition, existing PSI protocols
                 are several orders of magnitude slower than an insecure
                 na{\"\i}ve hashing solution, which is used in practice.
                 In this article, we review the progress made on PSI
                 protocols and give an overview of existing protocols in
                 various security models. We then focus on PSI protocols
                 that are secure against semi-honest adversaries and
                 take advantage of the most recent efficiency
                 improvements in Oblivious Transfer (OT) extension,
                 propose significant optimizations to previous PSI
                 protocols, and suggest a new PSI protocol whose runtime
                 is superior to that of existing protocols. We compare
                 the performance of the protocols, both theoretically
                 and experimentally, by implementing all protocols on
                 the same platform, give recommendations on which
                 protocol to use in a particular setting, and evaluate
                 the progress on PSI protocols by comparing them to the
                 currently employed insecure na{\"\i}ve hashing
                 protocol. We demonstrate the feasibility of our new PSI
                 protocol by processing two sets with a billion elements
                 each.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Alrabaee:2018:FRE,
  author =       "Saed Alrabaee and Paria Shirani and Lingyu Wang and
                 Mourad Debbabi",
  title =        "{FOSSIL}: A Resilient and Efficient System for
                 Identifying {FOSS} Functions in Malware Binaries",
  journal =      j-TOPS,
  volume =       "21",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3175492",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/gnu.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3175492",
  abstract =     "Identifying free open-source software (FOSS) packages
                 on binaries when the source code is unavailable is
                 important for many security applications, such as
                 malware detection, software infringement, and digital
                 forensics. This capability enhances both the accuracy
                 and the efficiency of reverse engineering tasks by
                 avoiding false correlations between irrelevant code
                 bases. Although the FOSS package identification problem
                 belongs to the field of software engineering,
                 conventional approaches rely strongly on practical
                 methods in data mining and database searching. However,
                 various challenges in the use of these methods prevent
                 existing function identification approaches from being
                 effective in the absence of source code. To make
                 matters worse, the introduction of obfuscation
                 techniques, the use of different compilers and
                 compilation settings, and software refactoring
                 techniques has made the automated detection of FOSS
                 packages increasingly difficult. With very few
                 exceptions, the existing systems are not resilient to
                 such techniques, and the exceptions are not
                 sufficiently efficient. To address this issue, we
                 propose FOSSIL, a novel resilient and efficient system
                 that incorporates three components. The first component
                 extracts the syntactical features of functions by
                 considering opcode frequencies and applying a hidden
                 Markov model statistical test. The second component
                 applies a neighborhood hash graph kernel to random
                 walks derived from control-flow graphs, with the goal
                 of extracting the semantics of the functions. The third
                 component applies z-score to the normalized
                 instructions to extract the behavior of instructions in
                 a function. The components are integrated using a
                 Bayesian network model, which synthesizes the results
                 to determine the FOSS function. The novel approach of
                 combining these components using the Bayesian network
                 has produced stronger resilience to code obfuscation.
                 We evaluate our system on three datasets, including
                 real-world projects whose use of FOSS packages is
                 known, malware binaries for which there are security
                 and reverse engineering reports purporting to describe
                 their use of FOSS, and a large repository of malware
                 binaries. We demonstrate that our system is able to
                 identify FOSS packages in real-world projects with a
                 mean precision of 0.95 and with a mean recall of 0.85.
                 Furthermore, FOSSIL is able to discover FOSS packages
                 in malware binaries that match those listed in security
                 and reverse engineering reports. Our results show that
                 modern malware binaries contain 0.10--0.45 of FOSS
                 packages.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Giacobazzi:2018:ANI,
  author =       "Roberto Giacobazzi and Isabella Mastroeni",
  title =        "Abstract Non-Interference: A Unifying Framework for
                 Weakening Information-flow",
  journal =      j-TOPS,
  volume =       "21",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3175660",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3175660",
  abstract =     "Non-interference happens when some elements of a
                 dynamic system do not interfere, i.e., do not affect,
                 other elements in the same system. Originally
                 introduced in language-based security, non-interference
                 means that the manipulation of private information has
                 no effect on public observations of data. In this
                 article, we introduce abstract non-interference as a
                 weakening of non-interference by abstract
                 interpretation. Abstract non-interference is parametric
                 on which private information we want to protect and
                 which are the observational capabilities of the
                 external observer, i.e., what the attacker can observe
                 of a computation and of the data manipulated during the
                 computation. This allows us to model a variety of
                 situations in information-flow security, where the
                 security of a system can be mastered by controlling the
                 degree of precision of the strongest harmless attacker
                 and the properties that are potentially leaked in case
                 of successful attack.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Son:2018:GFD,
  author =       "Yunmok Son and Juhwan Noh and Jaeyeong Choi and
                 Yongdae Kim",
  title =        "{GyrosFinger}: Fingerprinting Drones for Location
                 Tracking Based on the Outputs of {MEMS} Gyroscopes",
  journal =      j-TOPS,
  volume =       "21",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3177751",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:23 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3177751",
  abstract =     "Drones are widely used for various purposes such as
                 delivery, aerial photography, and surveillance.
                 Considering the increasing drone-related services,
                 tracking the locations of drones can cause security
                 threats such as escaping from drone surveillance,
                 disturbing drone-related services, and capturing
                 drones. For wirelessly monitoring the status of drones,
                 telemetry is used, and this status information contains
                 various data such as latitude and longitude, calibrated
                 sensor outputs, and sensor offsets. Because most of the
                 telemetry implementation supports neither
                 authentication nor encryption, an attacker can obtain
                 the status information of the drones by using an
                 appropriate wireless communication device such as
                 software-defined radio. While the attacker knows the
                 locations of the drones from the status information,
                 this information is not sufficient for tracking drones
                 because the status information does not include any
                 identity information that can bind the identity of the
                 drone with its location. In this article, we propose a
                 fingerprinting method for drones in motion for the
                 binding of the identity of the drone with its location.
                 Our fingerprinting method is based on the sensor
                 outputs included in the status information, i.e., the
                 offsets of micro-electro mechanical systems (MEMS)
                 gyroscope, an essential sensor for maintaining the
                 attitude of drones. We found that the offsets of MEMS
                 gyroscopes are different from each other because of
                 manufacturing mismatches, and the offsets of five
                 drones obtained through their telemetry are
                 distinguishable and constant during their flights. To
                 evaluate the performance of our fingerprinting method
                 on a larger scale, we collected the offsets from 70
                 stand-alone MEMS gyroscopes to generate fingerprints.
                 Our experimental results show that, when using the
                 offsets of three and two axes calculated from 128
                 samples of the raw outputs per axis as fingerprints,
                 the F-scores of the proposed method reach 98.78\% and
                 94.47\%, respectively. The offsets collected after a
                 month are also fingerprinted with F-scores of 96.58\%
                 and 78.45\% under the same condition, respectively. The
                 proposed fingerprinting method is effective, robust,
                 and persistent. Additionally, unless the MEMS gyroscope
                 is not replaced, our fingerprinting method can be used
                 for drone tracking even when the target drones are
                 flying.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Carminati:2018:SEB,
  author =       "Michele Carminati and Mario Polino and Andrea
                 Continella and Andrea Lanzi and Federico Maggi and
                 Stefano Zanero",
  title =        "Security Evaluation of a Banking Fraud Analysis
                 System",
  journal =      j-TOPS,
  volume =       "21",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jun,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3178370",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3178370",
  abstract =     "The significant growth of banking fraud, fueled by the
                 underground economy of malware, has raised the need for
                 effective detection systems. Therefore, in the last few
                 years, banks have upgraded their security to protect
                 transactions from fraud. State-of-the-art solutions
                 detect fraud as deviations from customers' spending
                 habits. To the best of our knowledge, almost all
                 existing approaches do not provide an in-depth model's
                 granularity and security analysis against elusive
                 attacks. In this article, we examine Banksealer, a
                 decision support system for banking fraud analysis that
                 evaluates the influence on detection performance of the
                 granularity at which spending habits are modeled and
                 its security against evasive attacks. First, we compare
                 user-centric modeling, which builds a model for each
                 user, with system-centric modeling, which builds a
                 model for the entire system, from the point of view of
                 detection performance. Then, we assess the robustness
                 of Banksealer against malicious attackers that are
                 aware of the structure of the models in use. To this
                 end, we design and implement a proof-of-concept attack
                 tool that performs mimicry attacks, emulating a
                 sophisticated attacker that cloaks frauds to avoid
                 detection. We experimentally confirm the feasibility of
                 such attacks, their cost, and the effort required by an
                 attacker in order to perform them. In addition, we
                 discuss possible countermeasures. We provide a
                 comprehensive evaluation on a large real-world dataset
                 obtained from one of the largest Italian banks.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Kelbert:2018:DUC,
  author =       "Florian Kelbert and Alexander Pretschner",
  title =        "Data Usage Control for Distributed Systems",
  journal =      j-TOPS,
  volume =       "21",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3183342",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3183342",
  abstract =     "Data usage control enables data owners to enforce
                 policies over how their data may be used after they
                 have been released and accessed. We address distributed
                 aspects of this problem, which arise if the protected
                 data reside within multiple systems. We contribute by
                 formalizing, implementing, and evaluating a fully
                 decentralized system that (i) generically and
                 transparently tracks protected data across systems,
                 (ii) propagates data usage policies along, and (iii)
                 efficiently and preventively enforces policies in a
                 decentralized manner. The evaluation shows that (i)
                 dataflow tracking and policy propagation achieve a
                 throughput of 21--54\% of native execution and (ii)
                 decentralized policy enforcement outperforms a
                 centralized approach in many situations.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Stobert:2018:PLC,
  author =       "Elizabeth Stobert and Robert Biddle",
  title =        "The Password Life Cycle",
  journal =      j-TOPS,
  volume =       "21",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3183341",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3183341",
  abstract =     "Managing passwords is a difficult task for users, who
                 must create, remember, and keep track of large numbers
                 of passwords. In this work, we investigated users'
                 coping strategies for password management. Through a
                 series of interviews, we identified a ``life cycle'' of
                 password use and find that users' central task in
                 coping with their passwords is rationing their effort
                 to best protect their important accounts. We followed
                 up this work by interviewing experts about their
                 password management practices and found that experts
                 rely on the same kinds of coping strategies as
                 non-experts, but that their increased situation
                 awareness of security allows them to better ration
                 their effort into protecting their accounts. Finally,
                 we conducted a survey study to explore how the life
                 cycle model generalizes to the larger population and
                 find that the life cycle and rationing patterns can be
                 seen in the broader population, but that survey
                 respondents were less likely to characterize security
                 management as a challenging task.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Wei:2018:APG,
  author =       "Fengguo Wei and Sankardas Roy and Xinming Ou and
                 Robby",
  title =        "{Amandroid}: A Precise and General Inter-component
                 Data Flow Analysis Framework for Security Vetting of
                 {Android} Apps",
  journal =      j-TOPS,
  volume =       "21",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3183575",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3183575",
  abstract =     "We present a new approach to static analysis for
                 security vetting of Android apps and a general
                 framework called Amandroid. Amandroid determines
                 points-to information for all objects in an Android app
                 component in a flow and context-sensitive
                 (user-configurable) way and performs data flow and data
                 dependence analysis for the component. Amandroid also
                 tracks inter-component communication activities. It can
                 stitch the component-level information into the
                 app-level information to perform intra-app or inter-app
                 analysis. In this article, (a) we show that the
                 aforementioned type of comprehensive app analysis is
                 completely feasible in terms of computing resources
                 with modern hardware, (b) we demonstrate that one can
                 easily leverage the results from this general analysis
                 to build various types of specialized security
                 analyses---in many cases the amount of additional coding
                 needed is around 100 lines of code, and (c) the result
                 of those specialized analyses leveraging Amandroid is
                 at least on par and often exceeds prior works designed
                 for the specific problems, which we demonstrate by
                 comparing Amandroid's results with those of prior works
                 whenever we can obtain the executable of those tools.
                 Since Amandroid's analysis directly handles
                 inter-component control and data flows, it can be used
                 to address security problems that result from
                 interactions among multiple components from either the
                 same or different apps. Amandroid's analysis is sound
                 in that it can provide assurance of the absence of the
                 specified security problems in an app with
                 well-specified and reasonable assumptions on Android
                 runtime system and its library.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Zhao:2018:FFI,
  author =       "Siqi Zhao and Xuhua Ding",
  title =        "{FIMCE}: A Fully Isolated Micro-Computing Environment
                 for Multicore Systems",
  journal =      j-TOPS,
  volume =       "21",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3195181",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3195181",
  abstract =     "Virtualization-based memory isolation has been widely
                 used as a security primitive in various security
                 systems to counter kernel-level attacks. In this
                 article, our in-depth analysis on this primitive shows
                 that its security is significantly undermined in the
                 multicore setting when other hardware resources for
                 computing are not enclosed within the isolation
                 boundary. We thus propose to construct a fully isolated
                 micro-computing environment (FIMCE) as a new primitive.
                 By virtue of its architectural niche, FIMCE not only
                 offers stronger security assurance than its
                 predecessor, but also features a flexible and
                 composable environment with support for peripheral
                 device isolation, thus greatly expanding the scope of
                 applications. In addition, FIMCE can be integrated with
                 recent technologies such as Intel Software Guard
                 Extensions (SGX) to attain even stronger security
                 guarantees. We have built a prototype of FIMCE with a
                 bare-metal hypervisor. To show the benefits of using
                 FIMCE as a building block, we have also implemented
                 four applications which are difficult to construct
                 using the existing memory isolation method. Experiments
                 with these applications demonstrate that FIMCE imposes
                 less than 1\% overhead on single-threaded applications,
                 while the maximum performance loss on multithreaded
                 applications is bounded by the degree of parallelism at
                 the processor level.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Farris:2018:VSV,
  author =       "Katheryn A. Farris and Ankit Shah and George Cybenko
                 and Rajesh Ganesan and Sushil Jajodia",
  title =        "{VULCON}: A System for Vulnerability Prioritization,
                 Mitigation, and Management",
  journal =      j-TOPS,
  volume =       "21",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3196884",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3196884",
  abstract =     "Vulnerability remediation is a critical task in
                 operational software and network security management.
                 In this article, an effective vulnerability management
                 strategy, called VULCON (VULnerability CONtrol), is
                 developed and evaluated. The strategy is based on two
                 fundamental performance metrics: (1)
                 time-to-vulnerability remediation (TVR) and (2) total
                 vulnerability exposure (TVE). VULCON takes as input
                 real vulnerability scan reports, metadata about the
                 discovered vulnerabilities, asset criticality, and
                 personnel resources. VULCON uses a mixed-integer
                 multiobjective optimization algorithm to prioritize
                 vulnerabilities for patching, such that the above
                 performance metrics are optimized subject to the given
                 resource constraints. VULCON has been tested on
                 multiple months of real scan data from a cyber-security
                 operations center (CSOC). Results indicate an overall
                 TVE reduction of 8.97\% when VULCON optimizes a
                 realistic security analyst workforce's effort.
                 Additionally, VULCON demonstrates that it can determine
                 monthly resources required to maintain a target TVE
                 score. As such, VULCON provides valuable operational
                 guidance for improving vulnerability response processes
                 in CSOCs.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Kim:2018:EPP,
  author =       "Jinsu Kim and Dongyoung Koo and Yuna Kim and Hyunsoo
                 Yoon and Junbum Shin and Sungwook Kim",
  title =        "Efficient Privacy-Preserving Matrix Factorization for
                 Recommendation via Fully Homomorphic Encryption",
  journal =      j-TOPS,
  volume =       "21",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3212509",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3212509",
  abstract =     "There are recommendation systems everywhere in our
                 daily life. The collection of personal data of users by
                 a recommender in the system may cause serious privacy
                 issues. In this article, we propose the first
                 privacy-preserving matrix factorization for
                 recommendation using fully homomorphic encryption. Our
                 protocol performs matrix factorization over encrypted
                 users' rating data and returns encrypted outputs so
                 that the recommendation system learns nothing on rating
                 values and resulting user/item profiles. Furthermore,
                 the protocol provides a privacy-preserving method to
                 optimize the tuning parameters that can be a business
                 benefit for the recommendation service providers. To
                 overcome the performance degradation caused by the use
                 of fully homomorphic encryption, we introduce a novel
                 data structure to perform computations over encrypted
                 vectors, which are essential for matrix factorization,
                 through secure two-party computation in part. Our
                 experiments demonstrate the efficiency of our
                 protocol.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Levesque:2018:THF,
  author =       "Fanny Lalonde L{\'e}vesque and Sonia Chiasson and Anil
                 Somayaji and Jos{\'e} M. Fernandez",
  title =        "Technological and Human Factors of Malware Attacks: A
                 Computer Security Clinical Trial Approach",
  journal =      j-TOPS,
  volume =       "21",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3210311",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3210311",
  abstract =     "The success (or failure) of malware attacks depends
                 upon both technological and human factors. The most
                 security-conscious users are susceptible to unknown
                 vulnerabilities, and even the best security mechanisms
                 can be circumvented as a result of user actions.
                 Although there has been significant research on the
                 technical aspects of malware attacks and defence, there
                 has been much less research on how users interact with
                 both malware and current malware defences. This article
                 describes a field study designed to examine the
                 interactions between users, antivirus (AV) software,
                 and malware as they occur on deployed systems. In a
                 fashion similar to medical studies that evaluate the
                 efficacy of a particular treatment, our experiment
                 aimed to assess the performance of AV software and the
                 human risk factors of malware attacks. The 4-month
                 study involved 50 home users who agreed to use laptops
                 that were instrumented to monitor for possible malware
                 attacks and gather data on user behaviour. This study
                 provided some very interesting, non-intuitive insights
                 into the efficacy of AV software and human risk
                 factors. AV performance was found to be lower under
                 real-life conditions compared to tests conducted in
                 controlled conditions. Moreover, computer expertise,
                 volume of network usage, and peer-to-peer activity were
                 found to be significant correlates of malware attacks.
                 We assert that this work shows the viability and the
                 merits of evaluating security products, techniques, and
                 strategies to protect systems through long-term field
                 studies with greater ecological validity than can be
                 achieved through other means.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Ye:2018:VBA,
  author =       "Guixin Ye and Zhanyong Tang and Dingyi Fang and
                 Xiaojiang Chen and Willy Wolff and Adam J. Aviv and
                 Zheng Wang",
  title =        "A Video-based Attack for {Android} Pattern Lock",
  journal =      j-TOPS,
  volume =       "21",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3230740",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3230740",
  abstract =     "Pattern lock is widely used for identification and
                 authentication on Android devices. This article
                 presents a novel video-based side channel attack that
                 can reconstruct Android locking patterns from video
                 footage filmed using a smartphone. As a departure from
                 previous attacks on pattern lock, this new attack does
                 not require the camera to capture any content displayed
                 on the screen. Instead, it employs a computer vision
                 algorithm to track the fingertip movement trajectory to
                 infer the pattern. Using the geometry information
                 extracted from the tracked fingertip motions, the
                 method can accurately infer a small number of (often
                 one) candidate patterns to be tested by an attacker. We
                 conduct extensive experiments to evaluate our approach
                 using 120 unique patterns collected from 215
                 independent users. Experimental results show that the
                 proposed attack can reconstruct over 95\% of the
                 patterns in five attempts. We discovered that, in
                 contrast to most people's belief, complex patterns do
                 not offer stronger protection under our attacking
                 scenarios. This is demonstrated by the fact that we are
                  able to break all but one complex pattern (with a
                 97.5\% success rate) as opposed to 60\% of the simple
                 patterns in the first attempt. We demonstrate that this
                 video-side channel is a serious concern for not only
                 graphical locking patterns but also PIN-based
                 passwords, as algorithms and analysis developed from
                 the attack can be easily adapted to target PIN-based
                 passwords. As a countermeasure, we propose to change
                 the way the Android locking pattern is constructed and
                 used. We show that our proposal can successfully defeat
                 this video-based attack. We hope the results of this
                 article can encourage the community to revisit the
                 design and practical use of Android pattern lock.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Zhang:2018:VGP,
  author =       "Yupeng Zhang and Charalampos Papamanthou and Jonathan
                 Katz",
  title =        "Verifiable Graph Processing",
  journal =      j-TOPS,
  volume =       "21",
  number =       "4",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3233181",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3233181",
  abstract =     "We consider a scenario in which a data owner
                 outsources storage of a large graph to an untrusted
                 server; the server performs computations on this graph
                 in response to queries from a client (whether the data
                 owner or others), and the goal is to ensure
                 verifiability of the returned results. Applying generic
                 verifiable computation (VC) would involve compiling
                 each graph computation to a circuit or a RAM program
                 and would incur large overhead, especially in the
                 proof-computation time. In this work, we address the
                 above by designing, building, and evaluating Alitheia,
                 a VC system tailored for graph queries such as
                 computing shortest paths, longest paths, and maximum
                 flows. The underlying principle of Alitheia is to
                 minimize the use of generic VC techniques by leveraging
                 various algorithmic approaches specific for graphs.
                 This leads to both theoretical and practical
                 improvements. Asymptotically, it improves the
                 complexity of proof computation by at least a
                 logarithmic factor. On the practical side, our system
                 achieves significant performance improvements over
                 current state-of-the-art VC systems (up to a
                 10-orders-of-magnitude improvement in proof-computation
                 time, and a 99.9\% reduction in server storage), while
                 scaling to 200,000-node graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Madi:2018:IAV,
  author =       "Taous Madi and Yosr Jarraya and Amir Alimohammadifar
                 and Suryadipta Majumdar and Yushun Wang and Makan
                 Pourzandi and Lingyu Wang and Mourad Debbabi",
  title =        "{ISOTOP}: Auditing Virtual Networks Isolation Across
                 Cloud Layers in {OpenStack}",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3267339",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3267339",
  abstract =     "Multi-tenancy in the cloud is a double-edged sword.
                 While it enables cost-effective resource sharing, it
                 increases security risks for the hosted applications.
                 Indeed, multiplexing virtual resources belonging to
                 different tenants on the same physical substrate may
                 lead to critical security concerns such as
                 cross-tenants data leakage and denial of service.
                 Particularly, virtual networks isolation failures are
                 among the foremost security concerns in the cloud. To
                 remedy these, automated tools are needed to verify
                 security mechanisms compliance with relevant security
                 policies and standards. However, auditing virtual
                 networks isolation is challenging due to the dynamic
                 and layered nature of the cloud. Particularly,
                 inconsistencies in network isolation mechanisms across
                 cloud-stack layers, namely, the infrastructure
                 management and the implementation layers, may lead to
                 virtual networks isolation breaches that are
                 undetectable at a single layer. In this article, we
                 propose an offline automated framework for auditing
                 consistent isolation between virtual networks in
                 OpenStack-managed cloud spanning over overlay and layer
                 2 by considering both cloud layers' views. To capture
                 the semantics of the audited data and its relation to
                 consistent isolation requirement, we devise a
                 multi-layered model for data related to each
                 cloud-stack layer's view. Furthermore, we integrate our
                 auditing system into OpenStack, and present our
                 experimental results on assessing several properties
                 related to virtual network isolation and consistency.
                 Our results show that our approach can be successfully
                 used to detect virtual network isolation breaches for
                 large OpenStack-based data centers in reasonable
                 time.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Reaves:2018:CSS,
  author =       "Bradley Reaves and Luis Vargas and Nolen Scaife and
                 Dave Tian and Logan Blue and Patrick Traynor and Kevin
                 R. B. Butler",
  title =        "Characterizing the Security of the {SMS} Ecosystem
                 with Public Gateways",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3268932",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3268932",
  abstract =     "Recent years have seen the Short Message Service (SMS)
                 become a critical component of the security
                 infrastructure, assisting with tasks including identity
                 verification and second-factor authentication. At the
                 same time, this messaging infrastructure has become
                 dramatically more open and connected to public networks
                 than ever before. However, the implications of this
                 openness, the security practices of benign services,
                 and the malicious misuse of this ecosystem are not well
                 understood. In this article, we provide a comprehensive
                 longitudinal study to answer these questions, analyzing
                 over 900,000 text messages sent to public online SMS
                 gateways over the course of 28 months. From this data,
                 we uncover the geographical distribution of spam
                 messages, study SMS as a transmission medium of
                 malicious content, and find that changes in benign and
                 malicious behaviors in the SMS ecosystem have been
                 minimal during our collection period. The key takeaways
                 of this research show many services sending sensitive
                 security-based messages through an unencrypted medium,
                 implementing low entropy solutions for one-use codes,
                 and behaviors indicating that public gateways are
                 primarily used for evading account creation policies
                 that require verified phone numbers. This latter
                 finding has significant implications for combating
                 phone-verified account fraud and demonstrates that such
                 evasion will continue to be difficult to detect and
                 prevent.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Jansen:2018:KKI,
  author =       "Rob Jansen and Matthew Traudt and John Geddes and
                 Chris Wacek and Micah Sherr and Paul Syverson",
  title =        "{KIST}: Kernel-Informed Socket Transport for {Tor}",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3278121",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3278121",
  abstract =     "Tor's growing popularity and user diversity has
                 resulted in network performance problems that are not
                 well understood, though performance is understood to be
                 a significant factor in Tor's security. A large body of
                 work has attempted to solve performance problems
                 without a complete understanding of where congestion
                 occurs in Tor. In this article, we first study
                 congestion in Tor at individual relays as well as along
                 the entire end-to-end Tor path and find that congestion
                 occurs almost exclusively in egress kernel socket
                 buffers. We then analyze Tor's socket interactions and
                 discover two major contributors to Tor's congestion:
                 Tor writes sockets sequentially, and Tor writes as much
                 as possible to each socket. To improve Tor's
                 performance, we design, implement, and test KIST: a new
                 socket management algorithm that uses real-time kernel
                 information to dynamically compute the amount to write
                 to each socket while considering all circuits of all
                 writable sockets when scheduling cells. We find that,
                 in the medians, KIST reduces circuit congestion by more
                 than 30\%, reduces network latency by 18\%, and
                 increases network throughput by nearly 10\%. We also
                 find that client and relay performance with KIST
                 improves as more relays deploy it and as network load
                 and packet loss rates increase. We analyze the security
                 of KIST and find an acceptable performance and security
                 tradeoff, as it does not significantly affect the
                 outcome of well-known latency, throughput, and traffic
                 correlation attacks. KIST has been merged and
                 configured as the default socket scheduling algorithm
                 in Tor version 0.3.2.1-alpha (released September 18,
                 2017) and became stable in Tor version 0.3.2.9
                 (released January 9, 2018). While our focus is Tor, our
                 techniques and observations should help analyze and
                 improve overlay and application performance, both for
                 security applications and in general.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Sluganovic:2018:ARE,
  author =       "Ivo Sluganovic and Marc Roeschlin and Kasper B.
                 Rasmussen and Ivan Martinovic",
  title =        "Analysis of Reflexive Eye Movements for Fast
                 Replay-Resistant Biometric Authentication",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3281745",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3281745",
  abstract =     "Eye tracking devices have recently become increasingly
                 popular as an interface between people and
                 consumer-grade electronic devices. Due to the fact that
                 human eyes are fast, responsive, and carry information
                  unique to an individual, analyzing a person's gaze is
                 particularly attractive for rapid biometric
                 authentication. Unfortunately, previous proposals for
                 gaze-based authentication systems either suffer from
                  high error rates or require long authentication times.
                 We build on the fact that some eye movements can be
                 reflexively and predictably triggered and develop an
                 interactive visual stimulus for elicitation of
                 reflexive eye movements that support the extraction of
                 reliable biometric features in a matter of seconds,
                 without requiring any memorization or cognitive effort
                 on the part of the user. As an important benefit, our
                 stimulus can be made unique for every authentication
                 attempt and thus incorporated in a challenge-response
                 biometric authentication system. This allows us to
                 prevent replay attacks, which are possibly the most
                 applicable attack vectors against biometric
                 authentication. Using a gaze tracking device, we build
                 a prototype of our system and perform a series of
                 systematic user experiments with 30 participants from
                 the general public. We thoroughly analyze various
                 system parameters and evaluate the performance and
                 security guarantees under several different attack
                 scenarios. The results show that our system matches or
                 surpasses existing gaze-based authentication methods in
                 achieved equal error rates (6.3\%) while achieving
                 significantly lower authentication times (5s).",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Pomonis:2018:KPA,
  author =       "Marios Pomonis and Theofilos Petsios and Angelos D.
                 Keromytis and Michalis Polychronakis and Vasileios P.
                 Kemerlis",
  title =        "Kernel Protection Against Just-In-Time Code Reuse",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3277592",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3277592",
  abstract =     "The abundance of memory corruption and disclosure
                 vulnerabilities in kernel code necessitates the
                 deployment of hardening techniques to prevent privilege
                 escalation attacks. As stricter memory isolation
                 mechanisms between the kernel and user space become
                 commonplace, attackers increasingly rely on code reuse
                 techniques to exploit kernel vulnerabilities. Contrary
                 to similar attacks in more restrictive settings, as in
                 web browsers, in kernel exploitation, non-privileged
                 local adversaries have great flexibility in abusing
                 memory disclosure vulnerabilities to dynamically
                 discover, or infer, the location of code snippets in
                 order to construct code-reuse payloads. Recent studies
                 have shown that the coupling of code diversification
                 with the enforcement of a ``read XOR execute'' (R$
                 \caret $X) memory safety policy is an effective defense
                 against the exploitation of userland software, but so
                 far this approach has not been applied for the
                 protection of the kernel itself. In this article, we
                 fill this gap by presenting kR$ \caret $X: a
                 kernel-hardening scheme based on execute-only memory
                 and code diversification. We study a previously
                 unexplored point in the design space, where a
                 hypervisor or a super-privileged component is not
                 required. Implemented mostly as a set of GCC plugins,
                 kR$ \caret $X is readily applicable to x86 Linux
                 kernels (both 32b and 64b) and can benefit from
                 hardware support (segmentation on x86, MPX on x86-64)
                 to optimize performance. In full protection mode, kR$
                 \caret $X incurs a low runtime overhead of 4.04\%,
                 which drops to 2.32\% when MPX is available, and 1.32\%
                 when memory segmentation is in use.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Ugarte-Pedrero:2018:CLD,
  author =       "Xabier Ugarte-Pedrero and Mariano Graziano and Davide
                 Balzarotti",
  title =        "A Close Look at a Daily Dataset of Malware Samples",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3291061",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3291061",
  abstract =     "The number of unique malware samples is growing out of
                 control. Over the years, security companies have
                 designed and deployed complex infrastructures to
                 collect and analyze this overwhelming number of
                 samples. As a result, a security company can collect
                 more than 1M unique files per day only from its
                 different feeds. These are automatically stored and
                 processed to extract actionable information derived
                 from static and dynamic analysis. However, only a tiny
                 amount of this data is interesting for security
                 researchers and attracts the interest of a human
                 expert. To the best of our knowledge, nobody has
                 systematically dissected these datasets to precisely
                 understand what they really contain. The security
                 community generally discards the problem because of the
                 alleged prevalence of uninteresting samples. In this
                 article, we guide the reader through a step-by-step
                  analysis of the hundreds of thousands of Windows
                 executables collected in one day from these feeds. Our
                 goal is to show how a company can employ existing
                 state-of-the-art techniques to automatically process
                 these samples and then perform manual experiments to
                 understand and document what is the real content of
                 this gigantic dataset. We present the filtering steps,
                 and we discuss in detail how samples can be grouped
                 together according to their behavior to support manual
                 verification. Finally, we use the results of this
                 measurement experiment to provide a rough estimate of
                 both the human and computer resources that are required
                 to get to the bottom of the catch of the day.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Modersheim:2018:ABP,
  author =       "Sebastian M{\"o}dersheim and Luca Vigan{\`o}",
  title =        "Alpha--Beta Privacy",
  journal =      j-TOPS,
  volume =       "22",
  number =       "1",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2018",
  DOI =          "https://doi.org/10.1145/3289255",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:24 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3289255",
  abstract =     "The formal specification of privacy goals in symbolic
                 protocol models has proved to be not quite trivial so
                 far. The most widely used approach in formal methods is
                 based on the static equivalence of frames in the
                 applied pi-calculus, basically asking whether or not
                 the intruder is able to distinguish two given worlds.
                 But then a subtle question emerges: How can we be sure
                 that we have specified all pairs of worlds to properly
                 reflect our intuitive privacy goal? To address this
                 problem, we introduce in this article a novel and
                  declarative way to specify privacy goals, called
                  $(\alpha, \beta)$-privacy. This new approach is based
                  on specifying two formulae $\alpha$ and $\beta$ in
                  first-order logic with Herbrand universes, where
                  $\alpha$ reflects the intentionally released
                  information and $\beta$ includes the actual
                  cryptographic (``technical'') messages the intruder
                  can see. Then $(\alpha, \beta)$-privacy means that
                  the intruder cannot derive any ``nontechnical''
                  statement from $\beta$ that he cannot derive from
                  $\alpha$ already. We describe by a variety of
                  examples how this notion can be used in practice.
                  Even though $(\alpha, \beta)$-privacy does not
                  directly contain a notion of distinguishing between
                  worlds, there is a close relationship to static
                  equivalence of frames that we investigate formally.
                  This allows us to justify (and criticize) the
                  specifications that are currently used in
                  verification tools and obtain a decision procedure
                  for a large fragment of $(\alpha, \beta)$-privacy.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Kreutz:2019:ALC,
  author =       "Diego Kreutz and Jiangshan Yu and Fernando M. V. Ramos
                 and Paulo Esteves-Verissimo",
  title =        "{ANCHOR}: Logically Centralized Security for
                 Software-Defined Networks",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3301305",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3301305",
  abstract =     "Software-defined networking (SDN) decouples the
                 control and data planes of traditional networks,
                 logically centralizing the functional properties of the
                 network in the SDN controller. While this
                 centralization brought advantages such as a faster pace
                 of innovation, it also disrupted some of the natural
                 defenses of traditional architectures against different
                 threats. The literature on SDN has mostly been
                 concerned with the functional side, despite some
                 specific works concerning non-functional properties
                 such as security or dependability. Though addressing
                 the latter in an ad-hoc, piecemeal way may work, it
                 will most likely lead to efficiency and effectiveness
                 problems. We claim that the enforcement of
                 non-functional properties as a pillar of SDN robustness
                 calls for a systemic approach. We further advocate, for
                 its materialization, the reiteration of the successful
                 formula behind SDN: `logical centralization'. As a
                 general concept, we propose anchor, a subsystem
                 architecture that promotes the logical centralization
                 of non-functional properties. To show the effectiveness
                 of the concept, we focus on security in this article:
                 we identify the current security gaps in SDNs and we
                 populate the architecture middleware with the
                 appropriate security mechanisms in a global and
                 consistent manner. Essential security mechanisms
                 provided by anchor include reliable entropy and
                 resilient pseudo-random generators, and protocols for
                 secure registration and association of SDN devices. We
                 claim and justify in the article that centralizing such
                 mechanisms is key for their effectiveness by allowing
                 us to define and enforce global policies for those
                 properties; reduce the complexity of controllers and
                 forwarding devices; ensure higher levels of robustness
                 for critical services; foster interoperability of the
                 non-functional property enforcement mechanisms; and
                 promote the security and resilience of the architecture
                 itself. We discuss design and implementation aspects,
                 and we prove and evaluate our algorithms and
                 mechanisms, including the formalisation of the main
                 protocols and the verification of their core security
                 properties using the Tamarin prover.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Pagani:2019:ITD,
  author =       "Fabio Pagani and Oleksii Fedorov and Davide
                 Balzarotti",
  title =        "Introducing the Temporal Dimension to Memory
                 Forensics",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3310355",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3310355",
  abstract =     "Kickstarted by the Digital Forensic Research Workshop
                 (DFRWS) conference in 2005, modern memory analysis is
                  now one of the most active areas of computer forensics and
                 it mostly focuses on techniques to locate key operating
                 system data structures and extract high-level
                 information. These techniques work on the assumption
                 that the information inside a memory dump is consistent
                 and the copy of the physical memory was obtained in an
                 atomic operation. Unfortunately, this is seldom the
                 case in real investigations, where software acquisition
                 tools record information while the rest of the system
                 is running. Thus, since the content of the memory is
                 changing very rapidly, the resulting memory dump may
                 contain inconsistent data. While this problem is known,
                 its consequences are unclear and often overlooked.
                 Unfortunately, errors can be very subtle and can affect
                 the results of an analysis in ways that are difficult
                 to detect. In this article, we argue that memory
                 forensics should also consider the time in which each
                 piece of data was acquired. This new temporal dimension
                 provides a preliminary way to assess the reliability of
                 a given result and opens the door to new research
                 directions that can minimize the effect of the
                 acquisition time or detect inconsistencies. To support
                 our hypothesis, we conducted several experiments to
                 show that inconsistencies are very frequent and can
                 negatively impact an analysis. We then discuss
                 modifications we made to popular memory forensic tools
                 to make the temporal dimension explicit during the
                 analysis and to minimize its effect by resorting to a
                 locality-based acquisition.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Kwon:2019:SEI,
  author =       "Donghyun Kwon and Hayoon Yi and Yeongpil Cho and
                 Yunheung Paek",
  title =        "Safe and Efficient Implementation of a Security System
                 on {ARM} using Intra-level Privilege Separation",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3309698",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3309698",
  abstract =     "Security monitoring has long been considered as a
                 fundamental mechanism to mitigate the damage of a
                 security attack. Recently, intra-level security systems
                 have been proposed that can efficiently and securely
                 monitor system software without any involvement of more
                 privileged entity. Unfortunately, there exists no full
                 intra-level security system that can universally
                 operate at any privilege level on ARM. However, as
                 malware and attacks increase against virtually every
                 level of privileged software including an OS, a
                 hypervisor, and even the highest privileged software
                 armored by TrustZone, we have been motivated to develop
                 an intra-level security system, named Hilps. Hilps
                  realizes a true intra-level scheme in all these levels of
                 privileged software on ARM by elaborately exploiting a
                 new hardware feature of ARM's latest 64-bit
                 architecture, called TxSZ, that enables elastic
                 adjustment of the accessible virtual address range.
                 Furthermore, Hilps newly supports the sandbox mechanism
                 that provides security tools with individually isolated
                 execution environments, thereby minimizing security
                 threats from untrusted security tools. We have
                 implemented a prototype of Hilps on a real machine. The
                 experimental results demonstrate that Hilps is quite
                 promising for practical use in real deployments.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Woo:2019:UEM,
  author =       "Simon S. Woo and Ron Artstein and Elsi Kaiser and Xiao
                 Le and Jelena Mirkovic",
  title =        "Using Episodic Memory for User Authentication",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3308992",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3308992",
  abstract =     "Passwords are widely used for user authentication, but
                 they are often difficult for a user to recall, easily
                 cracked by automated programs, and heavily reused.
                 Security questions are also used for secondary
                 authentication. They are more memorable than passwords,
                 because the question serves as a hint to the user, but
                 they are very easily guessed. We propose a new
                 authentication mechanism, called ``life-experience
                 passwords (LEPs).'' Sitting somewhere between passwords
                 and security questions, an LEP consists of several
                  facts about a user-chosen life event---such as a trip, a
                 graduation, a wedding, and so on. At LEP creation, the
                 system extracts these facts from the user's input and
                 transforms them into questions and answers. At
                 authentication, the system prompts the user with
                 questions and matches the answers with the stored ones.
                 We show that question choice and design make LEPs much
                 more secure than security questions and passwords,
                 while the question-answer format promotes low password
                 reuse and high recall. Specifically, we find that: (1)
                  LEPs are $10^9$--$10^{14}$ $ \times $ stronger than an
                 ideal, randomized, eight-character password; (2) LEPs
                 are up to 3 $ \times $ more memorable than passwords
                 and on par with security questions; and (3) LEPs are
                 reused half as often as passwords. While both LEPs and
                 security questions use personal experiences for
                 authentication, LEPs use several questions that are
                 closely tailored to each user. This increases LEP
                 security against guessing attacks. In our evaluation,
                 only 0.7\% of LEPs were guessed by casual friends, and
                  9.5\% by family members or close friends---roughly half
                 of the security question guessing rate. On the
                 downside, LEPs take around 5 $ \times $ longer to input
                 than passwords. So, these qualities make LEPs suitable
                 for multi-factor authentication at high-value servers,
                 such as financial or sensitive work servers, where
                 stronger authentication strength is needed.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Noh:2019:TBS,
  author =       "Juhwan Noh and Yujin Kwon and Yunmok Son and Hocheol
                 Shin and Dohyun Kim and Jaeyeong Choi and Yongdae Kim",
  title =        "{Tractor Beam}: Safe-hijacking of Consumer Drones with
                 Adaptive {GPS} Spoofing",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3309735",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3309735",
  abstract =     "The consumer drone market is booming. Consumer drones
                 are predominantly used for aerial photography; however,
                 their use has been expanding because of their autopilot
                 technology. Unfortunately, terrorists have also begun
                 to use consumer drones for kamikaze bombing and
                 reconnaissance. To protect against such threats,
                 several companies have started ``anti-drone'' services
                 that primarily focus on disrupting or incapacitating
                 drone operations. However, the approaches employed are
                 inadequate, because they make any drone that has
                 intruded stop and remain over the protected area. We
                 specify this issue by introducing the concept of
                 safe-hijacking, which enables a hijacker to expel the
                 intruding drone from the protected area remotely. As a
                 safe-hijacking strategy, we investigated whether
                 consumer drones in the autopilot mode can be hijacked
                 via adaptive GPS spoofing. Specifically, as consumer
                 drones activate GPS fail-safe and change their flight
                 mode whenever a GPS error occurs, we performed black-
                 and white-box analyses of GPS fail-safe flight mode and
                 the following behavior after GPS signal recovery of
                 existing consumer drones. Based on our analyses
                 results, we developed a taxonomy of consumer drones
                 according to these fail-safe mechanisms and designed
                 safe-hijacking strategies for each drone type.
                 Subsequently, we applied these strategies to four
                 popular drones: DJI Phantom 3 Standard, DJI Phantom 4,
                 Parrot Bebop 2, and 3DR Solo. The results of field
                 experiments and software simulations verified the
                 efficacy of our safe-hijacking strategies against these
                 drones and demonstrated that the strategies can force
                 them to move in any direction with high accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Ruoti:2019:USF,
  author =       "Scott Ruoti and Jeff Andersen and Luke Dickinson and
                 Scott Heidbrink and Tyler Monson and Mark O'Neill and
                 Ken Reese and Brad Spendlove and Elham Vaziripour and
                 Justin Wu and Daniel Zappala and Kent Seamons",
  title =        "A Usability Study of Four Secure Email Tools Using
                 Paired Participants",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "13:1--13:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3313761",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3313761",
  abstract =     "Secure email is increasingly being touted as usable by
                 novice users, with a push for adoption based on recent
                 concerns about government surveillance. To determine
                 whether secure email is ready for grassroots adoption,
                 we employ a laboratory user study that recruits pairs
                 of novice users to install and use several of the
                 latest systems to exchange secure messages. We present
                 both quantitative and qualitative results from 28 pairs
                 of novices as they use Private WebMail (Pwm), Tutanota,
                 and Virtru and 10 pairs of novices as they use
                 Mailvelope. Participants report being more at ease with
                 this type of study and better able to cope with
                 mistakes since both participants are ``on the same
                 page.'' We find that users prefer integrated solutions
                 over depot-based solutions and that tutorials are
                 important in helping first-time users. Finally, our
                 results demonstrate that Pretty Good Privacy using
                 manual key management is still unusable for novice
                 users, with 9 of 10 participant pairs failing to
                 complete the study.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Onwuzurike:2019:MDA,
  author =       "Lucky Onwuzurike and Enrico Mariconti and Panagiotis
                 Andriotis and Emiliano {De Cristofaro} and Gordon Ross
                 and Gianluca Stringhini",
  title =        "{MaMaDroid}: Detecting {Android} Malware by Building
                 {Markov} Chains of Behavioral Models (Extended
                 Version)",
  journal =      j-TOPS,
  volume =       "22",
  number =       "2",
  pages =        "14:1--14:??",
  month =        apr,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3313391",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3313391",
  abstract =     "As Android has become increasingly popular, so has
                 malware targeting it, thus motivating the research
                 community to propose different detection techniques.
                 However, the constant evolution of the Android
                 ecosystem, and of malware itself, makes it hard to
                 design robust tools that can operate for long periods
                 of time without the need for modifications or costly
                 re-training. Aiming to address this issue, we set to
                 detect malware from a behavioral point of view, modeled
                  as the sequence of abstracted API calls. We introduce
                  MAMADROID, a static-analysis-based system that abstracts
                 app's API calls to their class, package, or family, and
                 builds a model from their sequences obtained from the
                 call graph of an app as Markov chains. This ensures
                 that the model is more resilient to API changes and the
                 features set is of manageable size. We evaluate
                 MAMADROID using a dataset of 8.5K benign and 35.5K
                 malicious apps collected over a period of 6 years,
                 showing that it effectively detects malware (with up to
                 0.99 F-measure) and keeps its detection capabilities
                 for long periods of time (up to 0.87 F-measure 2 years
                 after training). We also show that MAMADROID remarkably
                 overperforms DROIDAPIMINER, a state-of-the-art
                  detection system that relies on the frequency of (raw)
                  API calls. Aiming to assess whether MAMADROID's
                 effectiveness mainly stems from the API abstraction or
                 from the sequencing modeling, we also evaluate a
                  variant of it that uses frequency (instead of
                  sequences) of abstracted API calls. We find that it is
                 not as accurate, failing to capture maliciousness when
                 trained on malware samples that include API calls that
                 are equally or more frequently used by benign apps.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Rao:2019:HPR,
  author =       "Fang-Yu Rao and Jianneng Cao and Elisa Bertino and
                 Murat Kantarcioglu",
  title =        "Hybrid Private Record Linkage: Separating
                 Differentially Private Synopses from Matching Records",
  journal =      j-TOPS,
  volume =       "22",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3318462",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3318462",
  abstract =     "Private record linkage protocols allow multiple
                 parties to exchange matching records, which refer to
                 the same entities or have similar values, while keeping
                 the non-matching ones secret. Conventional protocols
                 are based on computationally expensive cryptographic
                 primitives and therefore do not scale. To address these
                 scalability issues, hybrid protocols have been proposed
                 that combine differential privacy techniques with
                 secure multiparty computation techniques. However, a
                 drawback of such protocols is that they disclose to the
                 parties both the matching records and the
                 differentially private synopses of the datasets
                 involved in the linkage. Consequently, differential
                 privacy is no longer always satisfied. To address this
                 issue, we propose a novel framework that separates the
                 private synopses from the matching records. The two
                 parties do not access the synopses directly, but still
                 use them to efficiently link records. We theoretically
                 prove the security of our framework under the
                 state-of-the-art privacy notion of differential privacy
                 for record linkage (DPRL). In addition, we develop a
                 simple but effective strategy for releasing private
                 synopses. Extensive experimental results show that our
                 framework is superior to the existing methods in terms
                 of efficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Sharif:2019:GFA,
  author =       "Mahmood Sharif and Sruti Bhagavatula and Lujo Bauer
                 and Michael K. Reiter",
  title =        "A General Framework for Adversarial Examples with
                 Objectives",
  journal =      j-TOPS,
  volume =       "22",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jul,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3317611",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3317611",
  abstract =     "Images perturbed subtly to be misclassified by neural
                 networks, called adversarial examples, have emerged as
                 a technically deep challenge and an important concern
                 for several application domains. Most research on
                 adversarial examples takes as its only constraint that
                 the perturbed images are similar to the originals.
                 However, real-world application of these ideas often
                 requires the examples to satisfy additional objectives,
                 which are typically enforced through custom
                 modifications of the perturbation process. In this
                 article, we propose adversarial generative nets (AGNs),
                 a general methodology to train a generator neural
                 network to emit adversarial examples satisfying desired
                 objectives. We demonstrate the ability of AGNs to
                 accommodate a wide range of objectives, including
                 imprecise ones difficult to model, in two application
                 domains. In particular, we demonstrate physical
                  adversarial examples---eyeglass frames designed to
                  fool face recognition---with better robustness,
                 inconspicuousness, and scalability than previous
                 approaches, as well as a new attack to fool a
                 handwritten-digit classifier.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Yan:2019:DAW,
  author =       "Chao Yan and Bo Li and Yevgeniy Vorobeychik and Aron
                 Laszka and Daniel Fabbri and Bradley Malin",
  title =        "Database Audit Workload Prioritization via Game
                 Theory",
  journal =      j-TOPS,
  volume =       "22",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3323924",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3323924",
  abstract =     "The quantity of personal data that is collected,
                 stored, and subsequently processed continues to grow
                 rapidly. Given its sensitivity, ensuring privacy
                 protections has become a necessary component of
                 database management. To enhance protection, a number of
                 mechanisms have been developed, such as audit logging
                 and alert triggers, which notify administrators about
                 suspicious activities. However, this approach is
                 limited. First, the volume of alerts is often
                 substantially greater than the auditing capabilities of
                 organizations. Second, strategic attackers can attempt
                 to disguise their actions or carefully choose targets,
                  thus hiding illicit activities. In this article, we
                 introduce an auditing approach that accounts for
                 adversarial behavior by (1) prioritizing the order in
                 which types of alerts are investigated and (2)
                 providing an upper bound on how much resource to
                 allocate for each type. Specifically, we model the
                 interaction between a database auditor and attackers as
                 a Stackelberg game. We show that even a highly
                 constrained version of such problem is NP-Hard. Then,
                 we introduce a method that combines linear programming,
                 column generation, and heuristic searching to derive an
                 auditing policy. On the synthetic data, we perform an
                 extensive evaluation on the approximation degree of our
                 solution with the optimal one. The two real datasets,
                 (1) 1.5 months of audit logs from Vanderbilt University
                 Medical Center and (2) a publicly available credit card
                 application dataset, are used to test the
                 policy-searching performance. The findings demonstrate
                 the effectiveness of the proposed methods for searching
                 the audit strategies, and our general approach
                 significantly outperforms non-game-theoretic
                 baselines.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Outkin:2019:GQT,
  author =       "Alexander V. Outkin and Brandon K. Eames and Meghan A.
                 Galiardi and Sarah Walsh and Eric D. Vugrin and Byron
                 Heersink and Jacob Hobbs and Gregory D. Wyss",
  title =        "{GPLADD}: Quantifying Trust in Government and
                 Commercial Systems: a Game-Theoretic Approach",
  journal =      j-TOPS,
  volume =       "22",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3326283",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3326283",
  abstract =     "Trust in a microelectronics-based system can be
                 characterized as the level of confidence that a system
                 is free of subversive alterations made during system
                 development, or that the development process of a
                 system has not been manipulated by a malicious
                 adversary. Trust in systems has become an increasing
                 concern over the past decade. This article presents a
                 novel game-theoretic framework, called GPLADD
                 (Graph-based Probabilistic Learning Attacker and
                 Dynamic Defender), for analyzing and quantifying system
                 trustworthiness at the end of the development process,
                 through the analysis of risk of development-time system
                 manipulation. GPLADD represents attacks and
                 attacker-defender contests over time. It treats time as
                 an explicit constraint and allows incorporating the
                 informational asymmetries between the attacker and
                 defender into analysis. GPLADD includes an explicit
                 representation of attack steps via multi-step attack
                 graphs, attacker and defender strategies, and player
                 actions at different times. GPLADD allows quantifying
                 the attack success probability over time and the
                 attacker and defender costs based on their capabilities
                 and strategies. This ability to quantify different
                 attacks provides an input for evaluation of trust in
                 the development process. We demonstrate GPLADD on an
                 example attack and its variants. We develop a method
                 for representing success probability for arbitrary
                 attacks and derive an explicit analytic
                 characterization of success probability for a specific
                 attack. We present a numeric Monte Carlo study of a
                 small set of attacks, quantify attack success
                 probabilities, attacker and defender costs, and
                 illustrate the options the defender has for limiting
                 the attack success and improving trust in the
                 development process.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Wedaj:2019:DDA,
  author =       "Samuel Wedaj and Kolin Paul and Vinay J. Ribeiro",
  title =        "{DADS}: Decentralized Attestation for Device Swarms",
  journal =      j-TOPS,
  volume =       "22",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3325822",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Sep 21 08:26:25 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3325822",
  abstract =     "We present a novel scheme called Decentralized
                 Attestation for Device Swarms (DADS), which is, to the
                 best of our knowledge, the first to accomplish
                 decentralized attestation in device swarms. Device
                 swarms are smart, mobile, and interconnected devices
                 that operate in large numbers and are likely to be part
                 of emerging applications in Cyber-Physical Systems
                 (CPS) and Industrial Internet of Things (IIoTs). Swarm
                 devices process and exchange safety, privacy, and
                 mission-critical information. Thus, it is important to
                 have a good code verification technique that scales to
                 device swarms and establishes trust among collaborating
                 devices. DADS has several advantages over current
                 state-of-the-art swarm attestation techniques: It is
                 decentralized, has no single point of failure, and can
                 handle changing topologies after nodes are compromised.
                 DADS assures system resilience to node
                 compromise/failure while guaranteeing only devices that
                 execute genuine code remain part of the group. We
                 conduct performance measurements of communication,
                 computation, memory, and energy using the TrustLite
                 embedded systems architecture in OMNeT++ simulation
                 environment. We show that the proposed approach can
                 significantly reduce communication cost and is very
                 efficient in terms of computation, memory, and energy
                 requirements. We also analyze security and show that
                 DADS is very effective and robust against various
                 attacks.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Cantali:2019:AMS,
  author =       "Gokcan Cantali and Orhan Ermis and Mehmet Ufuk
                 {\c{C}}aglayan and Cem Ersoy",
  title =        "Analytical Models for the Scalability of Dynamic
                 Group-key Agreement Protocols and Secure File Sharing
                 Systems",
  journal =      j-TOPS,
  volume =       "22",
  number =       "4",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3342998",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Wed Dec 18 14:55:10 MST 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3342998",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Jin:2019:RPP,
  author =       "Hongyu Jin and Panos Papadimitratos",
  title =        "Resilient Privacy Protection for Location-Based
                 Services through Decentralization",
  journal =      j-TOPS,
  volume =       "22",
  number =       "4",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3319401",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Wed Dec 18 14:55:10 MST 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3319401",
  abstract =     "Location-Based Services (LBSs) provide valuable
                 services, with convenient features for mobile users.
                 However, the location and other information disclosed
                 through each query to the LBS erodes user privacy. This
                 is a concern especially because LBS providers can be
                 honest-but-curious, collecting queries and tracking
                 users' whereabouts and inferring sensitive user data. This
                 motivated both centralized and decentralized location
                 privacy protection schemes for LBSs: anonymizing and
                 obfuscating LBS queries to not disclose exact
                 information, while still getting useful responses.
                 Decentralized schemes overcome disadvantages of
                 centralized schemes, eliminating anonymizers, and
                 enhancing users' control over sensitive information.
                 However, an insecure decentralized system could create
                 serious risks beyond private information leakage. More
                 so, attacking an improperly designed decentralized LBS
                 privacy protection scheme could be an effective and
                 low-cost step to breach user privacy. We address
                 exactly this problem, by proposing security
                 enhancements for mobile data sharing systems. We
                 protect user privacy while preserving accountability of
                 user activities, leveraging pseudonymous authentication
                 with mainstream cryptography. We show our scheme can be
                 deployed with off-the-shelf devices based on an
                 experimental evaluation of an implementation in a
                 static automotive testbed.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Powell:2019:MOH,
  author =       "Brian A. Powell",
  title =        "Malicious Overtones: Hunting Data Theft in the
                 Frequency Domain with One-class Learning",
  journal =      j-TOPS,
  volume =       "22",
  number =       "4",
  pages =        "22:1--22:??",
  month =        dec,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3360469",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Wed Dec 18 14:55:10 MST 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3360469",
  abstract =     "A method for detecting electronic data theft from
                 computer networks is described, capable of recognizing
                 patterns of remote exfiltration occurring over days to
                 weeks. Normal traffic flow data, in the form of a
                 host's ingress and egress bytes over time, is used to
                 train an ensemble of one-class learners. The detection
                 ensemble is modular, with individual classifiers
                 trained on different traffic features thought to
                 characterize malicious data transfers. We select
                 features that model the egress to ingress byte balance
                 over time, periodicity, short timescale irregularity,
                 and density of the traffic. The features are most
                 efficiently modeled in the frequency domain, which has
                 the added benefit that variable duration flows are
                 transformed to a fixed-size feature vector, and by
                 sampling the frequency space appropriately,
                 long-duration flows can be tested. When trained on days'
                 or weeks' worth of traffic from individual hosts, our
                 ensemble achieves a low false-positive rate.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Senarath:2019:WTU,
  author =       "Awanthika Senarath and Marthie Grobler and Nalin
                 Asanka Gamagedara Arachchilage",
  title =        "Will They Use It or Not? {Investigating} Software
                 Developers' Intention to Follow Privacy Engineering
                 Methodologies",
  journal =      j-TOPS,
  volume =       "22",
  number =       "4",
  pages =        "23:1--23:??",
  month =        dec,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3364224",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Wed Dec 18 14:55:10 MST 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3364224",
  abstract =     "With the increasing concerns over privacy in software
                 systems, there is a growing enthusiasm to develop
                 methods to support the development of privacy aware
                 software systems. Inadequate privacy in software system
                 designs could result in users losing their sensitive
                 data, such as health information and financial
                 information, which may cause financial and reputation
                 loss. Privacy Engineering Methodologies (PEMs) are
                 introduced into the software development processes with
                 the goal of guiding software developers to embed
                 privacy into the systems they design. However, for PEMs
                 to be successful it is imperative that software
                 developers have a positive intention to use PEMs.
                 Otherwise, developers may attempt to bypass the privacy
                 methodologies or use them partially and hence develop
                 software systems that may not protect user privacy
                 appropriately. To investigate the factors that affect
                 software developers' behavioural intention to follow
                 PEMs, in this article, we conducted a study with 149
                 software developers. Findings of the study show that
                 the usefulness of the PEM to the developers' existing
                 work is the strongest determinant that affects
                 software developers' intention to follow PEMs.
                 Moreover, the compatibility of the PEM with their way
                 of work and how the PEM demonstrates its results when
                 used were also found to be significant. These findings
                 provide important insights in understanding the
                 behaviour of software developers and how they perceive
                 PEMs. The findings could be used to assist
                 organisations and researchers to deploy PEMs and design
                 PEMs that are positively accepted by software
                 developers.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Cecconello:2019:STK,
  author =       "Stefano Cecconello and Alberto Compagno and Mauro
                 Conti and Daniele Lain and Gene Tsudik",
  title =        "{Skype \& Type}: Keyboard Eavesdropping in
                 {Voice-over-IP}",
  journal =      j-TOPS,
  volume =       "22",
  number =       "4",
  pages =        "24:1--24:??",
  month =        dec,
  year =         "2019",
  DOI =          "https://doi.org/10.1145/3365366",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Wed Dec 18 14:55:10 MST 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/citation.cfm?id=3365366",
  abstract =     "Voice-over-IP (VoIP) software are among the most
                 widely spread and pervasive software, counting millions
                 of monthly users. However, we argue that people ignore
                 the drawbacks of transmitting information along with
                 their voice, such as keystroke sounds --- as such sound
                 can reveal what someone is typing on a keyboard. In
                 this article, we present and assess a new keyboard
                 acoustic eavesdropping attack that involves VoIP,
                 called Skype \& Type (S\&T). Unlike previous attacks,
                 S\&T assumes a weak adversary model that is very
                 practical in many real-world settings. Indeed, S\&T is
                 very feasible, as it does not require (i) the attacker
                 to be physically close to the victim (either in person
                 or with a recording device) and (ii) precise profiling
                 of the victim's typing style and keyboard; moreover, it
                 can work with a very small amount of leaked keystrokes.
                 We observe that leakage of keystrokes during a VoIP
                 call is likely, as people often ``multi-task'' during
                 such calls. As expected, VoIP software acquires and
                 faithfully transmits all sounds, including emanations
                 of pressed keystrokes, which can include passwords and
                 other sensitive information. We show that one very
                 popular VoIP software (Skype) conveys enough audio
                 information to reconstruct the victim's
                 input --- keystrokes typed on the remote keyboard. Our
                 results demonstrate that, given some knowledge on the
                 victim's typing style and keyboard model, the attacker
                 attains top-5 accuracy of 91.7\% in guessing a random
                 key pressed by the victim. This work extends previous
                 results on S\&T, demonstrating that our attack is
                 effective with many different recording devices (such
                 as laptop microphones, headset microphones, and
                 smartphones located in proximity of the target
                 keyboard), diverse typing styles and speed, and is
                 particularly threatening when the victim is typing in a
                 known language.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Hoang:2020:MSO,
  author =       "Thang Hoang and Attila A. Yavuz and Jorge Guajardo",
  title =        "A Multi-server {ORAM} Framework with Constant Client
                 Bandwidth Blowup",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "1:1--1:35",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369108",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369108",
  abstract =     "Oblivious Random Access Machine (ORAM) allows a client
                 to hide the access pattern when accessing sensitive
                 data on a remote server. It is known that there exists
                 a logarithmic communication lower bound on any passive
                 ORAM construction, where the server \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}

@Article{Khan:2020:MAS,
  author =       "Hassan Khan and Urs Hengartner and Daniel Vogel",
  title =        "Mimicry Attacks on Smartphone Keystroke
                 Authentication",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "2:1--2:34",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3372420",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3372420",
  abstract =     "Keystroke behaviour-based authentication employs the
                 unique typing behaviour of users to authenticate them.
                 Recent such proposals for virtual keyboards on
                 smartphones employ diverse temporal, contact, and
                 spatial features to achieve over 95\% accuracy.
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}

@Article{Lanotte:2020:FAP,
  author =       "Ruggero Lanotte and Massimo Merro and Andrei Munteanu
                 and Luca Vigan{\`o}",
  title =        "A Formal Approach to Physics-based Attacks in
                 Cyber-physical Systems",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "3:1--3:41",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3373270",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3373270",
  abstract =     "We apply formal methods to lay and streamline
                 theoretical foundations to reason about Cyber-Physical
                 Systems (CPSs) and physics-based attacks, i.e., attacks
                 targeting physical devices. We focus on a formal
                 treatment of both integrity and denial of \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}

@Article{Belman:2020:DPT,
  author =       "Amith K. Belman and Vir V. Phoha",
  title =        "Discriminative Power of Typing Features on Desktops,
                 Tablets, and Phones for User Identification",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "4:1--4:36",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3377404",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3377404",
  abstract =     "Research in Keystroke-Dynamics (KD) has customarily
                 focused on temporal features without considering
                 context to generate user templates that are used in
                 authentication. Additionally, work on KD in hand-held
                 devices such as smart-phones and tablets have
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}

@Article{Karegar:2020:DUE,
  author =       "Farzaneh Karegar and John S{\"o}ren Pettersson and
                 Simone Fischer-H{\"u}bner",
  title =        "The Dilemma of User Engagement in Privacy Notices:
                 Effects of Interaction Modes and Habituation on User
                 Attention",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "5:1--5:38",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3372296",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3372296",
  abstract =     "Privacy notices and consent forms are the means of
                 conveying privacy policy information to users. In
                 Europe, a valid consent needs to be confirmed by a
                 clear affirmative action. Despite previous research, it
                 is not yet clear whether user engagement with
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}

@Article{Wu:2020:CPM,
  author =       "Fang-Jing Wu and Tie Luo",
  title =        "{CrowdPrivacy}: Publish More Useful Data with Less
                 Privacy Exposure in Crowdsourced Location-Based
                 Services",
  journal =      j-TOPS,
  volume =       "23",
  number =       "1",
  pages =        "6:1--6:25",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3375752",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Feb 15 07:50:03 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3375752",
  abstract =     "Location-based services (LBSs) typically crowdsource
                 geo-tagged data from mobile users. Collecting more data
                 will generally improve the utility for LBS providers;
                 however, it also leads to more privacy exposure of
                 users' mobility patterns. Although the \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "https://dl.acm.org/loi/tops",
}