%%% [stray Web-page badge text from capture, commented out: Valid HTML 4.0! Valid CSS!]
%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.00",
%%%     date            = "03 April 2017",
%%%     time            = "09:15:42 MST",
%%%     filename        = "tops.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "17465 952 5490 51369",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                       beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography, BibTeX, ACM Transactions
%%%                        on Privacy and Security (TOPS)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Privacy and
%%%                        Security (TOPS) (CODEN none, ISSN 2471-2566
%%%                        (print), 2471-2574 (electronic)), covering
%%%                        all journal issues from 2016--date.
%%%
%%%                        Publication began with volume 19, number 1,
%%%                        in 2016, as a continuation of the predecessor
%%%                        journal, ACM Transactions on Information and
%%%                        System Security.  The older journal is
%%%                        covered in a separate bibliography,
%%%                        tissec.bib.
%%%
%%%                        The journal has a Web site at
%%%
%%%                            http://dl.acm.org/citation.cfm?id=J1547
%%%
%%%                        At version 1.00, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2016 (   9)    2017 (   7)
%%%
%%%                             Article:         16
%%%
%%%                             Total entries:   16
%%%
%%%                        The initial draft was extracted from the
%%%                        journal Web site.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.   Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
%%% NOTE(review): bibnames.sty is presumably a companion file of
%%% personal-name markup macros used by entries -- confirm it ships
%%% alongside this bibliography.
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
%%% Maintainer acknowledgement string; every entry below references it
%%% via ``acknowledgement = ack-nhfb''.
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
%%% Full journal name defined once so it can be changed globally;
%%% entries reference it via ``journal = j-TOPS''.
@String{j-TOPS                  = "ACM Transactions on Privacy and Security
                                  (TOPS)"}

%%% ====================================================================
%%% Bibliography entries:
%%% Volume 19, number 1, article 1 (August 2016): continuous
%%% authentication via eye-movement biometrics.
@Article{Eberz:2016:LLE,
  author =       "Simon Eberz and Kasper B. Rasmussen and Vincent
                 Lenders and Ivan Martinovic",
  title =        "Looks Like {Eve}: Exposing Insider Threats Using Eye
                 Movement Biometrics",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2904018",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We introduce a novel biometric based on distinctive
                 eye movement patterns. The biometric consists of 20
                 features that allow us to reliably distinguish users
                 based on differences in these patterns. We leverage
                 this distinguishing power along with the ability to
                 gauge the users' task familiarity, that is, level of
                 knowledge, to address insider threats. In a controlled
                 experiment, we test how both time and task familiarity
                 influence eye movements and feature stability, and how
                 different subsets of features affect the classifier
                 performance. These feature subsets can be used to
                 tailor the eye movement biometric to different
                 authentication methods and threat models. Our results
                 show that eye movement biometrics support reliable and
                 stable continuous authentication of users. We
                 investigate different approaches in which an attacker
                 could attempt to use inside knowledge to mimic the
                 legitimate user. Our results show that while this
                 advance knowledge is measurable, it does not increase
                 the likelihood of successful impersonation. In order to
                 determine the time stability of our features, we repeat
                 the experiment twice within 2 weeks. The results
                 indicate that we can reliably authenticate users over
                 the entire period. We show that lower sampling rates
                 provided by low-cost hardware pose a challenge, but
                 that reliable authentication is possible even at the
                 rate of 50Hz commonly available with consumer-level
                 devices. In a second set of experiments, we evaluate
                 how our authentication system performs across a variety
                 of real-world tasks, including reading, writing, and
                 web browsing. We discuss the advantages and limitations
                 of our approach in detail and give practical insights
                 on the use of this biometric in a real-world
                 environment.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 1, article 2 (August 2016): XSS prevention via
%%% contextual script fingerprints in the browser.
@Article{Mitropoulos:2016:HTY,
  author =       "Dimitris Mitropoulos and Konstantinos Stroggylos and
                 Diomidis Spinellis and Angelos D. Keromytis",
  title =        "How to Train Your Browser: Preventing {XSS} Attacks
                 Using Contextual Script Fingerprints",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2939374",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Cross-Site Scripting (XSS) is one of the most common
                 web application vulnerabilities. It is therefore
                 sometimes referred to as the ``buffer overflow of the
                 web.'' Drawing a parallel from the current state of
                 practice in preventing unauthorized native code
                 execution (the typical goal in a code injection), we
                 propose a script whitelisting approach to tame
                 JavaScript-driven XSS attacks. Our scheme involves a
                 transparent script interception layer placed in the
                 browser's JavaScript engine. This layer is designed to
                 detect every script that reaches the browser, from
                 every possible route, and compare it to a list of valid
                 scripts for the site or page being accessed; scripts
                 not on the list are prevented from executing. To avoid
                 the false positives caused by minor syntactic changes
                 (e.g., due to dynamic code generation), our layer uses
                 the concept of contextual fingerprints when comparing
                 scripts. Contextual fingerprints are identifiers that
                 represent specific elements of a script and its
                 execution context. Fingerprints can be easily enriched
                 with new elements, if needed, to enhance the proposed
                 method's robustness. The list can be populated by the
                 website's administrators or a trusted third party. To
                 verify our approach, we have developed a prototype and
                 tested it successfully against an extensive array of
                 attacks that were performed on more than 50 real-world
                 vulnerable web applications. We measured the browsing
                 performance overhead of the proposed solution on eight
                 websites that make heavy use of JavaScript. Our
                 mechanism imposed an average overhead of 11.1\% on the
                 execution time of the JavaScript engine. When measured
                 as part of a full browsing session, and for all tested
                 websites, the overhead introduced by our layer was less
                 than 0.05\%. When script elements are altered or new
                 scripts are added on the server side, a new fingerprint
                 generation phase is required. To examine the temporal
                 aspect of contextual fingerprints, we performed a
                 short-term and a long-term experiment based on the same
                 websites. The former, showed that in a short period of
                 time (10 days), for seven of eight websites, the
                 majority of valid fingerprints stay the same (more than
                 92\% on average). The latter, though, indicated that,
                 in the long run, the number of fingerprints that do not
                 change is reduced. Both experiments can be seen as one
                 of the first attempts to study the feasibility of a
                 whitelisting approach for the web.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 1, article 3 (August 2016): detecting sensitive
%%% topic profiling by online systems.
%%% Correction: the first author's family name is the Irish compound
%%% surname ``Mac Aonghusa''; in First-Last form BibTeX would otherwise
%%% parse only ``Aonghusa'' as the surname, so the compound is braced
%%% to keep it intact for sorting and label generation.  The citation
%%% tag is left unchanged for stability of existing \cite commands.
@Article{Aonghusa:2016:DLG,
  author =       "P{\'o}l {Mac Aonghusa} and Douglas J. Leith",
  title =        "Don't Let {Google} Know {I}'m Lonely",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2937754",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "From buying books to finding the perfect partner, we
                 share our most intimate wants and needs with our
                 favourite online systems. But how far should we accept
                 promises of privacy in the face of personalized
                 profiling? In particular, we ask how we can improve
                 detection of sensitive topic profiling by online
                 systems. We propose a definition of privacy disclosure
                 that we call $ \epsilon $-indistinguishability, from
                 which we construct scalable, practical tools to assess
                 the learning potential from personalized content. We
                 demonstrate our results using openly available
                 resources, detecting a learning rate in excess of 98\%
                 for a range of sensitive topics during our
                 experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 2, article 4 (September 2016): Segugio,
%%% behavior-based tracking of malware-control domains in ISP DNS.
@Article{Rahbarinia:2016:EAB,
  author =       "Babak Rahbarinia and Roberto Perdisci and Manos
                 Antonakakis",
  title =        "Efficient and Accurate Behavior-Based Tracking of
                 Malware-Control Domains in Large {ISP} Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2960409",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we propose Segugio, a novel defense
                 system that allows for efficiently tracking the
                 occurrence of new malware-control domain names in very
                 large ISP networks. Segugio passively monitors the DNS
                 traffic to build a machine-domain bipartite graph
                 representing who is querying what. After labeling nodes
                 in this query behavior graph that are known to be
                 either benign or malware-related, we propose a novel
                 approach to accurately detect previously unknown
                 malware-control domains. We implemented a
                 proof-of-concept version of Segugio and deployed it in
                 large ISP networks that serve millions of users. Our
                 experimental results show that Segugio can track the
                 occurrence of new malware-control domains with up to
                 94\% true positives (TPs) at less than 0.1\% false
                 positives (FPs). In addition, we provide the following
                 results: (1) we show that Segugio can also detect
                 control domains related to new, previously unseen
                 malware families, with 85\% TPs at 0.1\% FPs; (2)
                 Segugio's detection models learned on traffic from a
                 given ISP network can be deployed into a different ISP
                 network and still achieve very high detection accuracy;
                 (3) new malware-control domains can be detected days or
                 even weeks before they appear in a large commercial
                 domain-name blacklist; (4) Segugio can be used to
                 detect previously unknown malware-infected machines in
                 ISP networks; and (5) we show that Segugio clearly
                 outperforms domain-reputation systems based on Belief
                 Propagation.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 2, article 5 (September 2016): rogue-certificate
%%% detection with deep neural networks.
@Article{Dong:2016:DRC,
  author =       "Zheng Dong and Kevin Kane and L. Jean Camp",
  title =        "Detection of Rogue Certificates from Trusted
                 Certificate Authorities Using Deep Neural Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975591",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Rogue certificates are valid certificates issued by a
                 legitimate certificate authority (CA) that are
                 nonetheless untrustworthy; yet trusted by web browsers
                 and users. With the current public key infrastructure,
                 there exists a window of vulnerability between the time
                 a rogue certificate is issued and when it is detected.
                 Rogue certificates from recent compromises have been
                 trusted for as long as weeks before detection and
                 revocation. Previous proposals to close this window of
                 vulnerability require changes in the infrastructure,
                 Internet protocols, or end user experience. We present
                 a method for detecting rogue certificates from trusted
                 CAs developed from a large and timely collection of
                 certificates. This method automates classification by
                 building machine-learning models with Deep Neural
                 Networks (DNN). Despite the scarcity of rogue instances
                 in the dataset, DNN produced a classification method
                 that is proven both in simulation and in the July 2014
                 compromise of the India CCA. We report the details of
                 the classification method and illustrate that it is
                 repeatable, such as with datasets obtained from
                 crawling. We describe the classification performance
                 under our current research deployment.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 2, article 6 (September 2016): ShMAC, MAC
%%% precomputation for secure memory.
%%% Correction: third author's surname capitalized as ``McLellan''
%%% (the ACM source lists ``Mclellan'').
@Article{Garay:2016:MPA,
  author =       "Juan A. Garay and Vladimir Kolesnikov and Rae
                 McLellan",
  title =        "{MAC} Precomputation with Applications to Secure
                 Memory",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "6:1--6:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2943780",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We present Shallow MAC (ShMAC), a fixed-input-length
                 message authentication code that performs most of the
                 computation prior to the availability of the message.
                 Specifically, ShMAC's message-dependent computation is
                 much faster and smaller in hardware than the evaluation
                 of a pseudorandom permutation (PRP) and can be
                 implemented by a small shallow circuit, while its
                 precomputation consists of one PRP evaluation. A main
                 building block for ShMAC is the notion of strong
                 differential uniformity (SDU), which we introduce and
                 which may be of independent interest. We show an
                 efficient SDU construction built from previously
                 considered differentially uniform functions. Our main
                 motivating application is a system architecture where a
                 hardware-secured processor uses memory controlled by an
                 adversary. We also present in technical detail a novel,
                 efficient approach to encrypting and authenticating
                 memory and discuss the associated tradeoffs, while
                 paying special attention to minimizing hardware costs
                 and the reduction of Dynamic Random Access Memory
                 latency.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 3, article 7 (December 2016): anonymization of
%%% hierarchical (semi-structured) data.
%%% Correction: the Web-extracted abstract garbled the math in
%%% ``( k -anonymity and l-diversity)''; reset in TeX math mode to
%%% match this file's convention (cf. $ \epsilon $-indistinguishability
%%% in entry Aonghusa:2016:DLG).
@Article{Ozalp:2016:PPP,
  author =       "Ismet Ozalp and Mehmet Emre Gursoy and Mehmet Ercan
                 Nergiz and Yucel Saygin",
  title =        "Privacy-Preserving Publishing of Hierarchical Data",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2976738",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Many applications today rely on storage and management
                 of semi-structured information, for example, XML
                 databases and document-oriented databases. These data
                 often have to be shared with untrusted third parties,
                 which makes individuals' privacy a fundamental problem.
                 In this article, we propose anonymization techniques
                 for privacy-preserving publishing of hierarchical data.
                 We show that the problem of anonymizing hierarchical
                 data poses unique challenges that cannot be readily
                 solved by existing mechanisms. We extend two standards
                 for privacy protection in tabular data ($k$-anonymity
                 and $l$-diversity) and apply them to hierarchical data.
                 We present utility-aware algorithms that enforce these
                 definitions of privacy using generalizations and
                 suppressions of data values. To evaluate our algorithms
                 and their heuristics, we experiment on synthetic and
                 real datasets obtained from two universities. Our
                 experiments show that we significantly outperform
                 related methods that provide comparable privacy
                 guarantees.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 3, article 8 (December 2016): workflow
%%% satisfiability with class-independent constraints.
@Article{Crampton:2016:WSP,
  author =       "Jason Crampton and Andrei Gagarin and Gregory Gutin
                 and Mark Jones and Magnus Wahlstr{\"o}m",
  title =        "On the Workflow Satisfiability Problem with
                 Class-Independent Constraints for Hierarchical
                 Organizations",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2988239",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A workflow specification defines a set of steps, a set
                 of users, and an access control policy. The policy
                 determines which steps a user is authorized to perform
                 and imposes constraints on which sets of users can
                 perform which sets of steps. The workflow
                 satisfiability problem (WSP) is the problem of
                 determining whether there exists an assignment of users
                 to workflow steps that satisfies the policy. Given the
                 computational hardness of WSP and its importance in the
                 context of workflow management systems, it is important
                 to develop algorithms that are as efficient as possible
                 to solve WSP. In this article, we study the
                 fixed-parameter tractability of WSP in the presence of
                 class-independent constraints, which enable us to (1)
                 model security requirements based on the groups to
                 which users belong and (2) generalize the notion of a
                 user-independent constraint. Class-independent
                 constraints are defined in terms of equivalence
                 relations over the set of users. We consider sets of
                 nested equivalence relations because this enables us to
                 model security requirements in hierarchical
                 organizations. We prove that WSP is fixed-parameter
                 tractable (FPT) for class-independent constraints
                 defined over nested equivalence relations and develop
                 an FPT algorithm to solve WSP instances incorporating
                 such constraints. We perform experiments to evaluate
                 the performance of our algorithm and compare it with
                 that of SAT4J, an off-the-shelf pseudo-Boolean SAT
                 solver. The results of these experiments demonstrate
                 that our algorithm significantly outperforms SAT4J for
                 many instances of WSP.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% Volume 19, number 3, article 9 (December 2016): ErsatzPasswords,
%%% inhibiting and detecting offline password cracking.
%%% Corrections to the Web-extracted abstract: ``which maybe
%%% acceptable'' -> ``which may be acceptable''; hyphen before the
%%% appositive ``fake passwords'' set as a TeX em dash.
@Article{Gutierrez:2016:IDO,
  author =       "Christopher N. Gutierrez and Mohammed H. Almeshekah
                 and Eugene H. Spafford and Mikhail J. Atallah and Jeff
                 Avery",
  title =        "Inhibiting and Detecting Offline Password Cracking
                 Using {ErsatzPasswords}",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "9:1--9:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2996457",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this work, we present a simple, yet effective and
                 practical scheme to improve the security of stored
                 password hashes, increasing the difficulty to crack
                 passwords and exposing cracking attempts. We utilize a
                 hardware-dependent function (HDF), such as a physically
                 unclonable function (PUF) or a hardware security module
                 (HSM), at the authentication server to inhibit offline
                 password discovery. Additionally, a deception mechanism
                 is incorporated to alert administrators of cracking
                 attempts. Using an HDF to generate password hashes
                 hinders attackers from recovering the true passwords
                 without constant access to the HDF. Our scheme can
                 integrate with legacy systems without needing
                 additional servers, changing the structure of the
                 hashed password file, nor modifying client machines.
                 When using our scheme, the structure of the hashed
                 passwords file, e.g., etc/shadow or etc/master.passwd,
                 will appear no different than traditional hashed
                 password files.$^1$ However, when attackers exfiltrate
                 the hashed password file and attempt to crack it, the
                 passwords they will receive are ErsatzPasswords---``fake
                 passwords.'' The ErsatzPasswords scheme is flexible by
                 design, enabling it to be integrated into existing
                 authentication systems without changes to user
                 experience. The proposed scheme is integrated into the
                 pam\_unix module as well as two client/server
                 authentication schemes: Lightweight Directory Access
                 Protocol (LDAP) authentication and the Pythia
                 pseudorandom function (PRF) Service [Everspaugh et al.
                 2015]. The core library to support ErsatzPasswords
                 written in C and Python consists of 255 and 103 lines
                 of code, respectively. The integration of
                 ErsatzPasswords into each explored authentication
                 system required less than 100 lines of additional code.
                 Experimental evaluation of ErsatzPasswords shows an
                 increase in authentication latency on the order of
                 100ms, which may be acceptable for real world systems.
                 We also describe a framework for implementing
                 ErsatzPasswords using a Trusted Platform Module
                 (TPM).",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Meutzner:2017:TIA,
  author =       "Hendrik Meutzner and Santosh Gupta and Viet-Hung
                 Nguyen and Thorsten Holz and Dorothea Kolossa",
  title =        "Toward Improved Audio {CAPTCHAs} Based on Auditory
                 Perception and Language Understanding",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2856820",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A so-called completely automated public Turing test to
                 tell computers and humans apart (CAPTCHA) represents a
                 challenge-response test that is widely used on the
                 Internet to distinguish human users from fraudulent
                 computer programs, often referred to as bots. To enable
                 access for visually impaired users, most Web sites
                 utilize audio CAPTCHAs in addition to a conventional
                 image-based scheme. Recent research has shown that most
                 currently available audio CAPTCHAs are insecure, as
                 they can be broken by means of machine learning at
                 relatively low costs. Moreover, most audio CAPTCHAs
                 suffer from low human success rates that arise from
                 severe signal distortions. This article proposes two
                 different audio CAPTCHA schemes that systematically
                 exploit differences between humans and computers in
                 terms of auditory perception and language
                 understanding, yielding a better trade-off between
                 usability and security as compared to currently
                 available schemes. Furthermore, we provide an elaborate
                 analysis of Google's prominent reCAPTCHA that serves as
                 a baseline setting when evaluating our proposed CAPTCHA
                 designs.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Shokri:2017:PGA,
  author =       "Reza Shokri and George Theodorakopoulos and Carmela
                 Troncoso",
  title =        "Privacy Games Along Location Traces: a Game-Theoretic
                 Framework for Optimizing Location Privacy",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3009908",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The mainstream approach to protecting the privacy of
                 mobile users in location-based services (LBSs) is to
                 alter (e.g., perturb, hide, and so on) the users'
                 actual locations in order to reduce exposed sensitive
                 information. In order to be effective, a
                 location-privacy preserving mechanism must consider
                 both the privacy and utility requirements of each user,
                 as well as the user's overall exposed locations (which
                 contribute to the adversary's background knowledge). In
                 this article, we propose a methodology that enables the
                 design of optimal user-centric location obfuscation
                 mechanisms respecting each individual user's service
                 quality requirements, while maximizing the expected
                 error that the optimal adversary incurs in
                 reconstructing the user's actual trace. A key advantage
                 of a user-centric mechanism is that it does not depend
                 on third-party proxies or anonymizers; thus, it can be
                 directly integrated in the mobile devices that users
                 employ to access LBSs. Our methodology is based on the
                 mutual optimization of user/adversary objectives
                 (maximizing location privacy versus minimizing
                 localization error) formalized as a Stackelberg
                 Bayesian game. This formalization makes our solution
                 robust against any location inference attack, that is,
                 the adversary cannot decrease the user's privacy by
                 designing a better inference algorithm as long as the
                 obfuscation mechanism is designed according to our
                 privacy games. We develop two linear programs that
                 solve the location privacy game and output the optimal
                 obfuscation strategy and its corresponding optimal
                 inference attack. These linear programs are used to
                 design location privacy--preserving mechanisms that
                 consider the correlation between past, current, and
                 future locations of the user, thus can be tuned to
                 protect different privacy objectives along the user's
                 location trace. We illustrate the efficacy of the
                 optimal location privacy--preserving mechanisms
                 obtained with our approach against real location
                 traces, showing their performance in protecting users'
                 different location privacy objectives.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Argyros:2017:EPG,
  author =       "George Argyros and Theofilos Petsios and Suphannee
                 Sivakorn and Angelos D. Keromytis and Jason Polakis",
  title =        "Evaluating the Privacy Guarantees of Location
                 Proximity Services",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007209",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Location-based services have become an integral part
                 of everyday life. To address the privacy issues that
                 emerge from the use and sharing of location
                 information, social networks and smartphone
                 applications have adopted location proximity schemes as
                 a means of balancing user privacy with utility.
                 Unfortunately, despite the extensive academic
                 literature on this topic, the schemes that large
                 service providers have adopted are not always designed
                 or implemented correctly, rendering users vulnerable to
                 location-disclosure attacks. Such attacks have recently
                 received major publicity as, in some cases, they even
                 exposed citizens of oppressive regimes to
                 life-threatening risks. In this article, we
                 systematically assess the defenses that popular
                 location-based services and mobile applications deploy
                 to guard against adversaries seeking to identify a
                 user's location. We provide the theoretical foundations
                 for formalizing the privacy guarantees of currently
                 adopted proximity models, design practical attacks for
                 each case, and prove tight bounds on the number of
                 queries required for carrying out successful attacks in
                 practice. To evaluate the completeness of our approach,
                 we conduct extensive experiments against popular
                 services including Facebook, Foursquare, and Grindr.
                 Our results demonstrate that, even though the
                 aforementioned services implement various
                 privacy-preserving techniques to protect their users,
                 they are still vulnerable to attacks. In particular, we
                 are able to pinpoint Facebook users within 5m of their
                 exact location. For Foursquare and Grindr, users are
                 pinpointed within 15m of their location in 90\% of the
                 cases, even with the strictest privacy settings
                 enabled. Our attacks are highly efficient and complete
                 within a few seconds. The severity of our findings was
                 acknowledged by Facebook and Foursquare, both of which
                 have followed our recommendations and adopted our
                 design of a safe proximity scheme in their production
                 systems. As the number of mobile applications offering
                 location functionality will continue to increase,
                 service providers and software developers must be able
                 to assess the privacy guarantees that their services
                 offer. To that end, we discuss viable defenses that can
                 be currently adopted by all major services, and provide
                 an open-source testing framework to be used by
                 researchers and service providers who wish to evaluate
                 the privacy-preserving properties of applications
                 offering proximity functionality.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Matsumoto:2017:ACG,
  author =       "Stephanos Matsumoto and Raphael M. Reischuk and Pawel
                 Szalachowski and Tiffany Hyun-Jin Kim and Adrian
                 Perrig",
  title =        "Authentication Challenges in a Global Environment",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007208",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we address the problem of scaling
                 authentication for naming, routing, and end-entity (EE)
                 certification to a global environment in which
                 authentication policies and users' sets of trust roots
                 vary widely. The current mechanisms for authenticating
                 names (DNSSEC), routes (BGPSEC), and EE certificates
                 (TLS) do not support a coexistence of authentication
                 policies, affect the entire Internet when compromised,
                 cannot update trust root information efficiently, and
                 do not provide users with the ability to make flexible
                 trust decisions. We propose the Scalable Authentication
                 Infrastructure for Next-generation Trust (SAINT), which
                 partitions the Internet into groups with common, local
                 trust roots and isolates the effects of a compromised
                 trust root. SAINT requires groups with direct routing
                 connections to cross-sign each other for authentication
                 purposes, allowing diverse authentication policies
                 while keeping all entities' authentication information
                 globally discoverable. SAINT makes trust root
                 management a central part of the network architecture,
                 enabling trust root updates within seconds and allowing
                 users to make flexible trust decisions. SAINT operates
                 without a significant performance penalty and can be
                 deployed alongside existing infrastructures.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Wagner:2017:ESG,
  author =       "Isabel Wagner",
  title =        "Evaluating the Strength of Genomic Privacy Metrics",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3020003",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The genome is a unique identifier for human
                 individuals. The genome also contains highly sensitive
                 information, creating a high potential for misuse of
                 genomic data (for example, genetic discrimination). In
                 this article, we investigate how genomic privacy can be
                 measured in scenarios where an adversary aims to infer
                 a person's genomic markers by constructing probability
                 distributions on the values of genetic variations. We
                 measured the strength of privacy metrics by requiring
                 that metrics are monotonic with increasing adversary
                 strength and uncovered serious problems with several
                 existing metrics currently used to measure genomic
                 privacy. We provide suggestions on metric selection,
                 interpretation, and visualization and illustrate the
                 work flow using case studies for three real-world
                 diseases.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

%%% TOPS v20n1, article 3 (February 2017).
%%% NOTE(review): corrected a subject--verb disagreement in the stored
%%% abstract (``These data is'' -> ``These data are''); every other
%%% field is unchanged.  Verify against the published ACM abstract if
%%% verbatim fidelity to the source text is required.
@Article{Humbert:2017:QIR,
  author =       "Mathias Humbert and Erman Ayday and Jean-Pierre Hubaux
                 and Amalio Telenti",
  title =        "Quantifying Interdependent Risks in Genomic Privacy",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3035538",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The rapid progress in human-genome sequencing is
                 leading to a high availability of genomic data. These
                 data are notoriously very sensitive and stable in time,
                 and highly correlated among relatives. In this article,
                 we study the implications of these familial
                 correlations on kin genomic privacy. We formalize the
                 problem and detail efficient reconstruction attacks
                 based on graphical models and belief propagation. With
                 our approach, an attacker can infer the genomes of the
                 relatives of an individual whose genome or phenotype
                 are observed by notably relying on Mendel's Laws,
                 statistical relationships between the genomic variants,
                 and between the genome and the phenotype. We evaluate
                 the effect of these dependencies on privacy with
                 respect to the amount of observed variants and the
                 relatives sharing them. We also study how the
                 algorithmic performance evolves when we take these
                 various relationships into account. Furthermore, to
                 quantify the level of genomic privacy as a result of
                 the proposed inference attack, we discuss possible
                 definitions of genomic privacy metrics, and compare
                 their values and evolution. Genomic data reveals
                 Mendelian disorders and the likelihood of developing
                 severe diseases, such as Alzheimer's. We also introduce
                 the quantification of health privacy, specifically, the
                 measure of how well the predisposition to a disease is
                 concealed from an attacker. We evaluate our approach on
                 actual genomic data from a pedigree and show the threat
                 extent by combining data gathered from a genome-sharing
                 website as well as an online social network.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Osterweil:2017:IAI,
  author =       "Leon J. Osterweil and Matt Bishop and Heather M.
                 Conboy and Huong Phan and Borislava I. Simidchieva and
                 George S. Avrunin and Lori A. Clarke and Sean Peisert",
  title =        "Iterative Analysis to Improve Key Properties of
                 Critical Human-Intensive Processes: an Election
                 Security Example",
  journal =      j-TOPS,
  volume =       "20",
  number =       "2",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3041041",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we present an approach for
                 systematically improving complex processes, especially
                 those involving human agents, hardware devices, and
                 software systems. We illustrate the utility of this
                 approach by applying it to part of an election process
                 and show how it can improve the security and
                 correctness of that subprocess. We use the Little-JIL
                 process definition language to create a precise and
                 detailed definition of the process. Given this process
                 definition, we use two forms of automated analysis to
                 explore whether specified key properties, such as
                 security and safety policies, can be undermined. First,
                 we use model checking to identify process execution
                 sequences that fail to conform to event-sequence
                 properties. After these are addressed, we apply fault
                 tree analysis to identify when the misperformance of
                 steps might allow undesirable outcomes, such as
                 security breaches. The results of these analyses can
                 provide assurance about the process; suggest areas for
                 improvement; and, when applied to a modified process
                 definition, evaluate proposed changes.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}