%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.01",
%%%     date            = "23 December 2017",
%%%     time            = "10:02:13 MST",
%%%     filename        = "tops.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "16650 1491 8690 81126",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                       beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography, BibTeX, ACM Transactions
%%%                        on Privacy and Security (TOPS)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Privacy and
%%%                        Security (TOPS) (CODEN none, ISSN 2471-2566
%%%                        (print), 2471-2574 (electronic)), covering
%%%                        all journal issues from 2016--date.
%%%
%%%                        Publication began with volume 19, number 1,
%%%                        in 2016, as a continuation of the predecessor
%%%                        journal, ACM Transactions on Information and
%%%                        System Security.  The older journal is
%%%                        covered in a separate bibliography,
%%%                        tissec.bib.
%%%
%%%                        The journal has a Web site at
%%%
%%%                            http://dl.acm.org/citation.cfm?id=J1547
%%%
%%%                        At version 1.01, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2016 (   9)    2017 (  17)
%%%
%%%                             Article:         26
%%%
%%%                             Total entries:   26
%%%
%%%                        The initial draft was extracted from the
%%%                        journal Web site.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.   Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
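%%%                        For example, the entry Eberz:2016:LLE below
%%%                        was derived from first author Simon Eberz,
%%%                        publication year 2016, and the title ``Looks
%%%                        Like Eve''.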
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TOPS                  = "ACM Transactions on Privacy and Security
                                  (TOPS)"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Eberz:2016:LLE,
  author =       "Simon Eberz and Kasper B. Rasmussen and Vincent
                 Lenders and Ivan Martinovic",
  title =        "Looks Like {Eve}: Exposing Insider Threats Using Eye
                 Movement Biometrics",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2904018",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We introduce a novel biometric based on distinctive
                 eye movement patterns. The biometric consists of 20
                 features that allow us to reliably distinguish users
                 based on differences in these patterns. We leverage
                 this distinguishing power along with the ability to
                 gauge the users' task familiarity, that is, level of
                 knowledge, to address insider threats. In a controlled
                 experiment, we test how both time and task familiarity
                 influence eye movements and feature stability, and how
                 different subsets of features affect the classifier
                 performance. These feature subsets can be used to
                 tailor the eye movement biometric to different
                 authentication methods and threat models. Our results
                 show that eye movement biometrics support reliable and
                 stable continuous authentication of users. We
                 investigate different approaches in which an attacker
                 could attempt to use inside knowledge to mimic the
                 legitimate user. Our results show that while this
                 advance knowledge is measurable, it does not increase
                 the likelihood of successful impersonation. In order to
                 determine the time stability of our features, we repeat
                 the experiment twice within 2 weeks. The results
                 indicate that we can reliably authenticate users over
                 the entire period. We show that lower sampling rates
                 provided by low-cost hardware pose a challenge, but
                 that reliable authentication is possible even at the
                 rate of 50 Hz commonly available with consumer-level
                 devices. In a second set of experiments, we evaluate
                 how our authentication system performs across a variety
                 of real-world tasks, including reading, writing, and
                 web browsing. We discuss the advantages and limitations
                 of our approach in detail and give practical insights
                 on the use of this biometric in a real-world
                 environment.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Mitropoulos:2016:HTY,
  author =       "Dimitris Mitropoulos and Konstantinos Stroggylos and
                 Diomidis Spinellis and Angelos D. Keromytis",
  title =        "How to Train Your Browser: Preventing {XSS} Attacks
                 Using Contextual Script Fingerprints",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2939374",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Cross-Site Scripting (XSS) is one of the most common
                 web application vulnerabilities. It is therefore
                 sometimes referred to as the ``buffer overflow of the
                 web.'' Drawing a parallel from the current state of
                 practice in preventing unauthorized native code
                 execution (the typical goal in a code injection), we
                 propose a script whitelisting approach to tame
                 JavaScript-driven XSS attacks. Our scheme involves a
                 transparent script interception layer placed in the
                 browser's JavaScript engine. This layer is designed to
                 detect every script that reaches the browser, from
                 every possible route, and compare it to a list of valid
                 scripts for the site or page being accessed; scripts
                 not on the list are prevented from executing. To avoid
                 the false positives caused by minor syntactic changes
                 (e.g., due to dynamic code generation), our layer uses
                 the concept of contextual fingerprints when comparing
                 scripts. Contextual fingerprints are identifiers that
                 represent specific elements of a script and its
                 execution context. Fingerprints can be easily enriched
                 with new elements, if needed, to enhance the proposed
                 method's robustness. The list can be populated by the
                 website's administrators or a trusted third party. To
                 verify our approach, we have developed a prototype and
                 tested it successfully against an extensive array of
                 attacks that were performed on more than 50 real-world
                 vulnerable web applications. We measured the browsing
                 performance overhead of the proposed solution on eight
                 websites that make heavy use of JavaScript. Our
                 mechanism imposed an average overhead of 11.1\% on the
                 execution time of the JavaScript engine. When measured
                 as part of a full browsing session, and for all tested
                 websites, the overhead introduced by our layer was less
                 than 0.05\%. When script elements are altered or new
                 scripts are added on the server side, a new fingerprint
                 generation phase is required. To examine the temporal
                 aspect of contextual fingerprints, we performed a
                 short-term and a long-term experiment based on the same
                 websites. The former showed that in a short period of
                 time (10 days), for seven of eight websites, the
                 majority of valid fingerprints stay the same (more than
                 92\% on average). The latter, though, indicated that,
                 in the long run, the number of fingerprints that do not
                 change is reduced. Both experiments can be seen as one
                 of the first attempts to study the feasibility of a
                 whitelisting approach for the web.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Aonghusa:2016:DLG,
  author =       "P{\'o}l Mac Aonghusa and Douglas J. Leith",
  title =        "Don't Let {Google} Know {I}'m Lonely",
  journal =      j-TOPS,
  volume =       "19",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2937754",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:38 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "From buying books to finding the perfect partner, we
                 share our most intimate wants and needs with our
                 favourite online systems. But how far should we accept
                 promises of privacy in the face of personalized
                 profiling? In particular, we ask how we can improve
                 detection of sensitive topic profiling by online
                 systems. We propose a definition of privacy disclosure
                 that we call $\epsilon$-indistinguishability, from
                 which we construct scalable, practical tools to assess
                 the learning potential from personalized content. We
                 demonstrate our results using openly available
                 resources, detecting a learning rate in excess of 98\%
                 for a range of sensitive topics during our
                 experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Rahbarinia:2016:EAB,
  author =       "Babak Rahbarinia and Roberto Perdisci and Manos
                 Antonakakis",
  title =        "Efficient and Accurate Behavior-Based Tracking of
                 Malware-Control Domains in Large {ISP} Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2960409",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we propose Segugio, a novel defense
                 system that allows for efficiently tracking the
                 occurrence of new malware-control domain names in very
                 large ISP networks. Segugio passively monitors the DNS
                 traffic to build a machine-domain bipartite graph
                 representing who is querying what. After labeling nodes
                 in this query behavior graph that are known to be
                 either benign or malware-related, we propose a novel
                 approach to accurately detect previously unknown
                 malware-control domains. We implemented a
                 proof-of-concept version of Segugio and deployed it in
                 large ISP networks that serve millions of users. Our
                 experimental results show that Segugio can track the
                 occurrence of new malware-control domains with up to
                 94\% true positives (TPs) at less than 0.1\% false
                 positives (FPs). In addition, we provide the following
                 results: (1) we show that Segugio can also detect
                 control domains related to new, previously unseen
                 malware families, with 85\% TPs at 0.1\% FPs; (2)
                 Segugio's detection models learned on traffic from a
                 given ISP network can be deployed into a different ISP
                 network and still achieve very high detection accuracy;
                 (3) new malware-control domains can be detected days or
                 even weeks before they appear in a large commercial
                 domain-name blacklist; (4) Segugio can be used to
                 detect previously unknown malware-infected machines in
                 ISP networks; and (5) we show that Segugio clearly
                 outperforms domain-reputation systems based on Belief
                 Propagation.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Dong:2016:DRC,
  author =       "Zheng Dong and Kevin Kane and L. Jean Camp",
  title =        "Detection of Rogue Certificates from Trusted
                 Certificate Authorities Using Deep Neural Networks",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975591",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Rogue certificates are valid certificates issued by a
                 legitimate certificate authority (CA) that are
                 nonetheless untrustworthy, yet trusted by web browsers
                 and users. With the current public key infrastructure,
                 there exists a window of vulnerability between the time
                 a rogue certificate is issued and when it is detected.
                 Rogue certificates from recent compromises have been
                 trusted for as long as weeks before detection and
                 revocation. Previous proposals to close this window of
                 vulnerability require changes in the infrastructure,
                 Internet protocols, or end user experience. We present
                 a method for detecting rogue certificates from trusted
                 CAs developed from a large and timely collection of
                 certificates. This method automates classification by
                 building machine-learning models with Deep Neural
                 Networks (DNN). Despite the scarcity of rogue instances
                 in the dataset, DNN produced a classification method
                 that is proven both in simulation and in the July 2014
                 compromise of the India CCA. We report the details of
                 the classification method and illustrate that it is
                 repeatable, such as with datasets obtained from
                 crawling. We describe the classification performance
                 under our current research deployment.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Garay:2016:MPA,
  author =       "Juan A. Garay and Vladimir Kolesnikov and Rae
                 Mclellan",
  title =        "{MAC} Precomputation with Applications to Secure
                 Memory",
  journal =      j-TOPS,
  volume =       "19",
  number =       "2",
  pages =        "6:1--6:??",
  month =        sep,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2943780",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We present Shallow MAC (ShMAC), a fixed-input-length
                 message authentication code that performs most of the
                 computation prior to the availability of the message.
                 Specifically, ShMAC's message-dependent computation is
                 much faster and smaller in hardware than the evaluation
                 of a pseudorandom permutation (PRP) and can be
                 implemented by a small shallow circuit, while its
                 precomputation consists of one PRP evaluation. A main
                 building block for ShMAC is the notion of strong
                 differential uniformity (SDU), which we introduce and
                 which may be of independent interest. We show an
                 efficient SDU construction built from previously
                 considered differentially uniform functions. Our main
                 motivating application is a system architecture where a
                 hardware-secured processor uses memory controlled by an
                 adversary. We also present in technical detail a novel,
                 efficient approach to encrypting and authenticating
                 memory and discuss the associated tradeoffs, while
                 paying special attention to minimizing hardware costs
                 and the reduction of Dynamic Random Access Memory
                 latency.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Ozalp:2016:PPP,
  author =       "Ismet Ozalp and Mehmet Emre Gursoy and Mehmet Ercan
                 Nergiz and Yucel Saygin",
  title =        "Privacy-Preserving Publishing of Hierarchical Data",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2976738",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Many applications today rely on storage and management
                 of semi-structured information, for example, XML
                 databases and document-oriented databases. These data
                 often have to be shared with untrusted third parties,
                 which makes individuals' privacy a fundamental problem.
                 In this article, we propose anonymization techniques
                 for privacy-preserving publishing of hierarchical data.
                 We show that the problem of anonymizing hierarchical
                 data poses unique challenges that cannot be readily
                 solved by existing mechanisms. We extend two standards
                 for privacy protection in tabular data ($k$-anonymity
                 and $l$-diversity) and apply them to hierarchical data.
                 We present utility-aware algorithms that enforce these
                 definitions of privacy using generalizations and
                 suppressions of data values. To evaluate our algorithms
                 and their heuristics, we experiment on synthetic and
                 real datasets obtained from two universities. Our
                 experiments show that we significantly outperform
                 related methods that provide comparable privacy
                 guarantees.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Crampton:2016:WSP,
  author =       "Jason Crampton and Andrei Gagarin and Gregory Gutin
                 and Mark Jones and Magnus Wahlstr{\"o}m",
  title =        "On the Workflow Satisfiability Problem with
                 Class-Independent Constraints for Hierarchical
                 Organizations",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2988239",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A workflow specification defines a set of steps, a set
                 of users, and an access control policy. The policy
                 determines which steps a user is authorized to perform
                 and imposes constraints on which sets of users can
                 perform which sets of steps. The workflow
                 satisfiability problem (WSP) is the problem of
                 determining whether there exists an assignment of users
                 to workflow steps that satisfies the policy. Given the
                 computational hardness of WSP and its importance in the
                 context of workflow management systems, it is important
                 to develop algorithms that are as efficient as possible
                 to solve WSP. In this article, we study the
                 fixed-parameter tractability of WSP in the presence of
                 class-independent constraints, which enable us to (1)
                 model security requirements based on the groups to
                 which users belong and (2) generalize the notion of a
                 user-independent constraint. Class-independent
                 constraints are defined in terms of equivalence
                 relations over the set of users. We consider sets of
                 nested equivalence relations because this enables us to
                 model security requirements in hierarchical
                 organizations. We prove that WSP is fixed-parameter
                 tractable (FPT) for class-independent constraints
                 defined over nested equivalence relations and develop
                 an FPT algorithm to solve WSP instances incorporating
                 such constraints. We perform experiments to evaluate
                 the performance of our algorithm and compare it with
                 that of SAT4J, an off-the-shelf pseudo-Boolean SAT
                 solver. The results of these experiments demonstrate
                 that our algorithm significantly outperforms SAT4J for
                 many instances of WSP.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Gutierrez:2016:IDO,
  author =       "Christopher N. Gutierrez and Mohammed H. Almeshekah
                 and Eugene H. Spafford and Mikhail J. Atallah and Jeff
                 Avery",
  title =        "Inhibiting and Detecting Offline Password Cracking
                 Using {ErsatzPasswords}",
  journal =      j-TOPS,
  volume =       "19",
  number =       "3",
  pages =        "9:1--9:??",
  month =        dec,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2996457",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this work, we present a simple, yet effective and
                 practical scheme to improve the security of stored
                 password hashes, increasing the difficulty to crack
                 passwords and exposing cracking attempts. We utilize a
                 hardware-dependent function (HDF), such as a physically
                 unclonable function (PUF) or a hardware security module
                 (HSM), at the authentication server to inhibit offline
                 password discovery. Additionally, a deception mechanism
                 is incorporated to alert administrators of cracking
                 attempts. Using an HDF to generate password hashes
                 hinders attackers from recovering the true passwords
                 without constant access to the HDF. Our scheme can
                 integrate with legacy systems without needing
                 additional servers, changing the structure of the
                 hashed password file, or modifying client machines.
                 When using our scheme, the structure of the hashed
                 passwords file, e.g., /etc/shadow or /etc/master.passwd,
                 will appear no different than traditional hashed
                 password files. However, when attackers exfiltrate
                 the hashed password file and attempt to crack it, the
                 passwords they will receive are ErsatzPasswords: ``fake
                 passwords.'' The ErsatzPasswords scheme is flexible by
                 design, enabling it to be integrated into existing
                 authentication systems without changes to user
                 experience. The proposed scheme is integrated into the
                 pam\_unix module as well as two client/server
                 authentication schemes: Lightweight Directory Access
                 Protocol (LDAP) authentication and the Pythia
                 pseudorandom function (PRF) Service [Everspaugh et al.
                 2015]. The core library to support ErsatzPasswords
                 written in C and Python consists of 255 and 103 lines
                 of code, respectively. The integration of
                 ErsatzPasswords into each explored authentication
                 system required less than 100 lines of additional code.
                 Experimental evaluation of ErsatzPasswords shows an
                 increase in authentication latency on the order of
                 100 ms, which may be acceptable for real-world systems.
                 We also describe a framework for implementing
                 ErsatzPasswords using a Trusted Platform Module
                 (TPM).",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Meutzner:2017:TIA,
  author =       "Hendrik Meutzner and Santosh Gupta and Viet-Hung
                 Nguyen and Thorsten Holz and Dorothea Kolossa",
  title =        "Toward Improved Audio {CAPTCHAs} Based on Auditory
                 Perception and Language Understanding",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2856820",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "A so-called completely automated public Turing test to
                 tell computers and humans apart (CAPTCHA) represents a
                 challenge-response test that is widely used on the
                 Internet to distinguish human users from fraudulent
                 computer programs, often referred to as bots. To enable
                 access for visually impaired users, most Web sites
                 utilize audio CAPTCHAs in addition to a conventional
                 image-based scheme. Recent research has shown that most
                 currently available audio CAPTCHAs are insecure, as
                 they can be broken by means of machine learning at
                 relatively low costs. Moreover, most audio CAPTCHAs
                 suffer from low human success rates that arise from
                 severe signal distortions. This article proposes two
                 different audio CAPTCHA schemes that systematically
                 exploit differences between humans and computers in
                 terms of auditory perception and language
                 understanding, yielding a better trade-off between
                 usability and security as compared to currently
                 available schemes. Furthermore, we provide an elaborate
                 analysis of Google's prominent reCAPTCHA that serves as
                 a baseline setting when evaluating our proposed CAPTCHA
                 designs.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Shokri:2017:PGA,
  author =       "Reza Shokri and George Theodorakopoulos and Carmela
                 Troncoso",
  title =        "Privacy Games Along Location Traces: a Game-Theoretic
                 Framework for Optimizing Location Privacy",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3009908",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The mainstream approach to protecting the privacy of
                 mobile users in location-based services (LBSs) is to
                 alter (e.g., perturb, hide, and so on) the users'
                 actual locations in order to reduce exposed sensitive
                 information. In order to be effective, a
                 location-privacy preserving mechanism must consider
                 both the privacy and utility requirements of each user,
                 as well as the user's overall exposed locations (which
                 contribute to the adversary's background knowledge). In
                 this article, we propose a methodology that enables the
                 design of optimal user-centric location obfuscation
                 mechanisms respecting each individual user's service
                 quality requirements, while maximizing the expected
                 error that the optimal adversary incurs in
                 reconstructing the user's actual trace. A key advantage
                 of a user-centric mechanism is that it does not depend
                 on third-party proxies or anonymizers; thus, it can be
                 directly integrated in the mobile devices that users
                 employ to access LBSs. Our methodology is based on the
                 mutual optimization of user/adversary objectives
                 (maximizing location privacy versus minimizing
                 localization error) formalized as a Stackelberg
                 Bayesian game. This formalization makes our solution
                 robust against any location inference attack, that is,
                 the adversary cannot decrease the user's privacy by
                 designing a better inference algorithm as long as the
                 obfuscation mechanism is designed according to our
                 privacy games. We develop two linear programs that
                 solve the location privacy game and output the optimal
                 obfuscation strategy and its corresponding optimal
                 inference attack. These linear programs are used to
                 design location privacy-preserving mechanisms that
                 consider the correlation between past, current, and
                 future locations of the user and thus can be tuned to
                 protect different privacy objectives along the user's
                 location trace. We illustrate the efficacy of the
                 optimal location privacy-preserving mechanisms
                 obtained with our approach against real location
                 traces, showing their performance in protecting users'
                 different location privacy objectives.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Argyros:2017:EPG,
  author =       "George Argyros and Theofilos Petsios and Suphannee
                 Sivakorn and Angelos D. Keromytis and Jason Polakis",
  title =        "Evaluating the Privacy Guarantees of Location
                 Proximity Services",
  journal =      j-TOPS,
  volume =       "19",
  number =       "4",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007209",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:39 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Location-based services have become an integral part
                 of everyday life. To address the privacy issues that
                 emerge from the use and sharing of location
                 information, social networks and smartphone
                 applications have adopted location proximity schemes as
                 a means of balancing user privacy with utility.
                 Unfortunately, despite the extensive academic
                 literature on this topic, the schemes that large
                 service providers have adopted are not always designed
                 or implemented correctly, rendering users vulnerable to
                 location-disclosure attacks. Such attacks have recently
                 received major publicity as, in some cases, they even
                 exposed citizens of oppressive regimes to
                 life-threatening risks. In this article, we
                 systematically assess the defenses that popular
                 location-based services and mobile applications deploy
                 to guard against adversaries seeking to identify a
                 user's location. We provide the theoretical foundations
                 for formalizing the privacy guarantees of currently
                 adopted proximity models, design practical attacks for
                 each case, and prove tight bounds on the number of
                 queries required for carrying out successful attacks in
                 practice. To evaluate the completeness of our approach,
                 we conduct extensive experiments against popular
                 services including Facebook, Foursquare, and Grindr.
                 Our results demonstrate that, even though the
                 aforementioned services implement various
                 privacy-preserving techniques to protect their users,
                 they are still vulnerable to attacks. In particular, we
                 are able to pinpoint Facebook users within 5 m of their
                 exact location. For Foursquare and Grindr, users are
                 pinpointed within 15 m of their location in 90\% of the
                 cases, even with the strictest privacy settings
                 enabled. Our attacks are highly efficient and complete
                 within a few seconds. The severity of our findings was
                 acknowledged by Facebook and Foursquare, both of which
                 have followed our recommendations and adopted our
                 design of a safe proximity scheme in their production
                 systems. As the number of mobile applications offering
                 location functionality will continue to increase,
                 service providers and software developers must be able
                 to assess the privacy guarantees that their services
                 offer. To that end, we discuss viable defenses that can
                 be currently adopted by all major services, and provide
                 an open-source testing framework to be used by
                 researchers and service providers who wish to evaluate
                 the privacy-preserving properties of applications
                 offering proximity functionality.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Matsumoto:2017:ACG,
  author =       "Stephanos Matsumoto and Raphael M. Reischuk and Pawel
                 Szalachowski and Tiffany Hyun-Jin Kim and Adrian
                 Perrig",
  title =        "Authentication Challenges in a Global Environment",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3007208",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we address the problem of scaling
                 authentication for naming, routing, and end-entity (EE)
                 certification to a global environment in which
                 authentication policies and users' sets of trust roots
                 vary widely. The current mechanisms for authenticating
                 names (DNSSEC), routes (BGPSEC), and EE certificates
                 (TLS) do not support a coexistence of authentication
                 policies, affect the entire Internet when compromised,
                 cannot update trust root information efficiently, and
                 do not provide users with the ability to make flexible
                 trust decisions. We propose the Scalable Authentication
                 Infrastructure for Next-generation Trust (SAINT), which
                 partitions the Internet into groups with common, local
                 trust roots and isolates the effects of a compromised
                 trust root. SAINT requires groups with direct routing
                 connections to cross-sign each other for authentication
                 purposes, allowing diverse authentication policies
                 while keeping all entities' authentication information
                 globally discoverable. SAINT makes trust root
                 management a central part of the network architecture,
                 enabling trust root updates within seconds and allowing
                 users to make flexible trust decisions. SAINT operates
                 without a significant performance penalty and can be
                 deployed alongside existing infrastructures.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Wagner:2017:ESG,
  author =       "Isabel Wagner",
  title =        "Evaluating the Strength of Genomic Privacy Metrics",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3020003",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The genome is a unique identifier for human
                 individuals. The genome also contains highly sensitive
                 information, creating a high potential for misuse of
                 genomic data (for example, genetic discrimination). In
                 this article, we investigate how genomic privacy can be
                 measured in scenarios where an adversary aims to infer
                 a person's genomic markers by constructing probability
                 distributions on the values of genetic variations. We
                 measured the strength of privacy metrics by requiring
                 that metrics are monotonic with increasing adversary
                 strength and uncovered serious problems with several
                 existing metrics currently used to measure genomic
                 privacy. We provide suggestions on metric selection,
                 interpretation, and visualization and illustrate the
                 work flow using case studies for three real-world
                 diseases.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Humbert:2017:QIR,
  author =       "Mathias Humbert and Erman Ayday and Jean-Pierre Hubaux
                 and Amalio Telenti",
  title =        "Quantifying Interdependent Risks in Genomic Privacy",
  journal =      j-TOPS,
  volume =       "20",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3035538",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The rapid progress in human-genome sequencing is
                 leading to a high availability of genomic data. These
                 data are notoriously sensitive and stable in time,
                 and highly correlated among relatives. In this article,
                 we study the implications of these familial
                 correlations on kin genomic privacy. We formalize the
                 problem and detail efficient reconstruction attacks
                 based on graphical models and belief propagation. With
                 our approach, an attacker can infer the genomes of the
                 relatives of an individual whose genome or phenotype
                 is observed, notably by relying on Mendel's Laws,
                 statistical relationships between the genomic variants,
                 and between the genome and the phenotype. We evaluate
                 the effect of these dependencies on privacy with
                 respect to the amount of observed variants and the
                 relatives sharing them. We also study how the
                 algorithmic performance evolves when we take these
                 various relationships into account. Furthermore, to
                 quantify the level of genomic privacy as a result of
                 the proposed inference attack, we discuss possible
                 definitions of genomic privacy metrics, and compare
                 their values and evolution. Genomic data reveals
                 Mendelian disorders and the likelihood of developing
                 severe diseases, such as Alzheimer's. We also introduce
                 the quantification of health privacy, specifically, the
                 measure of how well the predisposition to a disease is
                 concealed from an attacker. We evaluate our approach on
                 actual genomic data from a pedigree and show the threat
                 extent by combining data gathered from a genome-sharing
                 website as well as an online social network.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Osterweil:2017:IAI,
  author =       "Leon J. Osterweil and Matt Bishop and Heather M.
                 Conboy and Huong Phan and Borislava I. Simidchieva and
                 George S. Avrunin and Lori A. Clarke and Sean Peisert",
  title =        "Iterative Analysis to Improve Key Properties of
                 Critical Human-Intensive Processes: an Election
                 Security Example",
  journal =      j-TOPS,
  volume =       "20",
  number =       "2",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3041041",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Mon Apr 3 09:09:40 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we present an approach for
                 systematically improving complex processes, especially
                 those involving human agents, hardware devices, and
                 software systems. We illustrate the utility of this
                 approach by applying it to part of an election process
                 and show how it can improve the security and
                 correctness of that subprocess. We use the Little-JIL
                 process definition language to create a precise and
                 detailed definition of the process. Given this process
                 definition, we use two forms of automated analysis to
                 explore whether specified key properties, such as
                 security and safety policies, can be undermined. First,
                 we use model checking to identify process execution
                 sequences that fail to conform to event-sequence
                 properties. After these are addressed, we apply fault
                 tree analysis to identify when the misperformance of
                 steps might allow undesirable outcomes, such as
                 security breaches. The results of these analyses can
                 provide assurance about the process; suggest areas for
                 improvement; and, when applied to a modified process
                 definition, evaluate proposed changes.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Noorman:2017:SLC,
  author =       "Job Noorman and Jo {Van Bulck} and Jan Tobias
                 M{\"u}hlberg and Frank Piessens and Pieter Maene and
                 Bart Preneel and Ingrid Verbauwhede and Johannes
                 G{\"o}tzfried and Tilo M{\"u}ller and Felix Freiling",
  title =        "{Sancus 2.0}: a Low-Cost Security Architecture for
                 {IoT} Devices",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3079763",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The Sancus security architecture for networked
                 embedded devices was proposed in 2013 at the USENIX
                 Security conference. It supports remote (even
                 third-party) software installation on devices while
                 maintaining strong security guarantees. More
                 specifically, Sancus can remotely attest to a software
                 provider that a specific software module is running
                 uncompromised and can provide a secure communication
                 channel between software modules and software
                 providers. Software modules can securely maintain local
                 state and can securely interact with other software
                 modules that they choose to trust. Over the past three
                 years, significant experience has been gained with
                 applications of Sancus, and several extensions of the
                 architecture have been investigated, both by the
                 original designers and by independent
                 researchers. Informed by these additional research
                 results, this journal version of the Sancus paper
                 describes an improved design and implementation,
                 supporting additional security guarantees (such as
                 confidential deployment) and a more efficient
                 cryptographic core. We describe the design of Sancus
                 2.0 (without relying on any prior knowledge of Sancus)
                 and develop and evaluate a prototype FPGA
                 implementation. The prototype extends an MSP430
                 processor with hardware support for the memory access
                 control and cryptographic functionality required to run
                 Sancus. We report on our experience using Sancus in a
                 variety of application scenarios and discuss some
                 important avenues of ongoing and future work.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}

@Article{Brandenburger:2017:DTC,
  author =       "Marcus Brandenburger and Christian Cachin and Nikola
                 Knezevi{\'c}",
  title =        "Don't Trust the Cloud, Verify: Integrity and
                 Consistency for Cloud Object Stores",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3079762",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Cloud services have turned remote computation into a
                 commodity and enable convenient online collaboration.
                 However, they require that clients fully trust the
                 service provider in terms of confidentiality,
                 integrity, and availability. Toward reducing this
                 dependency, this article introduces VICOS, a protocol
                 for verification of integrity and consistency for cloud
                 object storage that enables a group of mutually
                 trusting clients to detect data integrity and
                 consistency violations for a cloud object storage
                 service. It aims at services where multiple clients
                 cooperate on data stored remotely on a potentially
                 misbehaving service. VICOS enforces the consistency
                 notion of fork-linearizability, supports wait-free
                 client semantics for most operations, and reduces the
                 computation and communication overhead compared to
                 previous protocols. VICOS is based on a generic
                 authenticated data structure. Moreover, its operations
                 cover the hierarchical name space of a cloud object
                 store, supporting a real-world interface and not only a
                 simplistic abstraction. A prototype of VICOS that works
                 with the key-value store interface of commodity cloud
                 storage services has been implemented, and an
                 evaluation demonstrates its advantage compared to
                 existing systems.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
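
%%% The VICOS abstract relies on clients jointly auditing an untrusted
%%% store. A much-simplified way to see why forks become detectable is
%%% a hash chain over the operation history: once two clients have seen
%%% diverging histories, no single chain can extend both. The Python
%%% sketch below is only that intuition (the operation strings are
%%% hypothetical); the actual protocol uses an authenticated data
%%% structure and achieves fork-linearizability with wait-free
%%% operations, which this toy does not.

import hashlib

def extend(digest, op):
    """Chain the next operation onto the running history digest."""
    return hashlib.sha256(digest + op.encode()).digest()

def verify_prefix(my_digest, my_len, history):
    """Recompute the chain over the server-reported history and check
    that the digest this client last saw occurs at position my_len.
    (A real protocol would also reject histories shorter than my_len.)"""
    d = b""
    for i, op in enumerate(history, start=1):
        d = extend(d, op)
        if i == my_len and d != my_digest:
            return False
    return True

# A client tracks (digest, length) as it performs operations:
d = b""
for op in ["put(x,1)", "put(y,2)"]:
    d = extend(d, op)

# Later the server reports a longer history; the client audits it:
assert verify_prefix(d, 2, ["put(x,1)", "put(y,2)", "get(x)"])
assert not verify_prefix(d, 2, ["put(x,9)", "put(y,2)", "get(x)"])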

@Article{Toreini:2017:TRP,
  author =       "Ehsan Toreini and Siamak F. Shahandashti and Feng
                 Hao",
  title =        "Texture to the Rescue: Practical Paper Fingerprinting
                 Based on Texture Patterns",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3092816",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In this article, we propose a novel paper
                 fingerprinting technique based on analyzing the
                 translucent patterns revealed when a light source
                 shines through the paper. These patterns represent the
                 inherent texture of paper, formed by the random
                 interleaving of wooden particles during the
                 manufacturing process. We show that these patterns can
                 be easily captured by a commodity camera and condensed
                 into a compact 2,048-bit fingerprint code. Prominent
                  works in this area (Nature 2005, IEEE S\&P 2009, CCS
                 2011) have all focused on fingerprinting paper based on
                 the paper ``surface.'' We are motivated by the
                 observation that capturing the surface alone misses
                  important distinctive features such as the non-even
                 thickness, random distribution of impurities, and
                 different materials in the paper with varying
                 opacities. Through experiments, we demonstrate that the
                 embedded paper texture provides a more reliable source
                 for fingerprinting than features on the surface. Based
                 on the collected datasets, we achieve 0\% false
                 rejection and 0\% false acceptance rates. We further
                 report that our extracted fingerprints contain 807
                 degrees of freedom (DoF), which is much higher than the
                  249 DoF of iris codes (which have the same size of
                 2,048 bits). The high amount of DoF for texture-based
                 fingerprints makes our method extremely scalable for
                 recognition among very large databases; it also allows
                 secure usage of the extracted fingerprint in
                 privacy-preserving authentication schemes based on
                 error correction techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
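
%%% The 807-degrees-of-freedom figure in the abstract above follows the
%%% Daugman-style analysis used for iris codes: if fractional Hamming
%%% distances between fingerprints of different sheets are binomially
%%% distributed with mean p and standard deviation sigma, the code
%%% carries N = p(1-p)/sigma^2 degrees of freedom. A small Python
%%% illustration with made-up numbers (not the paper's data):

import random

def frac_hamming(a, b):
    """Fractional Hamming distance between equal-length bit lists."""
    return sum(x != y for x, y in zip(a, b)) / len(a)

def degrees_of_freedom(p, sigma):
    """Daugman's estimate: N = p(1 - p) / sigma^2."""
    return p * (1 - p) / sigma ** 2

# Independent 2,048-bit codes disagree on about half their bits:
random.seed(1)
a = [random.getrandbits(1) for _ in range(2048)]
b = [random.getrandbits(1) for _ in range(2048)]
print(frac_hamming(a, b))                  # close to 0.5

# A mean of 0.5 with standard deviation ~0.0176 yields roughly the
# 807 DoF quoted above; iris codes' 249 DoF corresponds to a wider
# spread (sigma ~ 0.0317) at the same 2,048-bit size.
print(degrees_of_freedom(0.5, 0.0176))     # ~807
print(degrees_of_freedom(0.5, 0.0317))     # ~249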

@Article{Munoz-Gonzalez:2017:EAG,
  author =       "Luis Mu{\~n}oz-Gonz{\'a}lez and Daniele Sgandurra and
                 Andrea Paudice and Emil C. Lupu",
  title =        "Efficient Attack Graph Analysis through Approximate
                 Inference",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105760",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Attack graphs, derived from the analysis of network
                  vulnerabilities and topology, provide compact
                  representations of the attack paths an attacker can
                  follow to compromise network resources. These
                  representations are
                 a powerful tool for security risk assessment. Bayesian
                 inference on attack graphs enables the estimation of
                 the risk of compromise to the system's components given
                 their vulnerabilities and interconnections and accounts
                 for multi-step attacks spreading through the system.
                 While static analysis considers the risk posture at
                 rest, dynamic analysis also accounts for evidence of
                 compromise, for example, from Security Information and
                 Event Management software or forensic investigation.
                 However, in this context, exact Bayesian inference
                 techniques do not scale well. In this article, we show
                  how Loopy Belief Propagation, an approximate inference
                  technique, can be applied to attack graphs and that it
                 scales linearly in the number of nodes for both static
                 and dynamic analysis, making such analyses viable for
                 larger networks. We experiment with different
                 topologies and network clustering on synthetic Bayesian
                 attack graphs with thousands of nodes to show that the
                 algorithm's accuracy is acceptable and that it
                 converges to a stable solution. We compare sequential
                 and parallel versions of Loopy Belief Propagation with
                 exact inference techniques for both static and dynamic
                 analysis, showing the advantages and gains of
                 approximate inference techniques when scaling to larger
                 attack graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
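
%%% The scaling argument in the abstract above comes from replacing
%%% exact inference with local, iterated updates. The sketch below is
%%% not the paper's Loopy Belief Propagation; it is a simpler
%%% fixed-point risk propagation under a noisy-OR model that conveys
%%% why each sweep costs only O(edges). Graph, priors, and exploit
%%% probabilities are hypothetical.

def propagate(priors, edges, sweeps=100, tol=1e-9):
    """Iterate the local noisy-OR update
         P(v) = 1 - (1 - prior_v) * prod over (u,v,p) of (1 - P(u)*p)
    to a fixed point.  Each sweep is linear in the number of edges,
    unlike exact inference on the joint distribution."""
    prob = dict(priors)
    for _ in range(sweeps):
        delta = 0.0
        for v in prob:
            q = 1.0 - priors[v]
            for (u, w, p) in edges:
                if w == v:
                    q *= 1.0 - prob[u] * p
            new = 1.0 - q
            delta = max(delta, abs(new - prob[v]))
            prob[v] = new
        if delta < tol:
            break
    return prob

# internet -> webserver -> database, with exploit probabilities:
priors = {"internet": 1.0, "webserver": 0.0, "database": 0.0}
edges = [("internet", "webserver", 0.6), ("webserver", "database", 0.4)]
print(propagate(priors, edges))   # webserver ~0.60, database ~0.24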

@Article{Reaves:2017:MBM,
  author =       "Bradley Reaves and Jasmine Bowers and Nolen Scaife and
                 Adam Bates and Arnav Bhartiya and Patrick Traynor and
                 Kevin R. B. Butler",
  title =        "Mo(bile) Money, Mo(bile) Problems: Analysis of
                 Branchless Banking Applications",
  journal =      j-TOPS,
  volume =       "20",
  number =       "3",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3092368",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Mobile money, also known as branchless banking,
                 leverages ubiquitous cellular networks to bring
                 much-needed financial services to the unbanked in the
                 developing world. These services are often deployed as
                 smartphone apps, and although marketed as secure, these
                 applications are often not regulated as strictly as
                 traditional banks, leaving doubt about the truth of
                 such claims. In this article, we evaluate these claims
                 and perform the first in-depth measurement analysis of
                 branchless banking applications. We first perform an
                 automated analysis of all 46 known Android mobile money
                 apps across the 246 known mobile money providers from
                 2015. We then perform a comprehensive manual teardown
                 of the registration, login, and transaction procedures
                 of a diverse 15\% of these apps. We uncover pervasive
                  vulnerabilities spanning botched certificate
                 validation, do-it-yourself cryptography, and other
                 forms of information leakage that allow an attacker to
                 impersonate legitimate users, modify transactions, and
                 steal financial records. These findings show that the
                 majority of these apps fail to provide the protections
                 needed by financial services. In an expanded
                 re-evaluation one year later, we find that these
                 systems have only marginally improved their security.
                 Additionally, we document our experiences working in
                 this sector for future researchers and provide
                 recommendations to improve the security of this
                 critical ecosystem. Finally, through inspection of
                 providers' terms of service, we also discover that
                 liability for these problems unfairly rests on the
                 shoulders of the customer, threatening to erode trust
                 in branchless banking and hinder efforts for global
                 financial inclusion.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
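
%%% "Botched certificate validation" in the abstract above refers to a
%%% well-known TLS anti-pattern: disabling chain and host-name checks
%%% so that any interceptor's certificate is accepted. A minimal Python
%%% contrast of the broken and correct client configurations (the host
%%% name is hypothetical; this illustrates the flaw class, not code
%%% from any audited app):

import socket
import ssl

# Broken pattern: verification switched off.  A network attacker can
# now present any certificate and read or modify the banking session.
insecure = ssl.create_default_context()
insecure.check_hostname = False
insecure.verify_mode = ssl.CERT_NONE      # DO NOT ship this

def fetch_cert(host="bank.example"):      # hypothetical endpoint
    """Correct pattern: the default context verifies the certificate
    chain against trusted roots and matches the host name."""
    ctx = ssl.create_default_context()
    with socket.create_connection((host, 443)) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            return tls.getpeercert()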

@Article{Shu:2017:LSP,
  author =       "Xiaokui Shu and Danfeng (Daphne) Yao and Naren
                 Ramakrishnan and Trent Jaeger",
  title =        "Long-Span Program Behavior Modeling and Attack
                 Detection",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105761",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "The intertwined development of program attacks and
                  defenses has driven the evolution of program anomaly
                  detection methods. Emerging categories of program
                  attacks, e.g., non-control-data attacks and
                  data-oriented programming, are able to conform to
                  normal trace patterns in local views. This article
                 points out the deficiency of existing program anomaly
                 detection models against new attacks and presents
                 long-span behavior anomaly detection (LAD), a model
                 based on mildly context-sensitive grammar verification.
                  The key feature of LAD is its reasoning about correlations
                 among arbitrary events that occurred in long program
                 traces. It extends existing correlation analysis
                 between events at a stack snapshot, e.g., paired call
                 and ret, to correlation analysis among events that
                 historically occurred during the execution. The
                 proposed method leverages specialized machine learning
                 techniques to probe normal program behavior boundaries
                 in vast high-dimensional detection space. Its two-stage
                 modeling/detection design analyzes event correlation at
                 both binary and quantitative levels. Our prototype
                 successfully detects all reproduced real-world attacks
                 against sshd, libpcre, and sendmail. The detection
                 procedure incurs 0.1 ms to 1.3 ms overhead to profile
                 and analyze a single behavior instance that consists of
                 tens of thousands of function call or system call
                 events.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
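
%%% The two-stage design described in the abstract above (binary event
%%% correlation, then quantitative analysis) can be caricatured in a
%%% few lines: stage one rejects traces containing event co-occurrences
%%% never seen in training, and stage two flags traces whose event
%%% frequencies stray from the trained profile. This Python sketch is
%%% only that caricature, with hypothetical syscall-like events; it is
%%% far weaker than the paper's grammar-based, machine-learned LAD.

from collections import Counter
from itertools import combinations

def cooccurring_pairs(trace):
    """Event pairs that co-occur anywhere in a long trace,
    arbitrarily far apart (order-insensitive)."""
    return set(combinations(sorted(set(trace)), 2))

def train(traces):
    allowed, freq = set(), Counter()
    for t in traces:
        allowed |= cooccurring_pairs(t)
        freq.update(t)
    total = sum(freq.values())
    return allowed, {e: c / total for e, c in freq.items()}

def anomalous(trace, allowed, profile, tol=0.2):
    if cooccurring_pairs(trace) - allowed:      # stage 1: binary
        return True
    n = len(trace)                              # stage 2: quantitative
    return any(abs(trace.count(e) / n - profile.get(e, 0.0)) > tol
               for e in set(trace))

normal = [["open", "read", "close"], ["open", "read", "read", "close"]]
allowed, profile = train(normal)
print(anomalous(["open", "write", "close"], allowed, profile))  # True
print(anomalous(["open", "read", "close"], allowed, profile))   # False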

@Article{Ikram:2017:MCD,
  author =       "Muhammad Ikram and Lucky Onwuzurike and Shehroze
                 Farooqi and Emiliano {De Cristofaro} and Arik Friedman
                 and Guillaume Jourjon and Mohammed Ali Kaafar and M.
                 Zubair Shafiq",
  title =        "Measuring, Characterizing, and Detecting {Facebook}
                 Like Farms",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3121134",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "Online social networks offer convenient ways to reach
                 out to large audiences. In particular, Facebook pages
                 are increasingly used by businesses, brands, and
                 organizations to connect with multitudes of users
                 worldwide. As the number of likes of a page has become
                 a de-facto measure of its popularity and profitability,
                 an underground market of services artificially
                  inflating page likes (``like farms'') has emerged
                 alongside Facebook's official targeted advertising
                 platform. Nonetheless, besides a few media reports,
                 there is little work that systematically analyzes
                 Facebook pages' promotion methods. Aiming to fill this
                 gap, we present a honeypot-based comparative
                 measurement study of page likes garnered via Facebook
                 advertising and from popular like farms. First, we
                 analyze likes based on demographic, temporal, and
                 social characteristics and find that some farms seem to
                 be operated by bots and do not really try to hide the
                 nature of their operations, while others follow a
                 stealthier approach, mimicking regular users' behavior.
                 Next, we look at fraud detection algorithms currently
                 deployed by Facebook and show that they do not work
                 well to detect stealthy farms that spread likes over
                 longer timespans and like popular pages to mimic
                 regular users. To overcome their limitations, we
                 investigate the feasibility of timeline-based detection
                 of like farm accounts, focusing on characterizing
                 content generated by Facebook accounts on their
                 timelines as an indicator of genuine versus fake social
                 activity. We analyze a wide range of features extracted
                 from timeline posts, which we group into two main
                 categories: lexical and non-lexical. We find that like
                 farm accounts tend to re-share content more often, use
                 fewer words and poorer vocabulary, and more often
                 generate duplicate comments and likes compared to
                 normal users. Using relevant lexical and non-lexical
                  features, we build a classifier to detect like farm
                  accounts that achieves a precision higher than 99\%
                  and 93\% recall.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
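
%%% The final classification step in the abstract above lends itself to
%%% a compact sketch: score each account on a few of the reported
%%% lexical and non-lexical signals and train an off-the-shelf
%%% classifier. The feature values below are synthetic stand-ins (not
%%% the paper's dataset or exact feature set), and scikit-learn is an
%%% assumed dependency.

from sklearn.ensemble import RandomForestClassifier

# Per-account features: [re-share ratio, distinct-word ratio,
#                        duplicate-comment ratio]
X = [
    [0.90, 0.20, 0.60],   # farm-like: mostly re-shares, poor vocabulary
    [0.80, 0.30, 0.50],   # farm-like
    [0.20, 0.70, 0.00],   # genuine: original posts, rich vocabulary
    [0.10, 0.80, 0.10],   # genuine
]
y = [1, 1, 0, 0]          # 1 = like-farm account, 0 = genuine user

clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
print(clf.predict([[0.85, 0.25, 0.55]]))   # -> [1] (farm-like profile)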

@Article{Polyakov:2017:FPR,
  author =       "Yuriy Polyakov and Kurt Rohloff and Gyana Sahu and
                 Vinod Vaikuntanathan",
  title =        "Fast Proxy Re-Encryption for Publish\slash Subscribe
                 Systems",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3128607",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "We develop two IND-CPA-secure multihop unidirectional
                 Proxy Re-Encryption (PRE) schemes by applying the
                 Ring-LWE (RLWE) key switching approach from the
                 homomorphic encryption literature. Unidirectional PRE
                 is ideal for secure publish-subscribe operations where
                 a publisher encrypts information using a public key
                 without knowing upfront who the subscriber will be and
                 what private key will be used for decryption. The
                 proposed PRE schemes provide a multihop capability,
                 meaning that when PRE-encrypted information is
                 published onto a PRE-enabled server, the server can
                  either delegate access to specific clients or grant
                 other servers the right to delegate access. Our first
                 scheme (which we call NTRU-ABD-PRE) is based on a
                 variant of the NTRU-RLWE homomorphic encryption scheme.
                 Our second and main PRE scheme (which we call BV-PRE)
                 is built on top of the Brakerski-Vaikuntanathan (BV)
                 homomorphic encryption scheme and relies solely on the
                 RLWE assumption. We present an open-source C++
                 implementation of both schemes and discuss several
                 algorithmic and software optimizations. We examine
                 parameter selection tradeoffs in the context of
                 security, runtime/latency, throughput, ciphertext
                 expansion, memory usage, and multihop capabilities. Our
                 experimental analysis demonstrates that BV-PRE
                 outperforms NTRU-ABD-PRE in both single-hop and
                 multihop settings. The BV-PRE scheme has a lower time
                 and space complexity than existing IND-CPA-secure
                 lattice-based PRE schemes and requires small concrete
                 parameters, making the scheme computationally efficient
                 for use on low-resource embedded systems while still
                 providing 100 bits of security. We present practical
                 recommendations for applying the PRE schemes to several
                 use cases of ad hoc information sharing for
                 publish-subscribe operations.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
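
%%% The abstract above turns on the publish-subscribe dataflow of
%%% unidirectional proxy re-encryption: publish under one key, let the
%%% server transform ciphertexts toward later-chosen subscribers, hop
%%% after hop. The Python toy below shows only that dataflow; the XOR
%%% "crypto" is a deliberately insecure placeholder and, unlike the
%%% paper's RLWE-based schemes, is not actually unidirectional.

import secrets

def keygen():
    k = secrets.randbits(128)
    return k, k                        # (public, secret) collapsed here

def encrypt(pk, m):
    return m ^ pk

def rekeygen(sk_from, pk_to):
    return sk_from ^ pk_to             # delegation token for the server

def reencrypt(rk, ct):
    return ct ^ rk                     # server never sees the plaintext

def decrypt(sk, ct):
    return ct ^ sk

pub_pk, pub_sk = keygen()              # publisher
sub_pk, sub_sk = keygen()              # subscriber, unknown at publish time

ct = encrypt(pub_pk, 0xC0FFEE)         # published onto the PRE server
ct2 = reencrypt(rekeygen(pub_sk, sub_pk), ct)
assert decrypt(sub_sk, ct2) == 0xC0FFEE

# Multihop: a second delegation re-targets the same ciphertext again.
nxt_pk, nxt_sk = keygen()
ct3 = reencrypt(rekeygen(sub_sk, nxt_pk), ct2)
assert decrypt(nxt_sk, ct3) == 0xC0FFEE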

@Article{Rullo:2017:POS,
  author =       "Antonino Rullo and Daniele Midi and Edoardo Serra and
                 Elisa Bertino",
  title =        "{Pareto} Optimal Security Resource Allocation for
                 {Internet of Things}",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3139293",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "In many Internet of Things (IoT) application domains
                 security is a critical requirement, because malicious
                 parties can undermine the effectiveness of IoT-based
                 systems by compromising single components and/or
                 communication channels. Thus, a security infrastructure
                 is needed to ensure the proper functioning of such
                 systems even under attack. However, it is also critical
                 that security be at a reasonable resource and energy
                 cost. In this article, we focus on the problem of
                 efficiently and effectively securing IoT networks by
                 carefully allocating security resources in the network
                  area. In particular, given a set R of security
                  resources and a set A of attacks to be faced, our
                  method chooses the subset of R that best addresses
                  the attacks in A, together with the locations at
                  which to place those resources, so as to ensure
                  security coverage of all IoT devices at minimum cost
                  and energy consumption. We model our
                 problem according to game theory and provide a
                 Pareto-optimal solution in which the cost of the
                 security infrastructure, its energy consumption, and
                 the probability of a successful attack are minimized.
                 Our experimental evaluation shows that our technique
                 improves the system robustness in terms of packet
                 delivery rate for different network topologies.
                 Furthermore, we also provide a method for handling the
                 computation of the resource allocation plan for
                  large-scale network scenarios, where the optimization
                 problem may require an unreasonable amount of time to
                 be solved. We show how our proposed method drastically
                 reduces the computing time, while providing a
                 reasonable approximation of the optimal solution.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
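
%%% Given candidate allocations scored on the three objectives named in
%%% the abstract above (cost, energy consumption, probability of a
%%% successful attack, all minimized), the Pareto-optimal set has a
%%% precise meaning. The sketch below computes it for hypothetical
%%% scores; generating the candidates game-theoretically, as the paper
%%% does, is the hard part and is not reproduced here.

def dominates(a, b):
    """a dominates b if it is no worse in every objective and strictly
    better in at least one (all objectives are minimized)."""
    return all(x <= y for x, y in zip(a, b)) and a != b

def pareto_front(candidates):
    return [c for c in candidates
            if not any(dominates(o, c) for o in candidates)]

# Hypothetical allocations: (cost, energy, P(successful attack))
allocations = {
    "A": (10, 5.0, 0.30),
    "B": (12, 4.0, 0.25),
    "C": (15, 6.0, 0.28),   # dominated by B
    "D": (11, 5.5, 0.35),   # dominated by A
}
front = pareto_front(list(allocations.values()))
print([k for k, v in allocations.items() if v in front])   # ['A', 'B']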

@Article{Su:2017:DPM,
  author =       "Dong Su and Jianneng Cao and Ninghui Li and Elisa
                 Bertino and Min Lyu and Hongxia Jin",
  title =        "Differentially Private {$K$}-Means Clustering and a
                 Hybrid Approach to Private Optimization",
  journal =      j-TOPS,
  volume =       "20",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3133201",
  ISSN =         "2471-2566 (print), 2471-2574 (electronic)",
  ISSN-L =       "2471-2566",
  bibdate =      "Sat Dec 23 09:59:06 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/tops.bib",
  abstract =     "$k$-means clustering is a widely used cluster
                  analysis technique in machine learning. In this
                  article, we study the problem of differentially private
                  $k$-means clustering. Several state-of-the-art methods
                  follow the single-workload approach, which adapts an
                  existing machine-learning algorithm by making each step
                  private. However, most of them do not have satisfactory
                  empirical performance. In this work, we develop
                  techniques to analyze the empirical error behaviors of
                  one of the state-of-the-art single-workload approaches,
                  DPLloyd, which is a differentially private version of
                  the Lloyd algorithm for $k$-means clustering. Based
                  on the analysis, we propose an improvement of DPLloyd.
                  We also propose a new algorithm for $k$-means clustering
                  from the perspective of the noninteractive approach,
                  which publishes a synopsis of the input dataset and
                  then runs $k$-means on synthetic data generated from the
                  synopsis. We denote this approach by EUGkM. After
                 analyzing the empirical error behaviors of EUGkM, we
                 further propose a hybrid approach that combines our
                 DPLloyd improvement and EUGkM. Results from extensive
                 and systematic experiments support our analysis and
                 demonstrate the effectiveness of the DPLloyd
                 improvement, EUGkM, and the hybrid approach.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Privacy and Security (TOPS)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1547",
}
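
%%% The DPLloyd approach discussed in the abstract above privatizes the
%%% classic Lloyd iteration by perturbing the per-cluster statistics it
%%% aggregates. Below is a minimal numpy sketch of one such noisy step;
%%% it omits the careful splitting of the privacy budget across
%%% iterations and the sensitivity accounting that the real algorithm
%%% needs, and the data are synthetic.

import numpy as np

rng = np.random.default_rng(0)

def dp_lloyd_step(points, centroids, eps):
    """One Lloyd iteration with Laplace noise added to each cluster's
    point count and per-coordinate sum before recomputing centroids
    (data assumed scaled into [-1, 1]^d)."""
    d = points.shape[1]
    dist = ((points[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
    assign = dist.argmin(axis=1)
    new = []
    for j in range(len(centroids)):
        members = points[assign == j]
        count = len(members) + rng.laplace(scale=1.0 / eps)
        sums = members.sum(axis=0) + rng.laplace(scale=1.0 / eps, size=d)
        new.append(sums / max(count, 1.0))
    return np.vstack(new)

pts = np.vstack([rng.normal(-0.5, 0.1, (50, 2)),
                 rng.normal(+0.5, 0.1, (50, 2))])
cent = np.array([[-1.0, -1.0], [1.0, 1.0]])
for _ in range(5):
    cent = dp_lloyd_step(pts, cent, eps=1.0)
print(cent)   # near (-0.5, -0.5) and (0.5, 0.5), up to noise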