%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.02",
%%%     date            = "14 October 2017",
%%%     time            = "08:49:17 MDT",
%%%     filename        = "imwut.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "25792 1698 9817 90810",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; Proceedings of the ACM
%%%                        on Interactive, Mobile, Wearable and
%%%                        Ubiquitous Technologies (IMWUT)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        Proceedings of the ACM on Interactive,
%%%                        Mobile, Wearable and Ubiquitous Technologies
%%%                        (IMWUT) (CODEN ????, ISSN 2474-9567).  The
%%%                        journal appears annually, and publication
%%%                        began with volume 1, number 1, in March 2017.
%%%
%%%                        At version 1.02, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2017 (  34)
%%%
%%%                             Article:         34
%%%
%%%                             Total entries:   34
%%%
%%%                        The journal Web pages can be found at:
%%%
%%%                            http://imwut.acm.org/
%%%                            http://imwut.acm.org/archive-toc.cfm
%%%
%%%                        The journal table of contents page is at:
%%%
%%%                            http://dl.acm.org/citation.cfm?id=J1566
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        The initial draft was extracted from the ACM
%%%                        Web pages.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        bibsource keys in the bibliography entries
%%%                        below indicate the entry originally came
%%%                        from the computer science bibliography
%%%                        archive, even though it has likely since
%%%                        been corrected and updated.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
%%% Preamble: loads Beebe's author-name macro file (bibnames.sty) and
%%% defines \TM, a raised ``TM'' trademark superscript, for use in
%%% entry fields.
@Preamble{"\input bibnames.sty" #
    "\def \TM {${}^{\sc TM}$}"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:
%%% ack-nhfb: bibliographer contact string, referenced from each entry
%%% below via ``acknowledgement = ack-nhfb''.
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
%%% j-IMWUT: full journal name, referenced from each entry below via
%%% ``journal = j-IMWUT'' so the spelling can be changed in one place.
@String{j-IMWUT                 = "Proceedings of the ACM on Interactive,
                                  Mobile, Wearable and Ubiquitous
                                  Technologies (IMWUT)"}

%%% ====================================================================
%%% Bibliography entries:
%%% Editorial front matter for volume 1, number 1 (March 2017).
%%% NOTE(review): ``articleno'' is empty --- the editorial appears to be
%%% unnumbered front matter (article numbering starts at 1 with the
%%% next entry); confirm against the ACM DL record before filling in.
@Article{Abowd:2017:E,
  author =       "Gregory D. Abowd and Vassilis Kostakos and Silvia
                 Santini and James Scott and Koji Yatani",
  title =        "Editorial",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "1",
  pages =        "1--1",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3075960",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Jun 16 10:24:00 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3075960",
  acknowledgement = ack-nhfb,
  articleno =    "",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 1, article 1 (pages 1:1--1:19), March 2017.
@Article{Dementyev:2017:DWD,
  author =       "Artem Dementyev and Christian Holz",
  title =        "{DualBlink}: A Wearable Device to Continuously Detect,
                 Track, and Actuate Blinking For Alleviating Dry Eyes
                 and Computer Vision Syndrome",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:19",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3053330",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Jun 16 10:24:00 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3053330",
  abstract =     "Increased visual attention, such as during computer
                 use leads to less blinking, which can cause dry eyes
                 --- the leading cause of computer vision syndrome. As
                 people spend more time looking at screens on mobile and
                 desktop devices, computer vision syndrome is becoming
                 epidemic in today's population, leading to blurry
                 vision, fatigue, and a reduced quality of life. One way
                 to alleviate dry eyes is increased blinking. In this
                 paper, we present a series of glasses-mounted devices
                 that track the wearer's blink rate and, upon absent
                 blinks, trigger blinks through actuation: light
                 flashes, physical taps, and small puffs of air near the
                 eye. We conducted a user study to evaluate the
                 effectiveness of our devices and found that air puff
                 and physical tap actuations result in a 36\% increase
                 in participants' average blink rate. Air puff thereby
                 struck the best compromise between effective blink
                 actuations and low distraction ratings from
                 participants. In a follow-up study, we found that high
                 intensity, short puffs near the eye were most effective
                 in triggering blinks while receiving only low-rated
                 distraction and invasiveness ratings from participants.
                 We conclude this paper with two miniaturized and
                 self-contained DualBlink prototypes, one integrated
                 into the frame of a pair of glasses and the other one
                 as a clip-on for existing glasses. We believe that
                 DualBlink can serve as an always-available and viable
                 option to treat computer vision syndrome in the
                 future.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 1, article 2 (pages 2:1--2:18), March 2017.
@Article{Hassan:2017:FEB,
  author =       "Mahmoud Hassan and Florian Daiber and Frederik Wiehr
                 and Felix Kosmalla and Antonio Kr{\"u}ger",
  title =        "{FootStriker}: An {EMS}-based Foot Strike Assistant
                 for Running",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:18",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3053332",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Jun 16 10:24:00 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3053332",
  abstract =     "In running, knee-related injuries are very common. The
                 main cause are high impact forces when striking the
                 ground with the heel first. Mid- or forefoot running is
                 generally known to reduce impact loads and to be a more
                 efficient running style. In this paper, we introduce a
                 wearable running assistant, consisting of an electrical
                 muscle stimulation (EMS) device and an insole with
                 force sensing resistors. It detects heel striking and
                 actuates the calf muscles during the flight phase to
                 control the foot angle before landing. We conducted a
                 user study, in which we compared the classical coaching
                 approach using slow motion video analysis as a terminal
                 feedback to our proposed real-time EMS feedback. The
                 results show that EMS actuation significantly
                 outperforms traditional coaching, i.e., a decreased
                 average heel striking rate, when using the system. As
                 an implication, EMS feedback can generally be
                 beneficial for the motor learning of complex,
                 repetitive movements.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 1, article 3 (pages 3:1--3:24), March 2017.
%%% NOTE(review): two apparent text-extraction artifacts in the
%%% abstract below: ``attacks.We'' is missing a space, and ``109
%%% guesses'' has most likely lost a superscript (10^9 guesses);
%%% confirm against the published abstract before correcting the data.
@Article{Liu:2017:GAU,
  author =       "Can Liu and Gradeigh D. Clark and Janne Lindqvist",
  title =        "Guessing Attacks on User-Generated Gesture Passwords",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:24",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3053331",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Jun 16 10:24:00 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3053331",
  abstract =     "Touchscreens, the dominant input type for mobile
                 phones, require unique authentication solutions.
                 Gesture passwords have been proposed as an alternative
                 ubiquitous authentication technique. Prior security
                 analysis has relied on inconsistent measurements such
                 as mutual information or shoulder surfing attacks.We
                 present the first approach for measuring the security
                 of gestures with guessing attacks that model real-world
                 attacker behavior. Our major contributions are: (1) a
                 comprehensive analysis of the weak subspace for gesture
                 passwords, (2) a method for enumerating the size of the
                 full theoretical gesture password space, (3) a design
                 of a novel guessing attack against user-chosen gestures
                 using a dictionary, and (4) a brute-force attack used
                 for benchmarking the performance of the guessing
                 attack. Our dictionary attack, tested on newly
                 collected user data, achieves a cracking rate of
                 47.71\% after two weeks of computation using 109
                 guesses. This is a difference of 35.78 percentage
                 points compared to the 11.93\% cracking rate of the
                 brute-force attack. In conclusion, users are not taking
                 full advantage of the large theoretical password space
                 and instead choose their gesture passwords from weak
                 subspaces. We urge for further work on addressing this
                 challenge.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 4, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.  Page numbers
%%% for this issue were not yet available (``??--??'' retained).
@Article{Akazue:2017:UTS,
  author =       "Moses Akazue and Martin Halvey and Lynne Baillie",
  title =        "Using Thermal Stimuli to Enhance Photo-Sharing in
                 Social Media",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090050",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090050",
  abstract =     "Limited work has been undertaken to show how the
                 emotive ability of thermal stimuli can be used for
                 interaction purposes. One potential application area is
                 using thermal stimuli to influence emotions in images
                 shared online such as social media platforms. This
                 paper presents a two-part study, which examines how the
                 documented emotive property of thermal stimuli can be
                 applied to enhance social media images. Participants in
                 part-one supplied images from their personal collection
                 or social media profiles, and were asked to augment
                 each image with thermal stimuli based on the emotions
                 they wanted to enhance or reduce. Part-one participants
                 were interviewed to understand the effects they wanted
                 augmented images to have. In part-two, these augmented
                 images were perceived by a different set of
                 participants in a simulated social media interface.
                 Results showed strong agreement between the emotions
                 augmented images were designed to evoke and the
                 emotions they actually evoked as perceived by part-two
                 participants. Participants in part-one selected thermal
                 stimuli augmentation intended to modulate valence and
                 arousal in images as a way of enhancing the realism of
                 the images augmented. Part-two results indicate this
                 was achieved as participants perceived thermal stimuli
                 augmentation reduced valence in negative images and
                 modulated valence and arousal in positive images.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 5, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
@Article{Bae:2017:DDE,
  author =       "Sangwon Bae and Denzil Ferreira and Brian Suffoletto
                 and Juan C. Puyana and Ryan Kurtz and Tammy Chung and
                 Anind K. Dey",
  title =        "Detecting Drinking Episodes in Young Adults Using
                 {Smartphone}-based Sensors",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090051",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090051",
  abstract =     "Alcohol use in young adults is common, with high rates
                 of morbidity and mortality largely due to periodic,
                 heavy drinking episodes (HDEs). Behavioral
                 interventions delivered through electronic
                 communication modalities (e.g., text messaging) can
                 reduce the frequency of HDEs in young adults, but
                 effects are small. One way to amplify these effects is
                 to deliver support materials proximal to drinking
                 occasions, but this requires knowledge of when they
                 will occur. Mobile phones have built-in sensors that
                 can potentially be useful in monitoring behavioral
                 patterns associated with the initiation of drinking
                 occasions. The objective of our work is to explore the
                 detection of daily-life behavioral markers using mobile
                 phone sensors and their utility in identifying drinking
                 occasions. We utilized data from 30 young adults aged
                 21-28 with past hazardous drinking and collected mobile
                 phone sensor data and daily Experience Sampling Method
                 (ESM) of drinking for 28 consecutive days. We built a
                 machine learning-based model that is 96.6\% accurate at
                 identifying non-drinking, drinking and heavy drinking
                 episodes. We highlight the most important features for
                 detecting drinking episodes and identify the amount of
                 historical data needed for accurate detection. Our
                 results suggest that mobile phone sensors can be used
                 for automated, continuous monitoring of at-risk
                 populations to detect drinking episodes and support the
                 delivery of timely interventions.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 6, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
@Article{Baumann:2017:EBC,
  author =       "Paul Baumann and Silvia Santini",
  title =        "Every Byte Counts: Selective Prefetching for Mobile
                 Applications",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090052",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090052",
  abstract =     "Quick responses to user actions are instrumental to
                 the success of mobile applications. To ensure such
                 responsiveness, applications often prefetch data
                 objects before the user requests them. This way,
                 applications can avoid the need to retrieve data
                 through slow network connections during user
                 interactions. However, prefetches may also harm. They
                 increase launch delays and might cause substantial
                 amounts of data to be downloaded through energy-hungry,
                 cellular connections. In this paper, we propose EBC, a
                 novel algorithm to schedule application prefetches and
                 overcome their drawbacks. EBC computes application
                 usage probabilities and traffic volume estimates to
                 determine when and for which applications prefetches
                 should be triggered. Thereby, it applies different
                 strategies depending on whether a cellular or Wi-Fi
                 connection is available. We evaluate the performance of
                 EBC on two publicly available, large-scale data sets:
                 LiveLab and Device Analyzer. Our results show that EBC
                 can lower launch delays and ensure freshness of
                 application content. At the same time, it reduces the
                 amount of data downloaded through cellular connections.
                 On the Device Analyzer data set, for instance, EBC
                 achieves a 10\% reduction in cellular traffic and a
                 36\% better average freshness with respect to its
                 closest competitor.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 7, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
@Article{Butscher:2017:ITO,
  author =       "Simon Butscher and Maximilian D{\"u}rr and Harald
                 Reiterer",
  title =        "{InformationSense}: Trade-offs for the Design and the
                 Implementation of a Large Highly Deformable Cloth
                 Display",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090053",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090053",
  abstract =     "Deformable displays can provide two major benefits
                 compared to rigid displays: Objects of different shapes
                 and deformabilities, situated in our physical
                 environment, can be equipped with deformable displays,
                 and users can benefit from their pre-existing knowledge
                 about the interaction with physical objects when
                 interacting with deformable displays. In this article
                 we present InformationSense, a large, highly deformable
                 cloth display. The article contributes to two research
                 areas in the context of deformable displays: It
                 presents an approach for the tracking of large, highly
                 deformable surfaces, and it presents one of the first
                 UX analyses of cloth displays that will help with the
                 design of future interaction techniques for this kind
                 of display. The comparison of InformationSense with a
                 rigid display interface unveiled the trade-off that
                 while users are able to interact with InformationSense
                 more naturally and significantly preferred
                 InformationSense in terms of joy of use, they preferred
                 the rigid display interfaces in terms of efficiency.
                 This suggests that deformable displays are already
                 suitable if high hedonic qualities are important but
                 need to be enhanced with additional digital power if
                 high pragmatic qualities are required.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 8, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
@Article{Chen:2017:MMT,
  author =       "Ke-Yu Chen and Rahul C. Shah and Jonathan Huang and
                 Lama Nachman",
  title =        "{Mago}: Mode of Transport Inference Using the
                 {Hall}-Effect Magnetic Sensor and Accelerometer",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090054",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090054",
  abstract =     "In this paper, we introduce Mago, a novel system that
                 can infer a person's mode of transport (MOT) using the
                 Hall-effect magnetic sensor and accelerometer present
                 in most smart devices. When a vehicle is moving, the
                 motions of its mechanical components such as the
                 wheels, transmission and the differential distort the
                 earth's magnetic field. The magnetic field is distorted
                 corresponding to the vehicle structure (e.g., bike
                 chain or car transmission system), which manifests
                 itself as a strong signal for sensing a person's
                 transportation modality. We utilize this magnetic
                 signal combined with the accelerometer and design a
                 robust algorithm for the MOT detection. In particular,
                 our system extracts frame-based features from the
                 sensor data and can run in nearly real-time with only a
                 few seconds of delay. We evaluated Mago using over 70
                 hours of daily commute data from 7 participants and the
                 leave-one-out analysis of our cross-user, cross-device
                 model reports an average accuracy of 94.4\% among seven
                 classes (stationary, bus, bike, car, train, light rail
                 and scooter). Besides MOT, our system is able to
                 reliably differentiate the phone's in-car position at
                 an average accuracy of 92.9\%. We believe Mago could
                 potentially benefit many contextually-aware
                 applications that require MOT detection such as a
                 digital personal assistant or a life coaching
                 application.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 9, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
%%% NOTE(review): the literal ``~'' characters in the abstract render
%%% as nonbreaking spaces in TeX; they probably stand for an
%%% approximation sign --- confirm against the published abstract.
@Article{Dobbelstein:2017:PWD,
  author =       "David Dobbelstein and Christian Winkler and Gabriel
                 Haas and Enrico Rukzio",
  title =        "{PocketThumb}: a Wearable Dual-Sided Touch Interface
                 for Cursor-based Control of Smart-Eyewear",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090055",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090055",
  abstract =     "We present PocketThumb, a wearable touch interface for
                 smart-eyewear that is embedded into the fabrics of the
                 front trouser pocket. The interface is reachable from
                 outside and inside of the pocket to allow for a
                 combined dual-sided touch input. The user can control
                 an absolute cursor with their thumb sliding along the
                 fabric from the inside, while at the same time tapping
                 or swiping with fingers from the outside to perform
                 joint gestures. This allows for resting the hand in a
                 comfortable and quickly accessible position, while
                 performing interaction with a high expressiveness that
                 is feasible in mobile scenarios. In a cursor-based
                 target selection study, we found that our introduced
                 dual-sided touch interaction is significantly faster in
                 comparison to common single-sided absolute as well as
                 relative touch interaction (~19\%, resp. ~23\% faster).
                 The effect is largest in the mobile conditions standing
                 and walking (up to ~31\% faster).",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

%%% Volume 1, number 2, article 10, June 2017.  ISSN corrected from the
%%% ``????'' placeholder (journal ISSN 2474-9567, per the file header
%%% and the volume 1, number 1 entries); ISSN-L added and fjournal
%%% rewrapped for consistency with the earlier entries.
@Article{Finnigan:2017:AAE,
  author =       "S. Mitchell Finnigan and A. K. Clear and G.
                 Farr-Wharton and K. Ladha and R. Comber",
  title =        "Augmenting Audits: Exploring the Role of Sensor
                 Toolkits in Sustainable Buildings Management",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090075",
  ISSN =         "2474-9567",
  ISSN-L =       "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090075",
  abstract =     "Audits are commonly carried out by facilities managers
                 (FMs) to quantify the sustainability and performance of
                 the buildings they manage, informing improvements to
                 infrastructure for resource and cost savings, and
                 assessing compliance with standards and legislation.
                 The scope for what can be audited is limited by
                 available infrastructure. In this article, we
                 investigate the utility of a flexible sensor toolkit to
                 enhance existing energy auditing practices. We present
                 findings from a qualitative study with FM and student
                 auditor participants from 3 organisations. Our study
                 covers how these toolkits were used and integrated into
                 auditing practices within these organisations, and the
                 opportunities and issues for resource management that
                 arose as a result. We conclude with design implications
                 for toolkits to support sensor-augmented audits, make
                 recommendations towards a deployment protocol for
                 sensor toolkits used in this context, and develop
                 broader considerations for how future standards and
                 policies might be adapted to leverage this potential.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Guan:2017:EDL,
  author =       "Yu Guan and Thomas Pl{\"o}tz",
  title =        "Ensembles of Deep {LSTM} Learners for Activity
                 Recognition using Wearables",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090076",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090076",
  abstract =     "Recently, deep learning (DL) methods have been
                 introduced very successfully into human activity
                 recognition (HAR) scenarios in ubiquitous and wearable
                 computing. Especially the prospect of overcoming the
                 need for manual feature design combined with superior
                 classification capabilities render deep neural networks
                 very attractive for real-life HAR applications. Even
                 though DL-based approaches now outperform the
                 state-of-the-art in a number of recognition tasks,
                 still substantial challenges remain. Most prominently,
                 issues with real-life datasets, typically including
                 imbalanced datasets and problematic data quality, still
                 limit the effectiveness of activity recognition using
                 wearables. In this paper we tackle such challenges
                 through Ensembles of deep Long Short Term Memory (LSTM)
                 networks. LSTM networks currently represent the
                 state-of-the-art with superior classification
                 performance on relevant HAR benchmark datasets. We have
                 developed modified training procedures for LSTM
                 networks and combine sets of diverse LSTM learners into
                 classifier collectives. We demonstrate that Ensembles
                 of deep LSTM learners outperform individual LSTM
                 networks and thus push the state-of-the-art in human
                 activity recognition using wearables. Through an
                 extensive experimental evaluation on three standard
                 benchmarks (Opportunity, PAMAP2, Skoda) we demonstrate
                 the excellent recognition capabilities of our approach
                 and its potential for real-life applications of human
                 activity recognition.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Gummeson:2017:RLB,
  author =       "Jeremy Gummeson and James McCann and Chouchang (Jack)
                 Yang and Damith Ranasinghe and Scott Hudson and Alanson
                 Sample",
  title =        "{RFID} Light Bulb: Enabling Ubiquitous Deployment of
                 Interactive {RFID} Systems",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090077",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090077",
  abstract =     "Radio-Frequency Identification (RFID) technology has
                 the potential to provide inexpensive, wireless,
                 battery-free connectivity and interactivity for objects
                 that are traditionally not instrumented. However, these
                 systems have not seen widespread deployment outside
                 warehouses and supply chains, owing to the complexity
                 of installing bulky RFID readers, antennas, and their
                 supporting power and network infrastructure. In this
                 work, we leverage advances in semiconductor optics, RF
                 antenna design and system integration to create a
                 hybrid RFID reader and smart LED lamp, in the form
                 factor of a standard light bulb. This makes deploying
                 RFID readers literally as easy as screwing in a light
                 bulb. We explore the home-scale RFID interactions
                 enabled by these smart bulbs, including infrastructure
                 monitoring, localization and guided navigation, and
                 standalone lighting effects.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Hintze:2017:LSL,
  author =       "Daniel Hintze and Philipp Hintze and Rainhard D.
                 Findling and Ren{\'e} Mayrhofer",
  title =        "A Large-Scale, Long-Term Analysis of Mobile Device
                 Usage Characteristics",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090078",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090078",
  abstract =     "Today, mobile devices like smartphones and tablets
                 have become an indispensable part of people's lives,
                 posing many new questions e.g., in terms of interaction
                 methods, but also security. In this paper, we conduct a
                 large scale, long term analysis of mobile device usage
                 characteristics like session length, interaction
                 frequency, and daily usage in locked and unlocked state
                 with respect to location context and diurnal pattern.
                 Based on detailed logs from 29,279 mobile phones and
                 tablets representing a total of 5,811 years of usage
                 time, we identify and analyze 52.2 million usage
                 sessions with some participants providing data for more
                 than four years. Our results show that context has a
                 highly significant effect on both frequency and extent
                 of mobile device usage, with mobile phones being used
                 twice as much at home compared to in the office.
                 Interestingly, devices are unlocked for only 46\% of
                 the interactions. We found that with an average of 60
                 interactions per day, smartphones are used almost
                 thrice as often as tablet devices (23), while usage
                 sessions on tablets are three times longer, hence are
                 used almost for an equal amount of time throughout the
                 day. We conclude that usage session characteristics
                 differ considerably between tablets and smartphones.
                 These results inform future approaches to mobile
                 interaction as well as security.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Jain:2017:PUS,
  author =       "Milan Jain and Amarjeet Singh and Vikas Chandan",
  title =        "{Portable+}: A Ubiquitous and Smart Way Towards
                 Comfortable Energy Savings",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090079",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090079",
  abstract =     "An air conditioner (AC) consumes a significant
                 proportion of the total household power consumption.
                 Primarily used in developing countries, decentralised
                 AC has an inbuilt thermostat to cool the room to a
                 temperature, manually set by the users. However,
                 residents are incapable of specifying their goal
                 through these thermostats - maximise their comfort or
                 save AC energy. State-of-the-art portable thermostats
                 emulate AC remotes and assist occupants in remotely
                 changing the thermostat temperature, through their
                 smartphones. We propose extending such thermostats to
                 portable+ by adding a Comfort-Energy Trade-off (CET)
                 knob, realised through an optimisation framework to
                 allow users to balance their comfort and the savings
                 without worrying about the right set temperature.
                 Analysis based on real data, collected from a
                 controlled experiment (across two rooms for two weeks)
                 and an in-situ deployment (across five rooms for three
                 months), indicates that portable+ thermostats can
                 reduce residents' discomfort by 23\% (CET selection for
                 maximal comfort) and save 26\% energy when CET is set
                 for maximising savings.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Johnson:2017:BWC,
  author =       "I. Johnson and J. Henderson and C. Perry and J.
                 Sch{\"o}ning and B. Hecht",
  title =        "Beautiful \ldots{} but at What Cost?: An Examination
                 of Externalities in Geographic Vehicle Routing",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090080",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090080",
  abstract =     "Millions of people use platforms such as Google Maps
                 to search for routes to their desired destinations.
                 Recently, researchers and mapping platforms have shown
                 growing interest in optimizing routes for criteria
                 other than travel time, e.g. simplicity, safety, and
                 beauty. However, despite the ubiquity of algorithmic
                 routing and its potential to define how millions of
                 people move around the world, very little is known
                 about the externalities that arise when adopting these
                 new optimization criteria, e.g. potential
                 redistribution of traffic to certain neighborhoods and
                 increased route complexity (with its associated risks).
                 In this paper, we undertake the first controlled
                 examination of these externalities, doing so across
                 multiple mapping platforms, alternative optimizations,
                 and cities. We find, for example, that scenic routing
                 (i.e. ``beauty''-optimized routing) would remove
                 vehicles from highways, greatly increase traffic around
                 parks, and, in certain cases, do the same for
                 high-income areas. Our results also highlight that the
                 interaction between routing criteria and urban
                 structure is complex and effects vary from city to
                 city, an important consideration for the growing
                 literature on alternative routing strategies. Finally,
                 to address the lack of open implementations of
                 alternative routing algorithms and controlled routing
                 evaluation frameworks, we are releasing our alternative
                 routing and evaluation platform with this paper.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Kucera:2017:TCD,
  author =       "Jan Kucera and James Scott and Nicholas Chen and
                 Patrick Olivier and Steve Hodges",
  title =        "Towards Calm Displays: Matching Ambient Illumination
                 in Bedrooms",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090081",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090081",
  abstract =     "We present a system for making emissive computer
                 displays (LCDs) look like they are reflective, i.e. not
                 emitting light but instead reflecting ambient light, an
                 effect that we call a ``calm display''. We achieve this
                 effect by using a light sensor and a one-time
                 calibration process to drive an algorithm which
                 controls the display's backlight intensity and gamma
                 correction functionality to continually match the
                 brightness and chromaticity of the ambient light. We
                 present an experimental evaluation of our system,
                 showing quantitatively that the color and brightness
                 output by our system is perceptually close to that of a
                 piece of paper under similar lighting conditions. We
                 argue that calm displays can more easily fade into the
                 background, and further that they are more suitable for
                 environments such as bedrooms where glowing displays
                 are often out-of-place. We validate these claims and
                 more generally explore users' perception of calm
                 displays, through a field study of an LCD display
                 deployed in participants' bedrooms.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Lu:2017:BEF,
  author =       "Yiqin Lu and Chun Yu and Xin Yi and Yuanchun Shi and
                 Shengdong Zhao",
  title =        "{BlindType}: Eyes-Free Text Entry on Handheld Touchpad
                 by Leveraging Thumb's Muscle Memory",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090083",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090083",
  abstract =     "Eyes-free input is desirable for ubiquitous computing,
                 since interacting with mobile and wearable devices
                 often competes for visual attention with other devices
                 and tasks. In this paper, we explore eyes-free typing
                 on a touchpad using one thumb, wherein a user taps on
                 an imaginary QWERTY keyboard while receiving text
                 feedback on a separate screen. Our hypothesis is that
                 users can transfer their typing ability obtained from
                 visible keyboards to eyes-free use. We propose two
                 statistical decoding algorithms to infer users'
                 eyes-free input: the absolute algorithm and the
                 relative algorithm. The absolute algorithm infers user
                 input based on the absolute position of touch
                 endpoints, while the relative algorithm infers based on
                 the vectors between successive touch endpoints.
                 Evaluation results showed users could achieve
                 satisfying performance with both algorithms. Text entry
                 rate was 17-23 WPM (words per minute) depending on the
                 algorithm used. In comparison, a baseline cursor-based
                 text entry method yielded only 7.66 WPM. In conclusion,
                 our research demonstrates for the first time the
                 feasibility of thumb-based eyes-free typing, which
                 provides a new possibility for text entry on ubiquitous
                 computing platforms such as smart TVs and HMDs.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Maag:2017:SMH,
  author =       "Balz Maag and Zimu Zhou and Olga Saukh and Lothar
                 Thiele",
  title =        "{SCAN}: Multi-Hop Calibration for Mobile Sensor
                 Arrays",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090084",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090084",
  abstract =     "Urban air pollution monitoring with mobile, portable,
                 low-cost sensors has attracted increasing research
                 interest for their wide spatial coverage and affordable
                 expenses to the general public. However, low-cost air
                 quality sensors not only drift over time but also
                 suffer from cross-sensitivities and dependency on
                 meteorological effects. Therefore calibration of
                 measurements from low-cost sensors is indispensable to
                 guarantee data accuracy and consistency to be fit for
                 quantitative studies on air pollution. In this work we
                 propose sensor array network calibration (SCAN), a
                 multi-hop calibration technique for dependent low-cost
                 sensors. SCAN is applicable to sets of co-located,
                 heterogeneous sensors, known as sensor arrays, to
                 compensate for cross-sensitivities and dependencies on
                 meteorological influences. SCAN minimizes error
                 accumulation over multiple hops of sensor arrays, which
                 is unattainable with existing multi-hop calibration
                 techniques. We formulate SCAN as a novel constrained
                 least-squares regression and provide a closed-form
                 expression of its regression parameters. We
                 theoretically prove that SCAN is free from regression
                 dilution even in presence of measurement noise.
                 In-depth simulations demonstrate that SCAN outperforms
                 various calibration techniques. Evaluations on two
                 real-world low-cost air pollution sensor datasets
                 comprising 66 million samples collected over three
                 years show that SCAN yields 16\% to 60\% lower error
                 than state-of-the-art calibration techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Mariakakis:2017:BSB,
  author =       "Alex Mariakakis and Megan A. Banks and Lauren Phillipi
                 and Lei Yu and James Taylor and Shwetak N. Patel",
  title =        "{BiliScreen}: {Smartphone}-Based Scleral Jaundice
                 Monitoring for Liver and Pancreatic Disorders",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090085",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090085",
  abstract =     "Pancreatic cancer has one of the worst survival rates
                 amongst all forms of cancer because its symptoms
                 manifest later into the progression of the disease. One
                 of those symptoms is jaundice, the yellow discoloration
                 of the skin and sclera due to the buildup of bilirubin
                 in the blood. Jaundice is only recognizable to the
                 naked eye in severe stages, but a ubiquitous test using
                 computer vision and machine learning can detect milder
                 forms of jaundice. We propose BiliScreen, a smartphone
                 app that captures pictures of the eye and produces an
                 estimate of a person's bilirubin level, even at levels
                 normally undetectable by the human eye. We test two
                 low-cost accessories that reduce the effects of
                 external lighting: (1) a 3D-printed box that controls
                 the eyes' exposure to light and (2) paper glasses with
                 colored squares for calibration. In a 70-person
                 clinical study, we found that BiliScreen with the box
                 achieves a Pearson correlation coefficient of 0.89 and
                 a mean error of $-0.09 \pm 2.76$ mg/dl in predicting a
                 person's bilirubin level. As a screening tool,
                 BiliScreen identifies cases of concern with a
                 sensitivity of 89.7\% and a specificity of 96.8\% with
                 the box accessory.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Niforatos:2017:CLM,
  author =       "Evangelos Niforatos and Caterina Cinel and Cathleen
                 Cortis Mack and Marc Langheinrich and Geoff Ward",
  title =        "Can Less be More?: Contrasting Limited, Unlimited, and
                 Automatic Picture Capture for Augmenting Memory
                 Recall",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090086",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090086",
  abstract =     "Today's abundance of cheap digital storage in the
                 form of tiny memory cards put literally no bounds on
                 the number of images one can capture with one's digital
                 camera or smartphone during an event. However, prior
                 work has shown that taking many pictures may actually
                 make us remember less of a particular event. Does
                 automated picture taking (lifelogging) help avoid this,
                 yet still offer to capture meaningful pictures? In this
                 work, we investigate the effect of capture modality
                 (i.e., limited, unlimited, automatic, and no capture)
                 on people's ability to recall a past event - with and
                 without the support of the pictures captured through
                 these modalities. Our results from a field experiment
                 with 83 participants show that capturing fewer pictures
                 does not necessarily lead to the capture of more
                 relevant pictures. However, when controlling for number
                 of pictures taken, our results show that having a
                 limited number of pictures to capture may lead to
                 pictures with increased memory value. At the same time,
                 automated capture failed to produce pictures that would
                 help remember the past experience better.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Price:2017:LYL,
  author =       "Blaine A. Price and Avelie Stuart and Gul Calikli and
                 Ciaran McCormick and Vikram Mehta and Luke Hutton and
                 Arosha K. Bandara and Mark Levine and Bashar Nuseibeh",
  title =        "Logging you, Logging me: A Replicable Study of Privacy
                 and Sharing Behaviour in Groups of Visual Lifeloggers",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090087",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090087",
  abstract =     "Low cost digital cameras in smartphones and wearable
                 devices make it easy for people to automatically
                 capture and share images as a visual lifelog. Having
                 been inspired by a US campus based study that explored
                 individual privacy behaviours of visual lifeloggers, we
                 conducted a similar study on a UK campus, however we
                 also focussed on the privacy behaviours of groups of
                 lifeloggers. We argue for the importance of
                 replicability and therefore we built a publicly
                 available toolkit, which includes camera design, study
                 guidelines and source code. Our results show some
                 similar sharing behaviour to the US based study: people
                 tried to preserve the privacy of strangers, but we
                 found fewer bystander reactions despite using a more
                 obvious camera. In contrast, we did not find a
                 reluctance to share images of screens but we did find
                 that images of vices were shared less. Regarding
                 privacy behaviours in groups of lifeloggers, we found
                 that people were more willing to share images of people
                 they were interacting with than of strangers, that
                 lifelogging in groups could change what defines a
                 private space, and that lifelogging groups establish
                 different rules to manage privacy for those inside and
                 outside the group.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Sapiezynski:2017:IPP,
  author =       "Piotr Sapiezynski and Arkadiusz Stopczynski and David
                 Kofoed Wind and Jure Leskovec and Sune Lehmann",
  title =        "Inferring Person-to-person Proximity Using {WiFi}
                 Signals",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090089",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090089",
  abstract =     "Today's societies are enveloped in an ever-growing
                 telecommunication infrastructure. This infrastructure
                 offers important opportunities for sensing and
                 recording a multitude of human behaviors. Human
                 mobility patterns are a prominent example of such a
                 behavior which has been studied based on cell phone
                 towers, Bluetooth beacons, and WiFi networks as proxies
                 for location. While mobility is an important aspect of
                 human behavior, it is also crucial to study physical
                 interactions among individuals. Sensing proximity that
                 enables social interactions on a large scale is a
                 technical challenge and many commonly used
                 approaches-including RFID badges or Bluetooth
                 scanning-offer only limited scalability. Here we show
                 that it is possible, in a scalable and robust way, to
                 accurately infer person-to-person physical proximity
                 from the lists of WiFi access points measured by
                 smartphones carried by the two individuals. Based on a
                 longitudinal dataset of approximately 800 participants
                 with ground-truth Bluetooth proximity collected over a
                 year, we show that our model performs better than the
                 current state-of-the-art. Our results demonstrate the
                 value of WiFi signals as a tool for social sensing and
                 show how collections of WiFi data pose a potential
                 threat to privacy.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Sicong:2017:UBL,
  author =       "Sicong Liu and Zimu Zhou and Junzhao Du and Longfei
                 Shangguan and Jun Han and Xin Wang",
  title =        "{UbiEar}: Bringing Location-independent Sound
                 Awareness to the Hard-of-hearing People with
                 {Smartphones}",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090082",
  ISSN =         "2474-9567",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090082",
  abstract =     "Non-speech sound-awareness is important to improve the
                 quality of life for the deaf and hard-of-hearing (DHH)
                 people. DHH people, especially the young, are not
                 always satisfied with their hearing aids. According to
                 the interviews with 60 young hard-of-hearing students,
                 a ubiquitous sound-awareness tool for emergency and
                 social events that works in diverse environments is
                 desired. In this paper, we design UbiEar, a
                 smartphone-based acoustic event sensing and
                 notification system. Core techniques in UbiEar are a
                 light-weight deep convolution neural network to enable
                 location-independent acoustic event recognition on
                 commodity smartphones, and a set of mechanisms for
                 prompt and energy-efficient acoustic sensing. We
                 conducted both controlled experiments and user studies
                 with 86 DHH students and showed that UbiEar can assist
                 the young DHH students in awareness of important
                 acoustic events in their daily life.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Soha:2017:ILP,
  author =       "Soha Rostaminia and Addison Mayberry and Deepak
                 Ganesan and Benjamin Marlin and Jeremy Gummeson",
  title =        "{iLid}: Low-power Sensing of Fatigue and Drowsiness
                 Measures on a Computational Eyeglass",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090088",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090088",
  abstract =     "The ability to monitor eye closures and blink patterns
                 has long been known to enable accurate assessment of
                 fatigue and drowsiness in individuals. Many measures of
                 the eye are known to be correlated with fatigue
                 including coarse-grained measures like the rate of
                 blinks as well as fine-grained measures like the
                 duration of blinks and the extent of eye closures.
                 Despite a plethora of research validating these
                 measures, we lack wearable devices that can continually
                 and reliably monitor them in the natural environment.
                 In this work, we present a low-power system, iLid, that
                 can continually sense fine-grained measures such as
                 blink duration and Percentage of Eye Closures (PERCLOS)
                 at high frame rates of 100fps. We present a complete
                 solution including design of the sensing, signal
                 processing, and machine learning pipeline;
                 implementation on a prototype computational eyeglass
                 platform; and extensive evaluation under many
                 conditions including illumination changes, eyeglass
                 shifts, and mobility. Our results are very encouraging,
                 showing that we can detect blinks, blink duration,
                 eyelid location, and fatigue-related metrics such as
                 PERCLOS with less than a few percent error.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Talla:2017:BFC,
  author =       "Vamsi Talla and Bryce Kellogg and Shyamnath Gollakota
                 and Joshua R. Smith",
  title =        "Battery-Free Cellphone",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090090",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090090",
  abstract =     "We present the first battery-free cellphone design
                 that consumes only a few micro-watts of power. Our
                 design can sense speech, actuate the earphones, and
                 switch between uplink and downlink communications, all
                 in real time. Our system optimizes transmission and
                 reception of speech while simultaneously harvesting
                 power which enables the battery-free cellphone to
                 operate continuously. The battery-free device prototype
                 is built using commercial-off-the-shelf components on a
                 printed circuit board. It can operate on power that is
                 harvested from RF signals transmitted by a basestation
                 31 feet (9.4 m) away. Further, using power harvested
                 from ambient light with tiny photodiodes, we show that
                 our device can communicate with a basestation that is
                 50 feet (15.2 m) away. Finally, we perform the first
                 Skype call using a battery-free phone over a cellular
                 network, via our custom bridged basestation. This we
                 believe is a major leap in the capability of
                 battery-free devices and a step towards a fully
                 functional battery-free cellphone.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Tang:2017:HLT,
  author =       "Lie Ming Tang and Judy Kay",
  title =        "Harnessing Long Term Physical Activity Data --- How
                 Long-term Trackers Use Data and How an Adherence-based
                 Interface Supports New Insights",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090091",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090091",
  abstract =     "Increasingly, people are amassing long term physical
                 activity data which could play an important role for
                 reflection. However, it is not clear if and how
                 existing trackers use their long term data and
                 incomplete data is a potential challenge. We introduced
                 the notion of adherence to design iStuckWithIt, a
                 custom calendar display that integrates and embeds
                 daily adherence (days with data and days without),
                 hourly adherence (hours of wear each day) and goal
                 adherence (days people achieved their activity goals).
                 Our study of 21 long term FitBit users (average: 23
                 months, 17 over 1 year) began with an interview about
                 their use and knowledge of long term physical activity
                 data followed by a think-aloud use of iStuckWithIt and
                 a post-interview. Our participants gained new insights
                 about their wearing patterns and they could then use
                 this to overcome problems of missing data, to gain
                 insights about their physical activity and goal
                 achievement. This work makes two main contributions:
                 new understanding of the ways that long term trackers
                 have used and understand their data; the design and
                 evaluation of iStuckWithIt demonstrating that people
                 can gain new insights through designs that embed daily,
                 hourly adherence data with goal adherence.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Trinh:2017:RRC,
  author =       "Ha Trinh and Reza Asadi and Darren Edge and Timothy
                 Bickmore",
  title =        "{RoboCOP}: A Robotic Coach for Oral Presentations",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090092",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090092",
  abstract =     "Rehearsing in front of a live audience is invaluable
                 when preparing for important presentations. However,
                 not all presenters take the opportunity to engage in
                 such rehearsal, due to time constraints, availability
                 of listeners who can provide constructive feedback, or
                 public speaking anxiety. We present RoboCOP, an
                 automated anthropomorphic robot head that acts as a
                 coach to provide spoken feedback during presentation
                 rehearsals at both the individual slide and overall
                 presentation level. The robot offers conversational
                 coaching on three key aspects of presentations: speech
                 quality, content coverage, and audience orientation.
                 The design of the feedback strategies was informed by
                 findings from an exploratory study with academic
                 professionals who were experienced in mentoring
                 students on their presentations. In a within-subjects
                 study comparing RoboCOP to visual feedback and spoken
                 feedback without a robot, the robotic coach was shown
                 to lead to significant improvement in the overall
                 experience of presenters. Results of a second
                 within-subjects evaluation study comparing RoboCOP with
                 existing rehearsal practices show that our system
                 creates a natural, interactive, and motivating
                 rehearsal environment that leads to improved
                 presentation quality.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Weigel:2017:DDI,
  author =       "Martin Weigel and J{\"u}rgen Steimle",
  title =        "{DeformWear}: Deformation Input on Tiny Wearable
                 Devices",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090093",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090093",
  abstract =     "Due to their small surfaces, wearable devices make
                 existing techniques for touch input very challenging.
                 This paper proposes deformation input on a tiny and
                 soft surface as an input modality for wearable
                 computing devices. We introduce DeformWear, tiny
                 wearable devices that leverage single-point deformation
                 input on various body locations. Despite the small
                 input surface, DeformWear enables expressive and
                 precise input using high-resolution pressure, shear,
                 and pinch deformations. We present a first set of
                 interaction techniques for tiny deformation-sensitive
                 wearable devices. They enable fluid interaction in a
                 large input space by combining multiple dimensions of
                 deformation. We demonstrate their use in seven
                 application examples, showing DeformWear as a
                 standalone input device and as a companion device for
                 smartwatches, head-mounted displays, or headphones.
                 Results from a user study demonstrate that these tiny
                 devices allow for precise and expressive interactions
                 on many body locations, in standing and walking
                 conditions.",
  acknowledgement = ack-nhfb,
  articleno =    "28",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Wu:2017:GPA,
  author =       "Chenshu Wu and Jingao Xu and Zheng Yang and Nicholas
                 D. Lane and Zuwei Yin",
  title =        "Gain Without Pain: Accurate {WiFi}-based Localization
                 using Fingerprint Spatial Gradient",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090094",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090094",
  abstract =     "Among numerous indoor localization systems proposed
                 during the past decades, WiFi fingerprint-based
                 localization has been one of the most attractive
                 solutions, which is known to be free of extra
                 infrastructure and specialized hardware. However,
                 current WiFi fingerprinting suffers from a pivotal
                 problem of RSS fluctuations caused by unpredictable
                 environmental dynamics. The RSS variations lead to
                 severe spatial ambiguity and temporal instability in
                 RSS fingerprinting, both impairing the location
                 accuracy. To overcome such drawbacks, we propose
                 fingerprint spatial gradient (FSG), a more stable and
                 distinctive form than RSS fingerprints, which exploits
                 the spatial relationships among the RSS fingerprints of
                 multiple neighbouring locations. As a spatially
                 relative form, FSG is more resistant to RSS
                 uncertainties. Based on the concept of FSG, we design
                 novel algorithms to construct FSG on top of a general
                 RSS fingerprint database and then propose effective FSG
                 matching methods for location estimation. Unlike
                 previous works, the resulting system, named ViVi,
                 yields performance gain without the pains of
                 introducing extra information or additional service
                 restrictions or assuming impractical RSS models.
                 Extensive experiments in different buildings
                 demonstrate that ViVi achieves great performance,
                 outperforming the best among four comparative
                 state-of-the-art approaches by 29\% in mean accuracy
                 and 19\% in 95th percentile accuracy and outweighing
                 the worst one by 39\% and 24\% respectively. We
                 envision FSG as a promising supplement and alternative
                 to existing RSS fingerprinting for future WiFi
                 localization.",
  acknowledgement = ack-nhfb,
  articleno =    "29",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Zhang:2017:SCT,
  author =       "Cheng Zhang and Qiuyue Xue and Anandghan Waghmare and
                 Sumeet Jain and Yiming Pu and Sinan Hersek and Kent
                 Lyons and Kenneth A. Cunefare and Omer T. Inan and
                 Gregory D. Abowd",
  title =        "{SoundTrak}: Continuous {$3$D} Tracking of a Finger
                 Using Active Acoustics",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090095",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090095",
  abstract =     "The small size of wearable devices limits the
                 efficiency and scope of possible user interactions, as
                 inputs are typically constrained to two dimensions: the
                 touchscreen surface. We present SoundTrak, an active
                 acoustic sensing technique that enables a user to
                 interact with wearable devices in the surrounding 3D
                 space by continuously tracking the finger position with
                 high resolution. The user wears a ring with an embedded
                 miniature speaker sending an acoustic signal at a
                 specific frequency (e.g., 11 kHz), which is captured by
                 an array of miniature, inexpensive microphones on the
                 target wearable device. A novel algorithm is designed
                 to localize the finger's position in 3D space by
                 extracting phase information from the received acoustic
                 signals. We evaluated SoundTrak in a volume of space
                 (20cm $ \times $ 16cm $ \times $ 11cm) around a
                 smartwatch, and show an average accuracy of 1.3 cm. We
                 report on results from a Fitts' Law experiment with 10
                 participants as the evaluation of the real-time
                 prototype. We also present a set of applications which
                 are supported by this 3D input technique, and show the
                 practical challenges that need to be addressed before
                 widespread use.",
  acknowledgement = ack-nhfb,
  articleno =    "30",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Zhao:2017:MAM,
  author =       "Nan Zhao and Asaph Azaria and Joseph A. Paradiso",
  title =        "Mediated Atmospheres: A Multimodal Mediated Work
                 Environment",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090096",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090096",
  abstract =     "Atmosphere --- the sensorial qualities of a space,
                 shaped by the composition of light, sound, objects,
                 people, etc. --- has remarkable influence on our
                 experiences and behavior. Manipulating it has been
                 shown to be powerful, affecting cognitive performance,
                 mood and even physiology. Our work envisions and
                 implements a smart office prototype, capable of
                 digitally transforming its atmosphere --- creating what
                 we call Mediated Atmospheres (MA) --- using
                 computationally controlled lighting, video projection
                 and sound. Additionally, we equipped this space with a
                 modular real-time data collection infrastructure,
                 integrating a set of biosignal sensors. Through a user
                 study (N=29) we demonstrate MA's effects on occupants'
                 ability to focus and to recover from a stressful
                 situation. Our evaluation is based on subjective
                 measurements of perception, as well as objective
                 measurements, extracted from recordings of heart rate
                 variability and facial features. We compare multiple
                 signal processing approaches for quantifying changes in
                 occupant physiological state. Our findings show that MA
                 significantly \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "31",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Zhao:2017:SAC,
  author =       "Ru Zhao and Vivian Li and Hugo Barbosa and Gourab
                 Ghoshal and Mohammed Ehsan Hoque",
  title =        "Semi-Automated \& Collaborative Online Training Module
                 for Improving Communication Skills",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3090097",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:10:55 MDT 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://dl.acm.org/citation.cfm?id=3090097",
  abstract =     "This paper presents a description and evaluation of
                 the ROC Speak system, a platform that allows ubiquitous
                 access to communication skills training. ROC Speak
                 (available at rocspeak.com) enables anyone to go to a
                 website, record a video, and receive feedback on smile
                 intensity, body movement, volume modulation, filler
                 word usage, unique word usage, word cloud of the spoken
                 words, in addition to overall assessment and subjective
                 comments by peers. Peer comments are automatically
                 ranked and sorted for usefulness and sentiment (i.e.,
                 positive vs. negative). We evaluated the system with a
                 diverse group of 56 online participants for a 10-day
                 period. Participants submitted responses to career
                 oriented prompts every other day. The participants were
                 randomly split into two groups: (1) treatment --- full
                 feedback from the ROC Speak system; (2) control ---
                 written feedback from online peers. When judged by
                 peers \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "32",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
}

@Article{Nandakumar:2017:CAI,
  author =       "Rajalakshmi Nandakumar and Alex Takakuwa and Tadayoshi
                 Kohno and Shyamnath Gollakota",
  title =        "{CovertBand}: Activity Information Leakage using
                 Music",
  journal =      j-IMWUT,
  volume =       "1",
  number =       "3",
  pages =        "87:1--87:24",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  ISSN =         "????",
  bibdate =      "Fri Aug 25 15:07:55 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/imwut.bib",
  URL =          "http://musicattacks.cs.washington.edu/activity-information-leakage.pdf",
  abstract =     "This paper contributes a novel method for low-cost,
                 covert physical sensing and, by doing so, surfaces new
                 privacy threats. We demonstrate how a smartphone and
                 portable speaker playing music with embedded, inaudible
                 signals can track multiple individuals' locations and
                 activities both within a room and through barriers in
                 2D space. We achieve this by transforming a smartphone
                 into an active sonar system that emits a combination of
                 a sonar pulse and music and listens to the reflections
                 off of humans in the environment. Our implementation,
                 CovertBand, monitors minute changes to these
                 reflections to track multiple people concurrently and
                 to recognize different types of motion, leaking
                 information about where people are in addition to what
                 they may be doing. We evaluated CovertBand by running
                 experiments in five homes in the Seattle area, showing
                 that we can localize both single and multiple
                 individuals through barriers. These tests show
                 CovertBand can track walking subjects with a mean
                 tracking error of 18 cm and subjects moving at a fixed
                 position with an accuracy of 8 cm at up to 6 m in
                 line-of-sight and 3 m through barriers. We test a
                 variety of rhythmic motions such as pumping arms,
                 jumping, and supine pelvic tilts in through-wall
                 scenarios and show that they produce discernibly
                 different spectrograms from walking in the acoustic
                 reflections. In tests with 33 subjects, we also show
                 that even in ideal scenarios, listeners were unlikely
                 to detect a CovertBand attack.",
  acknowledgement = ack-nhfb,
  articleno =    "87",
  fjournal =     "Proceedings of the ACM on Interactive, Mobile,
                 Wearable and Ubiquitous Technologies (IMWUT)",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=J1566",
  keywords =     "active sonar; information leakage; motion tracking;
                 music processing",
}