%%% -*-BibTeX-*-
%%% ====================================================================
%%% BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.61",
%%%     date            = "13 January 2024",
%%%     time            = "15:24:49 MST",
%%%     filename        = "tap.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "06513 18267 96413 909649",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM Transactions on Applied Perception;
%%%                        bibliography; data processing;
%%%                        human-computer interaction; psychology;
%%%                        TAP; visual perception",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        ACM Transactions on Applied Perception (CODEN
%%%                        ????, ISSN 1544-3558 (print), 1544-3965
%%%                        (electronic)), covering all journal issues
%%%                        from 2004 -- date.
%%%
%%%                        At version 1.61, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2004 (   9)    2011 (  19)    2018 (  20)
%%%                             2005 (  51)    2012 (  21)    2019 (  22)
%%%                             2006 (  25)    2013 (  27)    2020 (  17)
%%%                             2007 (  19)    2014 (  13)    2021 (  23)
%%%                             2008 (  22)    2015 (  31)    2022 (  19)
%%%                             2009 (  31)    2016 (  24)    2023 (  16)
%%%                             2010 (  35)    2017 (  26)    2024 (   4)
%%%
%%%                             Article:        474
%%%
%%%                             Total entries:  474
%%%
%%%                        The journal Web page can be found at:
%%%
%%%                            http://www.acm.org/pubs/tap.html
%%%
%%%                        The journal table of contents page is at:
%%%
%%%                            http://www.acm.org/tap/
%%%                            http://www.acm.org/tap/PastIssues.html
%%%                            http://www.acm.org/tap/TitlesToAppear.html
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J932
%%%                            https://dl.acm.org/loi/tap
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        The initial draft was extracted from the ACM
%%%                        Web pages.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        bibsource keys in the bibliography entries
%%%                        below indicate the entry originally came
%%%                        from the computer science bibliography
%%%                        archive, even though it has likely since
%%%                        been corrected and updated.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
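%%%                        As an illustration, a minimal Python
%%%                        sketch of how such a tag might be derived
%%%                        follows.  It is not the BibNet Project's
%%%                        actual generator; the stopword list and
%%%                        the three-initial abbreviation rule are
%%%                        assumptions:
%%%
%%%                            def citation_tag(name, year, title):
%%%                                # Hypothetical reconstruction:
%%%                                # initials of the first three
%%%                                # significant title words.
%%%                                stop = {'a', 'an', 'the', 'of',
%%%                                        'to', 'for', 'and', 'in',
%%%                                        'on', 'with'}
%%%                                words = [w for w in
%%%                                         title.lower().split()
%%%                                         if w not in stop]
%%%                                abbrev = ''.join(
%%%                                    w[0].upper()
%%%                                    for w in words[:3])
%%%                                return f'{name}:{year}:{abbrev}'
%%%
%%%                            # citation_tag('Ware', 2004, 'Motion
%%%                            # to support rapid interactive
%%%                            # queries on node-link diagrams')
%%%                            # yields 'Ware:2004:MSR'.
%%%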
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.  (A small
%%%                        sketch for re-deriving the line, word,
%%%                        and character counts follows this
%%%                        header.)"
%%%     }
%%% ====================================================================
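%%% Checksum verification sketch:
%%%
%%% The following minimal Python sketch, using only the standard
%%% library, re-derives the last three values of the checksum field
%%% in the header above (the UNIX wc-style line, word, and character
%%% counts).  The leading CRC-16 value comes from Robert Solovay's
%%% checksum utility, whose exact CRC variant is not specified here,
%%% so it is not reproduced:
%%%
%%%     def wc_counts(path):
%%%         # Count lines, whitespace-separated words, and bytes,
%%%         # as the UNIX wc utility does.
%%%         with open(path, 'rb') as f:
%%%             data = f.read()
%%%         return (data.count(b'\n'), len(data.split()), len(data))
%%%
%%%     # wc_counts('tap.bib') should reproduce the last three
%%%     # numbers of the checksum field, e.g. 18267 96413 909649.
%%%
%%% ====================================================================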
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TAP                   = "ACM Transactions on Applied Perception"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Reinhard:2004:E,
  author =       "Erik Reinhard and Heinrich B{\"u}lthoff",
  title =        "Editorial",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "1--2",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ware:2004:MSR,
  author =       "Colin Ware and Robert Bobrow",
  title =        "Motion to support rapid interactive queries on
                 node--link diagrams",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "3--18",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frowd:2004:EHE,
  author =       "Charlie D. Frowd and Peter J. B. Hancock and Derek
                 Carson",
  title =        "{EvoFIT}: a holistic, evolutionary facial imaging
                 technique for creating composites",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "19--39",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Surakka:2004:GFN,
  author =       "Veikko Surakka and Marko Illi and Poika Isokoski",
  title =        "Gazing and frowning as a new human--computer
                 interaction technique",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "40--56",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Yu:2004:MLI,
  author =       "Chen Yu and Dana H. Ballard",
  title =        "A multimodal learning interface for grounding spoken
                 language in sensory perceptions",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "57--80",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gugerty:2004:ESA,
  author =       "Leo Gugerty and Richard A. Tyrrell and Thomas R. Aten
                 and K. Andy Edmonds",
  title =        "The effects of subpixel addressing on users'
                 performance and preferences during reading-related
                 tasks",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "81--101",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Robles-De-La-Torre:2004:NEI,
  author =       "G. Robles-De-La-Torre and R. Sekuler",
  title =        "Numerically estimating internal models of dynamic
                 virtual objects",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "102--117",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ottaviani:2004:APS,
  author =       "Laura Ottaviani and Davide Rocchesso",
  title =        "Auditory perception of {$3$D} size: Experiments with
                 synthetic resonators",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "118--129",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McGookin:2004:UCE,
  author =       "David K. McGookin and Stephen A. Brewster",
  title =        "Understanding concurrent earcons: Applying auditory
                 scene analysis principles to concurrent earcon
                 recognition",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "130--155",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shin:2005:VCA,
  author =       "Do Hyoung Shin and Phillip S. Dunston and Xiangyu
                 Wang",
  title =        "View changes in augmented reality
                 computer-aided-drawing",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "1--14",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Payandeh:2005:SLD,
  author =       "Shahram Payandeh and John Dill and Jian Zhang",
  title =        "A study of level-of-detail in haptic rendering",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "15--34",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Sahm:2005:TVW,
  author =       "Cynthia S. Sahm and Sarah H. Creem-Regehr and William
                 B. Thompson and Peter Willemsen",
  title =        "Throwing versus walking as indicators of distance
                 perception in similar real and virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "35--45",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kikuuwe:2005:EHD,
  author =       "Ryo Kikuuwe and Akihito Sano and Hiromi Mochiyama and
                 Naoyuki Takesue and Hideo Fujimoto",
  title =        "Enhancing haptic detection of surface undulation",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "46--67",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Harders:2005:ESI,
  author =       "Matthias Harders and Marc Ernst",
  title =        "{EuroHaptics} special issue editorial",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "69--70",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Dinse:2005:IHH,
  author =       "Hubert R. Dinse and Tobias Kalisch and Patrick Ragert
                 and Burkhard Pleger and Peter Schwenkreis and Martin
                 Tegenthoff",
  title =        "Improving human haptic performance in normal and
                 impaired human populations through unattended
                 activation-based learning",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "71--88",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Choi:2005:FCE,
  author =       "Seungmoon Choi and Laron Walker and Hong Z. Tan and
                 Scott Crittenden and Ron Reifenberger",
  title =        "Force constancy and its effect on haptic perception of
                 virtual surfaces",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "89--105",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{VanErp:2005:WNV,
  author =       "Jan B. F. {Van Erp} and Hendrik A. H. C. {Van Veen}
                 and Chris Jansen and Trevor Dobbins",
  title =        "Waypoint navigation with a vibrotactile waist belt",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "106--117",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Drewing:2005:FEN,
  author =       "Knut Drewing and Michael Fritschi and Regine Zopf and
                 Marc O. Ernst and Martin Buss",
  title =        "First evaluation of a novel tactile display exerting
                 shear force via lateral displacement",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "118--131",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Levesque:2005:DVB,
  author =       "Vincent L{\'e}vesque and J{\'e}r{\^o}me Pasquero and
                 Vincent Hayward and Maryse Legault",
  title =        "Display of virtual {Braille} dots by lateral skin
                 deformation: feasibility study",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "132--149",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1060581.1060587",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Webster:2005:NTD,
  author =       "Robert J. {Webster III} and Todd E. Murphy and Lawton
                 N. Verner and Allison M. Okamura",
  title =        "A novel two-dimensional tactile slip display: design,
                 kinematics and perceptual experiments",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "150--165",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schmidt:2005:HNH,
  author =       "Henning Schmidt and Stefan Hesse and Rolf Bernhardt
                 and J{\"o}rg Kr{\"u}ger",
  title =        "{HapticWalker}---a novel haptic foot device",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "166--180",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rushmeier:2005:GE,
  author =       "Holly Rushmeier",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "181--182",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2005:VCC,
  author =       "Bernhard E. Riecke and Markus {Von Der Heyde} and
                 Heinrich H. B{\"u}lthoff",
  title =        "Visual cues can be sufficient for triggering
                 automatic, reflexlike spatial updating",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "183--215",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Plumert:2005:DPR,
  author =       "Jodie M. Plumert and Joseph K. Kearney and James F.
                 Cremer and Kara Recker",
  title =        "Distance perception in real and virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "216--233",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Messing:2005:DPV,
  author =       "Ross Messing and Frank H. Durgin",
  title =        "Distance Perception and the Visual Horizon in
                 Head-Mounted Displays",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "234--250",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cunningham:2005:MVS,
  author =       "Douglas W. Cunningham and Mario Kleiner and Christian
                 Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Manipulating Video Sequences to Determine the
                 Components of Conversational Facial Expressions",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "251--269",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cosker:2005:TPR,
  author =       "Darren Cosker and David Marshall and Paul L. Rosin and
                 Susan Paddock and Simon Rushton",
  title =        "Toward Perceptually Realistic Talking Heads: Models,
                 Methods, and {McGurk}",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "270--285",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Howlett:2005:PES,
  author =       "Sarah Howlett and John Hamill and Carol O'Sullivan",
  title =        "Predicting and Evaluating Saliency for Simplified
                 Polygonal Models",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "286--308",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{OSullivan:2005:CA,
  author =       "Carol O'Sullivan",
  title =        "Collisions and Attention",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "309--321",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chang:2005:EBC,
  author =       "Youngha Chang and Suguru Saito and Keiji Uchikawa and
                 Masayuki Nakajima",
  title =        "Example-Based Color Stylization of Images",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "322--345",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fleming:2005:LLI,
  author =       "Roland W. Fleming and Heinrich H. B{\"u}lthoff",
  title =        "Low-Level Image Cues in the Perception of Translucent
                 Materials",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "346--382",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kramer:2005:SSM,
  author =       "Gregory Kramer and Bruce N. Walker",
  title =        "Sound science: Marking ten international conferences
                 on auditory display",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "383--388",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barrass:2005:PFA,
  author =       "Stephen Barrass",
  title =        "A perceptual framework for the auditory display of
                 scientific data",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "389--402",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Barrass:2005:CFA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barrass:2005:CFA,
  author =       "Stephen Barrass",
  title =        "A comprehensive framework for auditory display:
                 Comments on {Barrass}, {ICAD 1994}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "403--406",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Barrass:2005:PFA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Walker:2005:MMA,
  author =       "Bruce N. Walker and Gregory Kramer",
  title =        "Mappings and metaphors in auditory displays: An
                 experimental assessment",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "407--412",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Walker:2005:SDM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Walker:2005:SDM,
  author =       "Bruce N. Walker and Gregory Kramer",
  title =        "Sonification design and metaphors: Comments on
                 {Walker} and {Kramer}, {ICAD 1996}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "413--417",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Walker:2005:MMA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shinn-Cunningham:2005:PPS,
  author =       "Barbara G. Shinn-Cunningham and Timothy Streeter and
                 Jean-Fran{\c{c}}ois Gyss",
  title =        "Perceptual plasticity in spatial auditory displays",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "418--425",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Shinn-Cunningham:2005:SAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shinn-Cunningham:2005:SAD,
  author =       "Barbara G. Shinn-Cunningham and Timothy Streeter",
  title =        "Spatial auditory display: Comments on
                 {Shinn-Cunningham} et al., {ICAD 2001}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "426--429",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Shinn-Cunningham:2005:PPS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brungart:2005:OSC,
  author =       "Douglas S. Brungart and Brian D. Simpson",
  title =        "Optimizing the spatial configuration of a seven-talker
                 speech display",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "430--436",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Brungart:2005:OVS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brungart:2005:OVS,
  author =       "Douglas S. Brungart and Brian D. Simpson",
  title =        "Optimizing a virtual speech display: Comments on
                 {Brungart} and {Simpson}, {ICAD 2003}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "437--441",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Brungart:2005:OSC}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Edwards:2005:PMS,
  author =       "Alistair D. N. Edwards and Evangelos Mitsopoulos",
  title =        "A principled methodology for the specification and
                 design of nonvisual widgets",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "442--449",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Edwards:2005:PAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Edwards:2005:PAD,
  author =       "Alistair D. N. Edwards and Evangelos Mitsopoulos",
  title =        "Perceptual auditory design: Comments on {Edwards} and
                 {Mitsopoulos}, {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "450--454",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Edwards:2005:PMS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brewster:2005:DES,
  author =       "Stephen A. Brewster and Catherine V. Clarke",
  title =        "The design and evaluation of a sonically enhanced tool
                 palette",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "455--461",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Brewster:2005:SEW}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brewster:2005:SEW,
  author =       "Stephen A. Brewster",
  title =        "Sonically-enhanced widgets: Comments on {Brewster} and
                 {Clarke}, {ICAD 1997}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "462--466",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Brewster:2005:DES}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Flowers:2005:DSD,
  author =       "John H. Flowers and Dion C. Buhman and Kimberly D.
                 Turnage",
  title =        "Data sonification from the desktop: Should sound be
                 part of standard data analysis software?",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "467--472",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Flowers:2005:DDS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Flowers:2005:DDS,
  author =       "John H. Flowers and Kimberly D. Turnage and Dion C.
                 Buhman",
  title =        "Desktop data sonification: Comments on {Flowers} et
                 al., {ICAD 1996}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "473--476",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Flowers:2005:DSD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vickers:2005:MPA,
  author =       "Paul Vickers and James L. Alty",
  title =        "Musical program auralization: Empirical studies",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "477--489",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Vickers:2005:PAA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vickers:2005:PAA,
  author =       "Paul Vickers",
  title =        "Program auralization: {Author}'s comments on {Vickers}
                 and {Alty}, {ICAD 2000}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "490--494",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Vickers:2005:MPA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fernstrom:2005:ADM,
  author =       "Mikael Fernstr{\"o}m and Caolan McNamara",
  title =        "After direct manipulation---direct sonification",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "495--499",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Fernstrom:2005:RSB}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fernstrom:2005:RSB,
  author =       "Mikael Fernstr{\"o}m",
  title =        "Reflections on sonic browsing: Comments on
                 {Fernstr{\"o}m} and {McNamara}, {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "500--504",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Fernstrom:2005:ADM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bonebright:2005:DCA,
  author =       "Terri L. Bonebright and Nadine E. Miner and Timothy E.
                 Goldsmith and Thomas P. Caudell",
  title =        "Data collection and analysis techniques for evaluating
                 the perceptual qualities of auditory stimuli",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "505--516",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Bonebright:2005:EAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bonebright:2005:EAD,
  author =       "Terri L. Bonebright and Nadine E. Miner",
  title =        "Evaluation of auditory displays: Comments on
                 {Bonebright} et al., {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "517--520",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Bonebright:2005:DCA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Miner:2005:UWS,
  author =       "Nadine E. Miner and Thomas P. Caudell",
  title =        "Using wavelets to synthesize stochastic-based sounds
                 for immersive virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "521--528",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Miner:2005:ACM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Miner:2005:ACM,
  author =       "Nadine E. Miner and Victor E. Vergara Panaiotis and
                 Thomas Preston Caudell",
  title =        "Authors' comments on {Miner} and {Caudell}, {ICAD
                 1997}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "529--533",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Miner:2005:UWS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{vandenDoel:2005:PBM,
  author =       "Kees van den Doel",
  title =        "Physically based models for liquid sounds",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "534--546",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{vandenDoel:2005:PSC}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{vandenDoel:2005:PSC,
  author =       "Kees van den Doel",
  title =        "From physics to sound: Comments on {van den Doel},
                 {ICAD 2004}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "547--549",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{vandenDoel:2005:PBM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hermann:2005:CSH,
  author =       "Thomas Hermann and Helge Ritter",
  title =        "Crystallization sonification of high-dimensional
                 datasets",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "550--558",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Hermann:2005:MBS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hermann:2005:MBS,
  author =       "Thomas Hermann and Helge Ritter",
  title =        "Model-based sonification revisited---authors' comments
                 on {Hermann} and {Ritter}, {ICAD 2002}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "559--563",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Hermann:2005:CSH}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Grohn:2005:CAV,
  author =       "Matti Gr{\"o}hn and Tapio Lokki and Tapio Takala",
  title =        "Comparison of auditory, visual, and audiovisual
                 navigation in a {$3$D} space",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "564--570",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Grohn:2005:ACG}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Grohn:2005:ACG,
  author =       "Matti Gr{\"o}hn and Tapio Lokki and Tapio Takala",
  title =        "Author's comments on {Gr{\"o}hn}, {Lokki}, and
                 {Takala}, {ICAD 2003}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "571--573",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Grohn:2005:CAV}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Klatzky:2006:PRR,
  author =       "Roberta L. Klatzky and Susan J. Lederman",
  title =        "The perceived roughness of resistive virtual textures:
                 {I}. {Rendering} by a force-feedback mouse",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "1--14",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lederman:2006:PRR,
  author =       "Susan J. Lederman and Roberta L. Klatzky and Christine
                 Tong and Cheryl Hamilton",
  title =        "The perceived roughness of resistive virtual textures:
                 {II}. Effects of varying viscosity with a
                 force-feedback device",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "15--30",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Neumann:2006:IRP,
  author =       "Dirk Neumann and Karl R. Gegenfurtner",
  title =        "Image retrieval and perceptual similarity",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "31--47",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Niemenlehto:2006:DES,
  author =       "Pekka-Henrik Niemenlehto and Martti Juhola and Veikko
                 Surakka",
  title =        "Detection of electromyographic signals from facial
                 muscles with neural networks",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "48--61",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zana:2006:FRB,
  author =       "Yossi Zana and Roberto M. {Cesar, Jr.}",
  title =        "Face recognition based on polar frequency features",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "62--82",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kelly:2006:SMS,
  author =       "Jonathan W. Kelly and Andrew C. Beall and Jack M.
                 Loomis and Roy S. Smith and Kristen L. Macuga",
  title =        "Simultaneous measurement of steering performance and
                 perceived heading on a curving path",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "83--94",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Seuntiens:2006:PQC,
  author =       "Pieter Seuntiens and Lydia Meesters and Wijnand
                 Ijsselsteijn",
  title =        "Perceived quality of compressed stereoscopic images:
                 Effects of symmetric and asymmetric {JPEG} coding and
                 camera separation",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "95--109",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Marston:2006:ESD,
  author =       "James R. Marston and Jack M. Loomis and Roberta L.
                 Klatzky and Reginald G. Golledge and Ethan L. Smith",
  title =        "Evaluation of spatial displays for navigation without
                 sight",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "110--124",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barbagli:2006:HDF,
  author =       "Federico Barbagli and Ken Salisbury and Cristy Ho and
                 Charles Spence and Hong Z. Tan",
  title =        "Haptic discrimination of force direction and the
                 influence of visual information",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "125--135",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Atkins:2006:AET,
  author =       "M. Stella Atkins and Adrian Moise and Robert
                 Rohling",
  title =        "An application of eyegaze tracking for designing
                 radiologists' workstations: Insights for comparative
                 visual search tasks",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "136--151",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Interrante:2006:GE,
  author =       "Victoria Interrante",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "153--154",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lovell:2006:EMC,
  author =       "P. George Lovell and C. Alejandro P{\'a}rraga and Tom
                 Troscianko and Caterina Ripamonti and David J. Tolhurst",
  title =        "Evaluation of a multiscale color model for visual
                 difference prediction",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "155--178",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166089",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Langer:2006:PLM,
  author =       "Michael S. Langer and Javeen Pereira and Dipinder
                 Rekhi",
  title =        "Perceptual limits on {$2$D} motion-field
                 visualization",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "179--193",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166090",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2006:CFC,
  author =       "Bernhard E. Riecke and J{\"o}rg Schulte-Pelkum and
                 Marios N. Avraamides and Markus Von Der Heyde and
                 Heinrich H. B{\"u}lthoff",
  title =        "Cognitive factors can influence self-motion perception
                 (vection) in virtual reality",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "194--216",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166091",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McNamara:2006:EVA,
  author =       "Ann McNamara",
  title =        "Exploring visual and automatic measures of perceptual
                 fidelity in real and simulated imagery",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "217--238",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166092",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cooke:2006:OFV,
  author =       "Theresa Cooke and Sebastian Kannengiesser and
                 Christian Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Object feature validation using visual and haptic
                 similarity ratings",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "239--261",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166093",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Weidenbacher:2006:SSS,
  author =       "Ulrich Weidenbacher and Pierre Bayerl and Heiko
                 Neumann and Roland Fleming",
  title =        "Sketching shiny surfaces: {$3$D} shape extraction and
                 depiction of specular surfaces",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "262--285",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166094",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mantiuk:2006:PFC,
  author =       "Rafal Mantiuk and Karol Myszkowski and Hans-Peter
                 Seidel",
  title =        "A perceptual framework for contrast processing of high
                 dynamic range images",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "286--308",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166095",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Dixon:2006:MAF,
  author =       "T. D. Dixon and E. F. Canga and J. M. Noyes and T.
                 Troscianko and S. G. Nikolov and D. R. Bull and
                 C. N. Canagarajah",
  title =        "Methods for the assessment of fused images",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "309--332",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1166087.1166096",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schwaninger:2006:PPM,
  author =       "Adrian Schwaninger and Julia Vogel and Franziska Hofer
                 and Bernt Schiele",
  title =        "A psychophysically plausible model for typicality
                 ranking of natural scenes",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "333--353",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Balas:2006:RBR,
  author =       "Benjamin J. Balas and Pawan Sinha",
  title =        "Region-based representations for face recognition",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "354--375",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Holten:2006:PBS,
  author =       "Danny Holten and Jarke J. {Van Wijk} and Jean-Bernard
                 Martens",
  title =        "A perceptually based spectral model for isotropic
                 textures",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "376--398",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ashikhmin:2006:RCT,
  author =       "Michael Ashikhmin and Jay Goyal",
  title =        "A reality check for tone-mapping operators",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "399--411",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wilcox:2006:PSV,
  author =       "Laurie M. Wilcox and Robert S. Allison and Samuel
                 Elfassy and Cynthia Grelik",
  title =        "Personal space in virtual reality",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "412--428",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Devlin:2006:VCC,
  author =       "Kate Devlin and Alan Chalmers and Erik Reinhard",
  title =        "Visual calibration and correction for ambient
                 illumination",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "429--452",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Apfelbaum:2007:HAT,
  author =       "Henry Apfelbaum and Adar Pelah and Eli Peli",
  title =        "Heading assessment by ``tunnel vision'' patients and
                 control subjects standing or walking in a virtual
                 reality environment",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Durgin:2007:SFP,
  author =       "Frank H. Durgin and Catherine Reed and Cara Tigue",
  title =        "Step frequency and perceived self-motion",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fink:2007:OAD,
  author =       "Philip W. Fink and Patrick S. Foo and William H.
                 Warren",
  title =        "Obstacle avoidance during walking in real and virtual
                 environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fortenbaugh:2007:GDC,
  author =       "Francesca C. Fortenbaugh and Sidhartha Chaudhury and
                 John C. Hicks and Lei Hao and Kathleen A. Turano",
  title =        "Gender differences in cue preference during path
                 integration in virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frenz:2007:ETD,
  author =       "Harald Frenz and Markus Lappe and Marina Kolesnik and
                 Thomas B{\"u}hrmann",
  title =        "Estimation of travel distance from visual motion in
                 virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lichtenstein:2007:FCI,
  author =       "Lee Lichtenstein and James Barabas and Russell L.
                 Woods and Eli Peli",
  title =        "A feedback-controlled interface for treadmill
                 locomotion in virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mohler:2007:CLR,
  author =       "Betty J. Mohler and William B. Thompson and Sarah H.
                 Creem-Regehr and Peter Willemsen and Herbert L. {Pick,
                 Jr.} and John J. Rieser",
  title =        "Calibration of locomotion resulting from visual motion
                 in a treadmill-based virtual environment",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Pelah:2007:EWR,
  author =       "Adar Pelah and Jan J. Koenderink",
  title =        "Editorial: Walking in real and virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kuang:2007:EHR,
  author =       "Jiangtao Kuang and Hiroshi Yamaguchi and Changmeng Liu
                 and Garrett M. Johnson and Mark D. Fairchild",
  title =        "Evaluating {HDR} rendering algorithms",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1265957.1265958",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A series of three experiments has been performed to
                 test both the preference and accuracy of high
                  dynamic-range (HDR) rendering algorithms in digital
                  photography applications. The goal was to develop a
                 methodology for testing a wide variety of previously
                 published tone-mapping algorithms for overall
                 preference and rendering accuracy. A number of
                 algorithms were chosen and evaluated first in a
                 paired-comparison experiment for overall image
                 preference. A rating-scale experiment was then designed
                 for further investigation of individual image
                 attributes that make up overall image preference. This
                 was designed to identify the correlations between image
                 attributes and the overall preference results obtained
                 from the first experiments. In a third experiment,
                 three real-world scenes with a diversity of dynamic
                 range and spatial configuration were designed and
                 captured to evaluate seven HDR rendering algorithms for
                  both their preference and accuracy performance by
                 comparing the appearance of the physical scenes and the
                 corresponding tone-mapped images directly. In this
                  series of experiments, a modified version of Durand
                  and Dorsey's bilateral filter technique consistently
                  performed well
                 for both preference and accuracy, suggesting that it is
                 a good candidate for a common algorithm that could be
                 included in future HDR algorithm testing evaluations.
                  The results of these experiments provide insight into
                  perceptual HDR image rendering and should aid in
                  designing strategies for spatial processing
                 and tone mapping. The results indicate ways to improve
                 and design more robust rendering algorithms for general
                 HDR scenes in the future. Moreover, the purpose of this
                 research was not simply to find out the ``best''
                  algorithms, but rather to establish a more general
                  methodology, based on psychophysical experiments, for
                  evaluating HDR image-rendering algorithms. This paper
                  provides an
                 overview of the many issues involved in an experimental
                 framework that can be used for these evaluations.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "High dynamic-range imaging; psychophysical
                 experiments; tone-mapping algorithms evaluation",
}

@Article{Tan:2007:DIF,
  author =       "Hong Z. Tan and Mandayam A. Srinivasan and Charlotte
                 M. Reed and Nathaniel I. Durlach",
  title =        "Discrimination and identification of finger
                 joint-angle position using active motion",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1265957.1265959",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The authors report six experiments on the human
                 ability to discriminate and identify finger joint-angle
                 positions using active motion. The PIP (proximal
                 interphalangeal) joint of the index finger was examined
                 in Exps. 1--3 and the MCP (metacarpophalangeal) joint
                 in Exps. 4--6. In Exp. 1, the just noticeable
                 difference (JND) of PIP joint-angle position was
                 measured when the MCP joint was either fully extended
                 or halfway bent. In Exp. 2, the JND of PIP joint-angle
                 position as a function of PIP joint-angle reference
                 position was measured when the PIP joint was almost
                 fully extended, halfway bent, or almost fully flexed.
                 In Exp. 3, the information transfer of PIP joint-angle
                 position was estimated with the MCP joint in a fully
                 extended position. In Exps. 4--6, the JND and the
                 information transfer of MCP joint-angle position were
                 studied with a similar experimental design. The results
                 show that the JNDs of the PIP joint-angle position were
                 roughly constant ($2.5^\circ$--$2.7^\circ$) independent
                 of the PIP joint-angle reference position or the MCP
                 joint-angle position used (Exps. 1 and 2). The JNDs of
                 the MCP joint-angle position, however, increased with
                 the flexion of both the PIP and MCP joints and ranged
                 from $1.7^\circ$ to $2.7^\circ$ (Exps. 4 and 5). The
                 information transfer of the PIP and MCP joint-angle
                 position were similar, indicating 3--4 perfectly
                 identifiable joint-angle positions for both joints
                 (Exps. 3 and 6). The results provide the basic data
                 needed for estimating, for example, the resolution of
                 fingertip position during active free motion. They are
                 compared to the results from previous studies on joint
                 position, length, and thickness perception.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "discrimination; haptic perception; identification;
                 JND; Joint position; kinesthesis",
}

@Article{Sprague:2007:MEV,
  author =       "Nathan Sprague and Dana Ballard and Al Robinson",
  title =        "Modeling embodied visual behaviors",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1265957.1265960",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "To make progess in understanding human visuomotor
                 behavior, we will need to understand its basic
                 components at an abstract level. One way to achieve
                 such an understanding would be to create a model of a
                 human that has a sufficient amount of complexity so as
                  to be capable of generating such behaviors. Recent
                  technological advances have made progress in this
                  direction possible. Graphics models
                 that simulate extensive human capabilities can be used
                 as platforms from which to develop synthetic models of
                 visuomotor behavior. Currently, such models can capture
                 only a small portion of a full behavioral repertoire,
                 but for the behaviors that they do model, they can
                 describe complete visuomotor subsystems at a useful
                 level of detail. The value in doing so is that the
                 body's elaborate visuomotor structures greatly simplify
                 the specification of the abstract behaviors that guide
                 them. The net result is that, essentially, one is faced
                 with proposing an embodied ``operating system'' model
                 for picking the right set of abstract behaviors at each
                 instant. This paper outlines one such model. A
                 centerpiece of the model uses vision to aid the
                 behavior that has the most to gain from taking
                 environmental measurements. Preliminary tests of the
                 model against human performance in realistic VR
                 environments show that main features of the model show
                 up in human behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "reinforcement learning; visual attention; Visual
                 routines",
}

@Article{Williams:2007:FSS,
  author =       "Betsy Williams and Gayathri Narasimham and Claire
                 Westerman and John Rieser and Bobby Bodenheimer",
  title =        "Functional similarities in spatial representations
                 between real and virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1265957.1265961",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper presents results that demonstrate
                 functional similarities in subjects' access to spatial
                  knowledge (or spatial representation) between real and
                 virtual environments. Such representations are
                 important components of the transfer of reasoning
                 ability and knowledge between these two environments.
                 In particular, we present two experiments aimed at
                 investigating similarities in spatial knowledge derived
                 from exploring on foot both physical environments and
                 virtual environments presented through a head-mounted
                 display. In the first experiment, subjects were asked
                 to learn the locations of target objects in the real or
                 virtual environment and then rotate the perspective by
                 either physically locomoting to a new facing direction
                 or imagining moving. The latencies and errors were
                 generally worse after imagining locomoting and for
                 greater degrees of rotation in perspective; they did
                 not differ significantly across knowledge derived from
                 exploring the physical versus virtual environments. In
                 the second experiment, subjects were asked to imagine
                 simple rotations versus simple translations in
                 perspective. The errors and latencies indicated that
                 the to-be-imagined disparity was linearly related after
                 learning the physical and virtual environment. These
                 results demonstrate functional similarities in access
                 to knowledge of new perspective when it is learned by
                 exploring physical environments and virtual renderings
                 of the same environment.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "space perception; Virtual reality (VR)",
}

@Article{Ho:2007:DET,
  author =       "Hsin-Ni Ho and Lynette A. Jones",
  title =        "Development and evaluation of a thermal display for
                 material identification and discrimination",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1265957.1265962",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The objective of this study was to develop and
                 evaluate a thermal display that assists in object
                 identification in virtual environments by simulating
                 the thermal cues associated with making contact with
                 materials with different thermal properties. The
                 thermal display was developed based on a semi-infinite
                 body model. Three experiments were conducted to
                 evaluate the performance of the display. The first
                  experiment compared the ability of subjects to
                 identify various materials, which were presented
                 physically or simulated with the thermal display. The
                 second experiment examined the capacity of subjects to
                 discriminate between a real and simulated material
                 based on thermal cues. In the third experiment, the
                 changes in skin temperature that occurred when making
                 contact with real and simulated materials were measured
                 to evaluate how these compare to theoretical
                 predictions. The results indicated that there was no
                 significant difference in material identification and
                 discrimination when subjects were presented with real
                 or simulated materials. The changes in skin temperature
                 were comparable for real and simulated materials and
                 were related to the contact coefficient of the material
                 palpated, consistent with the semi-infinite body model.
                 These findings suggest that a thermal display is
                 capable of facilitating object recognition when visual
                 cues are limited.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "hand--object interaction; Haptic interface; material
                 identification; semi-infinite body model; thermal
                 display; thermal feedback; thermal perception; virtual
                 environment",
}

@Article{Thompson:2007:GE,
  author =       "William B. Thompson",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "14:1--14:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278388",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Sundstedt:2007:PRP,
  author =       "Veronica Sundstedt and Diego Gutierrez and Oscar Anson
                 and Francesco Banterle and Alan Chalmers",
  title =        "Perceptual rendering of participating media",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "15:1--15:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278389",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High-fidelity image synthesis is the process of
                 computing images that are perceptually
                 indistinguishable from the real world they are
                 attempting to portray. Such a level of fidelity
                 requires that the physical processes of materials and
                 the behavior of light are accurately simulated. Most
                 computer graphics algorithms assume that light passes
                 freely between surfaces within an environment. However,
                 in many applications, we also need to take into account
                 how the light interacts with media, such as dust,
                 smoke, fog, etc., between the surfaces. The
                 computational requirements for calculating the
                 interaction of light with such participating media are
                 substantial. This process can take many hours and
                 rendering effort is often spent on computing parts of
                 the scene that may not be perceived by the viewer. In
                 this paper, we present a novel perceptual strategy for
                 physically based rendering of participating media. By
                 using a combination of a saliency map with our new
                 extinction map (X map), we can significantly reduce
                 rendering times for inhomogeneous media. The visual
                 quality of the resulting images is validated using two
                 objective difference metrics and a subjective
                  psychophysical experiment. Although the average pixel
                  errors of these metrics are all less than 1\%, the
                  subjective validation indicates that the degradation in
                  quality is still noticeable for certain scenes. We thus
                 introduce and validate a novel light map (L map) that
                 accounts for salient features caused by multiple light
                 scattering around light sources.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "attention; extinction map; light map; Participating
                 media; perception; saliency map; selective rendering",
}

@Article{Wallraven:2007:ERW,
  author =       "Christian Wallraven and Heinrich H. B{\"u}lthoff and
                 Douglas W. Cunningham and Jan Fischer and Dirk Bartz",
  title =        "Evaluation of real-world and computer-generated
                 stylized facial expressions",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "16:1--16:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278390",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The goal of stylization is to provide an abstracted
                 representation of an image that highlights specific
                 types of visual information. Recent advances in
                 computer graphics techniques have made it possible to
                  render many varieties of stylized imagery efficiently,
                  making stylization a useful technique not only for
                  artistic but also for visualization applications.
                 In this paper, we report results from two sets of
                 experiments that aim at characterizing the perceptual
                 impact and effectiveness of three different stylization
                 techniques in the context of dynamic facial
                 expressions. In the first set of experiments, animated
                 facial expressions are stylized using three common
                 techniques (brush, cartoon, and illustrative
                 stylization) and investigated using different
                 experimental measures. Going beyond the usual
                 questionnaire approach, these experiments compare the
                 techniques according to several criteria ranging from
                 subjective preference to task-dependent measures (such
                 as recognizability, intensity) allowing us to compare
                 behavioral and introspective approaches. The second set
                  of experiments uses the same stylization techniques on
                 real-world video sequences in order to compare the
                 effect of stylization on natural and artificial
                 stimuli. Our results shed light on how stylization of
                 image contents affects the perception and subjective
                 evaluation of both real and computer-generated facial
                 expressions.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "avatar; Evaluation of facial animations; facial
                 expressions; perceptually adaptive graphics;
                 psychophysics; stylization",
}

@Article{Majumder:2007:PBC,
  author =       "Aditi Majumder and Sandy Irani",
  title =        "Perception-based contrast enhancement of images",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "17:1--17:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278391",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Study of contrast sensitivity of the human eye shows
                 that our suprathreshold contrast sensitivity follows
                 the Weber Law and, hence, increases proportionally with
                 the increase in the mean local luminance. In this
                 paper, we effectively apply this fact to design a
                 contrast-enhancement method for images that improves
                 the local image contrast by controlling the local image
                 gradient with a single parameter. Unlike previous
                 methods, we achieve this without explicit segmentation
                 of the image, either in the spatial (multiscale) or
                 frequency (multiresolution) domain. We pose the
                 contrast enhancement as an optimization problem that
                 maximizes the average local contrast of an image
                 strictly constrained by a perceptual constraint derived
                 directly from the Weber Law. We then propose a greedy
                 heuristic, controlled by a single parameter, to
                 approximate this optimization problem.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "contrast enhancement; contrast sensitivity; Human
                 perception",
}

@Article{Seward:2007:UVE,
  author =       "A. Elizabeth Seward and Daniel H. Ashmead and Bobby
                 Bodenheimer",
  title =        "Using virtual environments to assess time-to-contact
                 judgments from pedestrian viewpoints",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "18:1--18:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278392",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper describes the use of desktop and immersive
                 virtual environments to study judgments that
                 pedestrians make when deciding to cross a street. In
                 particular, we assess the ability of people to
                 discriminate and estimate time-to-contact (TTC) for
                 approaching vehicles under a variety of conditions.
                 Four experiments observing TTC judgments under various
                 conditions are described. We examine the effect of type
                 of vehicle, viewpoint, presentation mode, and TTC value
                 on TTC judgments. We find no significant effect of type
                 of vehicle or of viewpoint, extending prior work to
                 cover all views typically encountered by pedestrians.
                 Discrimination of short values for TTC judgments is
                 generally consistent with the literature, but
                 performance degrades significantly for long TTC values.
                 Finally, we find no significant difference between
                 judgments made in a desktop environment versus a
                 head-mounted display, indicating that tracking the
                 approaching vehicle with one's head does not aid
                 discrimination. In general, people appear to use
                 strategies similar to those that pedestrians use to
                  make real-world street-crossing decisions.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "time-to-contact (TTC); Virtual reality (VR)",
}

@Article{Vogel:2007:CNS,
  author =       "Julia Vogel and Adrian Schwaninger and Christian
                 Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Categorization of natural scenes: Local versus global
                 information and the role of color",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "19:1--19:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278387.1278393",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Categorization of scenes is a fundamental process of
                 human vision that allows us to efficiently and rapidly
                 analyze our surroundings. Several studies have explored
                 the processes underlying human scene categorization,
                 but they have focused on processing global image
                 information. In this study, we present both
                 psychophysical and computational experiments that
                 investigate the role of local versus global image
                 information in scene categorization. In a first set of
                 human experiments, categorization performance is tested
                 when only local or only global image information is
                 present. Our results suggest that humans rely on local,
                 region-based information as much as on global,
                 configural information. In addition, humans seem to
                 integrate both types of information for intact scene
                 categorization. In a set of computational experiments,
                 human performance is compared to two state-of-the-art
                 computer vision approaches that have been shown to be
                 psychophysically plausible and that model either local
                 or global information. In addition to the influence of
                 local versus global information, in a second series of
                 experiments, we investigated the effect of color on the
                 categorization performance of both the human observers
                 and the computational model. Analysis of the human data
                 suggests that color is an additional channel of
                  perceptual information that leads to better
                  categorization performance at the expense of increased
                 reaction times in the intact condition. However, it
                 does not affect reaction times when only local
                 information is present. When color is removed, the
                 employed computational model follows the relative
                 performance decrease of human observers for each scene
                 category and can thus be seen as a perceptually
                 plausible model for human scene categorization based on
                 local image information.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "computational gist; computational modeling; global
                 configural information; local region-based information;
                 scene classification; Scene perception; semantic
                 modeling",
}

@Article{Akyuz:2008:PET,
  author =       "Ahmet O{\u{g}}uz Aky{\"u}z and Erik Reinhard",
  title =        "Perceptual evaluation of tone-reproduction operators
                 using the Cornsweet--Craik--{O}'Brien illusion",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278760.1278761",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High dynamic-range images cannot be directly displayed
                 on conventional display devices, but have to be
                 tone-mapped first. For this purpose, a large set of
                 tone-reproduction operators is currently available.
                 However, it is unclear which operator is most suitable
                 for any given task. In addition, different tasks may
                 place different requirements upon each operator. In
                 this paper we evaluate several tone-reproduction
                 operators using a paradigm that does not require the
                 construction of a real high dynamic-range scene, nor
                 does it require the availability of a high
                 dynamic-range display device. The user study involves a
                 task that relates to the evaluation of contrast, which
                 is an important attribute that needs to be preserved
                 under tone reproduction.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "dynamic-range compression; high dynamic-range imaging;
                 Tone-mapping operators; visual psychophysics",
}
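
%%% The entry above evaluates tone-reproduction operators, which compress
%%% high-dynamic-range luminances into a displayable range.  As a minimal
%%% sketch of what such an operator does -- the global photographic
%%% operator of Reinhard et al. (2002), not one of the operators compared
%%% in the study -- the formula below is standard:
%%%
%%% import numpy as np
%%%
%%% def tonemap_global(luminance, key=0.18, eps=1e-6):
%%%     """Compress HDR luminance to [0, 1) (photographic operator)."""
%%%     log_mean = np.exp(np.mean(np.log(luminance + eps)))  # geometric mean
%%%     scaled = key * luminance / log_mean  # anchor the scene mean at `key`
%%%     return scaled / (1.0 + scaled)       # smoothly compress highlights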

@Article{Radun:2008:CQI,
  author =       "Jenni Radun and Tuomas Leisti and Jukka H{\"a}kkinen
                 and Harri Ojanen and Jean-Luc Olives and Tero Vuori and
                 G{\"o}te Nyman",
  title =        "Content and quality: Interpretation-based estimation
                 of image quality",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278760.1278762",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Test image contents affect subjective image-quality
                 evaluations. Psychometric methods might show that
                 contents have an influence on image quality, but they
                 do not tell what this influence is like, i.e., how the
                 contents influence image quality. To obtain a holistic
                 description of subjective image quality, we have used
                 an interpretation-based quality (IBQ) estimation
                 approach, which combines qualitative and quantitative
                 methodology. The method enables simultaneous
                 examination of psychometric results and the subjective
                 meanings related to the perceived image-quality
                 changes. In this way, the relationship between
                 subjective feature detection, subjective preferences,
                 and interpretations are revealed. We report a study
                 that shows that different impressions are conveyed in
                 five test image contents after similar sharpness
                 variations. Thirty na{\"\i}ve observers classified and
                  freely described the images, after which magnitude
                 estimation was used to verify that they distinguished
                 the changes in the images. The data suggest that in the
                 case of high image quality, the test image selection is
                 crucial. If subjective evaluation is limited only to
                  technical defects in test images, important subjective
                  information about the image-quality experience is lost. The
                 approach described here can be used to examine image
                 quality and it will help image scientists to evaluate
                 their test images.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "image contents; Image quality; qualitative
                 methodology; subjective measurement",
}

@Article{VandenBerg:2008:PDI,
  author =       "Ronald {Van den Berg} and Frans W. Cornelissen and Jos
                 B. T. M. Roerdink",
  title =        "Perceptual dependencies in information visualization
                 assessed by complex visual search",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278760.1278763",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A common approach for visualizing data sets is to map
                 them to images in which distinct data dimensions are
                 mapped to distinct visual features, such as color, size
                 and orientation. Here, we consider visualizations in
                 which different data dimensions should receive equal
                 weight and attention. Many of the end-user tasks
                 performed on these images involve a form of visual
                 search. Often, it is simply assumed that features can
                 be judged independently of each other in such tasks.
                 However, there is evidence for perceptual dependencies
                 when simultaneously presenting multiple features. Such
                 dependencies could potentially affect information
                 visualizations that contain combinations of features
                 for encoding information and, thereby, bias subjects
                 into unequally weighting the relevance of different
                 data dimensions. We experimentally assess (1) the
                 presence of judgment dependencies in a visualization
                 task (searching for a target node in a node-link
                 diagram) and (2) how feature contrast relates to
                 salience. From a visualization point of view, our most
                 relevant findings are that (a) to equalize saliency
                 (and thus bottom-up weighting) of size and color, color
                 contrasts have to become very low. Moreover,
                 orientation is less suitable for representing
                 information that consists of a large range of data
                 values, because it does not show a clear relationship
                 between contrast and salience; (b) color and size are
                 features that can be used independently to represent
                 information, at least as far as the range of colors
                  that were used in our study is concerned; (c) the
                 concept of (static) feature salience hierarchies is
                 wrong; how salient a feature is compared to another is
                 not fixed, but a function of feature contrasts; (d)
                 final decisions appear to be as good an indicator of
                 perceptual performance as indicators based on measures
                 obtained from individual fixations. Eye tracking,
                 therefore, does not necessarily present a benefit for
                 user studies that aim at evaluating performance in
                 search tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Color; feature hierarchy; feature interaction; human
                 vision; information visualization; node-link diagrams;
                 orientation; perceptual dependencies; psychophysics;
                 visual features; visual search",
}

@Article{Wallraven:2008:EPR,
  author =       "Christian Wallraven and Martin Breidt and Douglas W.
                 Cunningham and Heinrich H. B{\"u}lthoff",
  title =        "Evaluating the perceptual realism of animated facial
                 expressions",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278760.1278764",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The human face is capable of producing an astonishing
                 variety of expressions---expressions for which
                 sometimes the smallest difference changes the perceived
                 meaning considerably. Producing realistic-looking
                 facial animations that are able to transmit this degree
                 of complexity continues to be a challenging research
                 topic in computer graphics. One important question that
                 remains to be answered is: When are facial animations
                 good enough? Here we present an integrated framework in
                 which psychophysical experiments are used in a first
                 step to systematically evaluate the perceptual quality
                 of several different computer-generated animations with
                 respect to real-world video sequences. The first
                 experiment provides an evaluation of several animation
                 techniques, exposing specific animation parameters that
                 are important to achieve perceptual fidelity. In a
                 second experiment, we then use these benchmarked
                 animation techniques in the context of perceptual
                 research in order to systematically investigate the
                 spatiotemporal characteristics of expressions. A third
                 and final experiment uses the quality measures that
                 were developed in the first two experiments to examine
                 the perceptual impact of changing facial features to
                 improve the animation techniques. Using such an
                 integrated approach, we are able to provide important
                 insights into facial expressions for both the
                 perceptual and computer graphics community.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "3D-scanning; avatar; evaluation of facial animations;
                 perceptually adaptive graphics; psychophysics;
                 recognition",
}

@Article{Jagnow:2008:EMA,
  author =       "Robert Jagnow and Julie Dorsey and Holly Rushmeier",
  title =        "Evaluation of methods for approximating shapes used to
                 synthesize {$3$D} solid textures",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1278760.1278765",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In modern computer graphics applications, textures
                 play an important role in conveying the appearance of
                 real-world materials. But while surface appearance can
                 often be effectively captured with a photograph, it is
                 difficult to use example imagery to synthesize fully
                 three-dimensional (3D) solid textures that are
                 perceptually similar to their inputs. Specifically,
                 this research focuses on human perception of 3D solid
                 textures composed of aggregate particles in a binding
                 matrix. Holding constant an established algorithm for
                 approximating particle distributions, we examine the
                 problem of estimating particle shape. We consider four
                  methods for approximating plausible particle
                  shapes---including two of our own.
                 We compare the performance of these methods under a
                 variety of input conditions using automated,
                 perceptually motivated metrics, as well as a
                 psychophysical experiment. In the course of assessing
                 the relative performance of the four algorithms, we
                 also evaluate the reliability of the automated metrics
                 in predicting the results of the experiment.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Shape estimation; shape perception; solid textures;
                 texture synthesis; volumetric textures",
}

@Article{Klatzky:2008:EAR,
  author =       "Roberta L. Klatzky and Bing Wu and Damion Shelton and
                 George Stetten",
  title =        "Effectiveness of augmented-reality visualization
                 versus cognitive mediation for learning actions in near
                 space",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279640.1279641",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The present study examined the impact of
                 augmented-reality visualization, in comparison to
                 conventional ultrasound (CUS), on the learning of
                 ultrasound-guided needle insertion. Whereas CUS
                 requires cognitive processes for localizing targets,
                 our augmented-reality device, called the ``sonic
                  flashlight'' (SF), enables direct perceptual guidance.
                 Participants guided a needle to an ultrasound-localized
                 target within opaque fluid. In three experiments, the
                 SF showed higher accuracy and lower variability in
                 aiming and endpoint placements than did CUS. The SF,
                 but not CUS, readily transferred to new targets and
                 starting points for action. These effects were evident
                 in visually guided action (needle and target
                 continuously visible) and visually directed action
                 (target alone visible). The results have application to
                 learning to visualize surgical targets through
                 ultrasound.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "augmented reality; learning; motor control;
                 Perception; spatial cognition",
}

@Article{Ware:2008:VGT,
  author =       "Colin Ware and Peter Mitchell",
  title =        "Visualizing graphs in three dimensions",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279640.1279642",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It has been known for some time that larger graphs can
                 be interpreted if laid out in 3D and displayed with
                 stereo and/or motion depth cues to support spatial
                 perception. However, prior studies were carried out
                 using displays that provided a level of detail far
                 short of what the human visual system is capable of
                 resolving. Therefore, we undertook a graph
                 comprehension study using a very high resolution
                 stereoscopic display. In our first experiment, we
                 examined the effect of stereoscopic display, kinetic
                 depth, and using 3D tubes versus lines to display the
                 links. The results showed a much greater benefit for 3D
                 viewing than previous studies. For example, with both
                 motion and stereoscopic depth cues, unskilled observers
                  could see paths between nodes in 333-node graphs with
                 less than a 10\% error rate. Skilled observers could
                 see up to a 1000-node graph with less than a 10\% error
                 rate. This represented an order of magnitude increase
                 over 2D display. In our second experiment, we varied
                 both nodes and links to understand the constraints on
                 the number of links and the size of graph that can be
                 reliably traced. We found the difference between number
                 of links and number of nodes to best account for error
                 rates and suggest that this is evidence for a
                 ``perceptual phase transition.'' These findings are
                 discussed in terms of their implications for
                 information display.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graph visualization; network visualization;
                 stereoscopic displays; Visualization",
}

@Article{Elhelw:2008:GBS,
  author =       "Mohamed Elhelw and Marios Nicolaou and Adrian Chung
                 and Guang-Zhong Yang and M. Stella Atkins",
  title =        "A gaze-based study for investigating the perception of
                 visual realism in simulated scenes",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279640.1279643",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual realism has been a major objective of computer
                 graphics since the inception of the field. However, the
                 perception of visual realism is not a well-understood
                 process and is usually attributed to a combination of
                 visual cues and image features that are difficult to
                 define or measure. For highly complex images, the
                 problem is even more involved. The purpose of this
                 paper is to present a study based on eye tracking for
                 investigating the perception of visual realism of
                 static images with different visual qualities. The
                 eye-fixation clusters helped to define salient image
                 features corresponding to 3D surface details and light
                 transfer properties that attract observers' attention.
                 This enabled the definition and categorization of image
                 attributes affecting the perception of photorealism.
                 The dynamics of the visual behavior of different
                 observer groups were examined by analyzing saccadic eye
                 movements. We also demonstrated how the different image
                 categories used in the experiments were perceived with
                 varying degrees of visual realism. The results
                 presented can be used as a basis for investigating the
                 impact of individual image features on the perception
                  of visual realism. This study suggests that post-recall
                  or simple abstraction of visual experience is not
                  accurate and that the use of eye tracking provides an
                 effective way of determining relevant features that
                 affect visual realism, thus allowing for improved
                 rendering techniques that target these features.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "eye tracking; human--computer interaction;
                 photorealistic rendering; simulation environment;
                 Visual perception; visual realism",
}

@Article{Palmer:2008:EAT,
  author =       "Evan M. Palmer and Timothy C. Clausner and Philip J.
                 Kellman",
  title =        "Enhancing air traffic displays via perceptual cues",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279640.1279644",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We examined graphical representations of aircraft
                 altitude in simulated air traffic control (ATC)
                 displays. In two experiments, size and contrast cues
                 correlated with altitude improved participants' ability
                 to detect future aircraft collisions (conflicts).
                 Experiment 1 demonstrated that, across several set
                 sizes, contrast and size cues to altitude improved
                 accuracy at identifying conflicts. Experiment 2
                 demonstrated that graphical cues for representing
                 altitude both improved accuracy and reduced search time
                 for finding conflicts in large set size displays. The
                 addition of size and contrast cues to ATC displays may
                 offer specific benefits in aircraft conflict
                 detection.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "air traffic control; applied cognitive science;
                 Human--computer interaction; visualization",
}
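
%%% The study above correlates symbol size and contrast with altitude.  A
%%% toy mapping in the same spirit (the direction of the mapping and the
%%% gains are illustrative assumptions, not values from the experiments):
%%%
%%% def altitude_cues(altitude, alt_min, alt_max):
%%%     """Map altitude to redundant size and contrast cues (sketch)."""
%%%     t = (altitude - alt_min) / float(alt_max - alt_min)
%%%     size = 4.0 + 8.0 * t        # symbol radius in pixels
%%%     contrast = 0.3 + 0.7 * t    # contrast against the background
%%%     return size, contrast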

@Article{Watters:2008:VDL,
  author =       "Paul Watters and Frances Martin and H. Steffen
                 Stripf",
  title =        "Visual detection of {LSB}-encoded natural image
                 steganography",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279640.1328775",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Many steganographic systems embed hidden messages
                  inside the least significant bit layers of color
                 natural images. The presence of these messages can be
                 difficult to detect by using statistical steganalysis.
                 However, visual steganalysis by humans may be more
                 successful in natural image discrimination. This study
                 examined whether humans could detect least-significant
                 bit steganography in 15 color natural images from the
                 VisTex database using a controlled same/different task
                  ($N = 58$) and a yes/no task ($N = 61$). While
                  $d' > 1$ was observed for color layers 4--8, layers
                  1--3 had $d' < 1$ in both experiments. Thus,
                 layers 1--3 appear to be highly resistant to visual
                 steganalysis.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "counterterrorism; Steganography",
}
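
%%% For readers unfamiliar with the two techniques the abstract leans on:
%%% bit-plane embedding replaces a chosen bit layer of each pixel with
%%% message bits, and $d'$ is computed from hit and false-alarm rates.  A
%%% sketch (layer numbering follows the abstract, 1 = least significant):
%%%
%%% from statistics import NormalDist
%%%
%%% def embed_bits(pixels, bits, layer=1):
%%%     """Write 0/1 `bits` into bit-plane `layer` of 0--255 pixel values."""
%%%     mask = 1 << (layer - 1)
%%%     out = list(pixels)
%%%     for i, b in enumerate(bits):
%%%         out[i] = (out[i] & ~mask) | (b << (layer - 1))
%%%     return out
%%%
%%% def d_prime(hit_rate, false_alarm_rate):
%%%     """Detection sensitivity from a same/different or yes/no task."""
%%%     z = NormalDist().inv_cdf
%%%     return z(hit_rate) - z(false_alarm_rate)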

@Article{Reinhard:2008:E,
  author =       "Erik Reinhard and Heinrich B{\"u}lthoff",
  title =        "Editorial",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1361703",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wang:2008:TSP,
  author =       "Qi Wang and Vincent Hayward",
  title =        "Tactile synthesis and perceptual inverse problems seen
                 from the viewpoint of contact mechanics",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1279921",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A contact-mechanics analysis was used to explain a
                 tactile illusion engendered by straining the fingertip
                 skin tangentially in a progressive wave pattern
                 resulting in the perception of a moving undulating
                 surface. We derived the strain tensor field induced by
                 a sinusoidal surface sliding on a finger as well as the
                 field created by a tactile transducer array deforming
                 the fingerpad skin by lateral traction. We found that
                 the first field could be well approximated by the
                 second. Our results have several implications. First,
                 tactile displays using lateral skin deformation can
                 generate tactile sensations similar to those using
                 normal skin deformation. Second, a synthesis approach
                 can achieve this result if some constraints on the
                 design of tactile stimulators are met. Third, the
                 mechanoreceptors embedded in the skin must respond to
                 the deviatoric part of the strain tensor field and not
                  to its volumetric part. Finally, many tactile stimuli
                  might represent, for the brain, an inverse problem to
                  be solved; specific examples of such ``tactile
                  metamers'' are given.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "computational tactile perception; contact mechanics;
                 Haptics; Lateral skin deformation; Tactile sensing;
                 Tactile synthesis; tactile transducers arrays",
}
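
%%% The abstract's claim that mechanoreceptors respond to the deviatoric
%%% rather than the volumetric part of the strain tensor refers to the
%%% standard decomposition, sketched here:
%%%
%%% import numpy as np
%%%
%%% def strain_parts(strain):
%%%     """Split a 3x3 strain tensor into volumetric and deviatoric parts."""
%%%     volumetric = (np.trace(strain) / 3.0) * np.eye(3)  # uniform dilation
%%%     deviatoric = strain - volumetric                   # shape change only
%%%     return volumetric, deviatoric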

@Article{Jay:2008:UHC,
  author =       "Caroline Jay and Robert Stevens and Roger Hubbold and
                 Mashhuda Glencross",
  title =        "Using haptic cues to aid nonvisual structure
                 recognition",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1279922",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Retrieving information presented visually is difficult
                 for visually disabled users. Current accessibility
                 technologies, such as screen readers, fail to convey
                 presentational layout or structure. Information
                 presented in graphs or images is almost impossible to
                 convey through speech alone. In this paper, we present
                 the results of an experimental study investigating the
                 role of touch (haptic) and auditory cues in aiding
                 structure recognition when visual presentation is
                 missing. We hypothesize that by guiding users toward
                 nodes in a graph structure using force fields, users
                 will find it easier to recognize overall structure.
                 Nine participants were asked to explore simple 3D
                 structures containing nodes (spheres or cubes) laid out
                  in various spatial configurations, and to identify
                 the nodes and draw their overall structure. Various
                 combinations of haptic and auditory feedback were
                 explored. Our results demonstrate that haptic cues
                 significantly helped participants to quickly recognize
                 nodes and structure. Surprisingly, auditory cues alone
                 did not speed up node recognition; however, when they
                 were combined with haptics both node identification and
                 structure recognition significantly improved. This
                 result demonstrates that haptic feedback plays an
                 important role in enabling people to recall spatial
                 layout.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "accessibility; haptic perception; Multimodal cues;
                 visual disability",
}

@Article{Peters:2008:ACT,
  author =       "Robert J. Peters and Laurent Itti",
  title =        "Applying computational tools to predict gaze direction
                 in interactive visual environments",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1279923",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Future interactive virtual environments will be
                 ``attention-aware,'' capable of predicting, reacting
                 to, and ultimately influencing the visual attention of
                 their human operators. Before such environments can be
                 realized, it is necessary to operationalize our
                 understanding of the relevant aspects of visual
                 perception, in the form of fully automated
                 computational heuristics that can efficiently identify
                 locations that would attract human gaze in complex
                 dynamic environments. One promising approach to
                 designing such heuristics draws on ideas from
                 computational neuroscience. We compared several
                 neurobiologically inspired heuristics with eye-movement
                 recordings from five observers playing video games, and
                 found that human gaze was better predicted by
                 heuristics that detect outliers from the global
                 distribution of visual features than by purely local
                 heuristics. Heuristics sensitive to dynamic events
                 performed best overall. Further, heuristic prediction
                 power differed more between games than between
                 different human observers. While other factors clearly
                 also influence eye position, our findings suggest that
                 simple neurally inspired algorithmic methods can
                 account for a significant portion of human gaze
                 behavior in a naturalistic, interactive setting. These
                 algorithms may be useful in the implementation of
                 interactive virtual environments, both to predict the
                 cognitive state of human operators, as well as to
                 effectively endow virtual agents in the system with
                 humanlike visual behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Active vision; computational modeling; eye-movements;
                 immersive environments; video games; visual attention",
}
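
%%% The finding that gaze is best predicted by heuristics detecting
%%% outliers from the global distribution of visual features can be made
%%% concrete with a crude stand-in (not the authors' heuristics): score
%%% each location by how far its feature value lies from the image mean.
%%%
%%% import numpy as np
%%%
%%% def outlier_salience(feature_map):
%%%     """Global-outlier salience over one feature map (e.g., luminance)."""
%%%     mu = feature_map.mean()
%%%     sigma = feature_map.std() + 1e-9
%%%     return np.abs(feature_map - mu) / sigma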

@Article{Tarr:2008:IFA,
  author =       "Michael J. Tarr and Athinodoros S. Georghiades and
                 Cullen D. Jackson",
  title =        "Identifying faces across variations in lighting:
                 Psychophysics and computation",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1279924",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Humans have the ability to identify objects under
                 varying lighting conditions with extraordinary
                 accuracy. We investigated the behavioral aspects of
                 this ability and compared it to the performance of the
                 illumination cones (IC) model of Belhumeur and Kriegman
                 [1998]. In five experiments, observers learned 10 faces
                 under a small subset of illumination directions. We
                 then tested observers' recognition ability under
                 different illuminations. Across all experiments,
                 recognition performance was found to be dependent on
                 the distance between the trained and tested
                 illumination directions. This effect was modulated by
                 the nature of the trained illumination directions.
                 Generalizations from frontal illuminations were
                 different than generalizations from extreme
                 illuminations. Similarly, the IC model was also
                 sensitive to whether the trained images were
                 near-frontal or extreme. Thus, we find that the nature
                 of the images in the training set affects the accuracy
                 of an object's representation under variable lighting
                 for both humans and the model. Beyond this general
                 correspondence, the microstructure of the
                 generalization patterns for both humans and the IC
                 model were remarkably similar, suggesting that the two
                 systems may employ related algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "face recognition; human psychophysics; Illumination
                 invariance; image-based models; object recognition",
}
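
%%% The illumination cones model referenced above rests on a standard
%%% fact: images of a convex Lambertian surface under all directional
%%% lights form a convex cone.  A sketch of the generative half (in the
%%% full model, B is estimated from the training images):
%%%
%%% import numpy as np
%%%
%%% def render_lambertian(B, light):
%%%     """B: (npixels, 3) albedo-scaled surface normals; light: 3-vector
%%%     (direction times intensity).  Attached shadows clamp at zero."""
%%%     return np.maximum(B @ light, 0.0)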

@Article{Bicego:2008:DFC,
  author =       "Manuele Bicego and Enrico Grosso and Andrea Lagorio
                 and Gavin Brelstaff and Linda Brodo and Massimo
                 Tistarelli",
  title =        "Distinctiveness of faces: a computational approach",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1279925",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper develops and demonstrates an original
                 approach to face-image analysis based on identifying
                 distinctive areas of each individual's face by its
                 comparison to others in the population. The method
                  differs from most others---which we refer to as unary
                  ---where salient regions are defined by analyzing only
                 images of the same individual. We extract a set of
                 multiscale patches from each face image before
                 projecting them into a common feature space. The degree
                 of ``distinctiveness'' of any patch depends on its
                 distance in feature space from patches mapped from
                 other individuals. First a pairwise analysis is
                 developed and then a simple generalization to the
                 multiple-face case is proposed. A perceptual
                 experiment, involving 45 observers, indicates the
                 method to be fairly compatible with how humans mark
                 faces as distinct. A quantitative example of face
                 authentication is also performed in order to show the
                 essential role played by the distinctive information. A
                 comparative analysis shows that performance of our
                 n-ary approach is as good as several contemporary
                 unary, or binary, methods, while tapping a
                 complementary source of information. Furthermore, we
                 show it can also provide a useful degree of
                 illumination invariance.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "face authentication; illumination changes; log-polar
                 representation",
}
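
%%% One plausible reading of the pairwise distinctiveness measure (the
%%% paper's exact scoring may differ): a patch of face A is distinctive
%%% when it lies far, in the common feature space, from every patch drawn
%%% from other individuals.
%%%
%%% import numpy as np
%%%
%%% def distinctiveness(patches_a, patches_others):
%%%     """Nearest-neighbour distance from each of A's feature-space
%%%     patches to the pooled patches of other faces; larger = more
%%%     distinctive."""
%%%     diff = patches_a[:, None, :] - patches_others[None, :, :]
%%%     d = np.linalg.norm(diff, axis=-1)   # (n_a, n_others) distances
%%%     return d.min(axis=1)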

@Article{Grave:2008:TMO,
  author =       "Justine Grave and Roland Bremond",
  title =        "A tone-mapping operator for road visibility
                 experiments",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1279920.1361704",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "One may wish to use computer graphic images to carry
                 out road visibility studies. Unfortunately, most
                 display devices still have a limited luminance dynamic
                 range, especially in driving simulators. In this paper,
                 we propose a tone-mapping operator (TMO) to compress
                 the luminance dynamic range while preserving the
                 driver's performance for a visual task relevant for a
                 driving situation. We address three display issues of
                 some consequences for road image display: luminance
                 dynamics, image quantization, and high minimum
                 displayable luminance. Our TMO characterizes the
                 effects of local adaptation with a bandpass
                 decomposition of the image using a Laplacian pyramid,
                 and processes the levels separately in order to mimic
                 the human visual system. The contrast perception model
                  uses the visibility level, a standard index in road
                 visibility engineering applications. To assess our
                 algorithm, a psychophysical experiment devoted to a
                 target detection task was designed. Using a Landolt
                 ring, the visual performances of 30 observers were
                  measured: they first viewed a high-dynamic-range
                  image and then the same image processed by a TMO and
                  displayed on a low-dynamic-range monitor, for
                 comparison. The evaluation was completed with a visual
                 appearance evaluation. Our operator gives good
                 performances for three typical road situations (one in
                 daylight and two at night), after comparison with four
                 standard TMOs from the literature. The psychovisual
                 assessment of our TMO is limited to these driving
                 situations.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "HDR images; psychophysics; road visibility; visual
                 performance",
}
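
%%% The TMO above processes the bands of a Laplacian pyramid separately.
%%% A minimal band decomposition, assuming SciPy is available (this is
%%% the generic construction, not the paper's tuned operator):
%%%
%%% import numpy as np
%%% from scipy import ndimage
%%%
%%% def laplacian_pyramid(luminance, levels=4):
%%%     """Return bandpass levels plus a low-frequency residual."""
%%%     bands, current = [], luminance.astype(float)
%%%     for _ in range(levels):
%%%         blurred = ndimage.gaussian_filter(current, sigma=1.0)
%%%         bands.append(current - blurred)  # one bandpass level
%%%         current = blurred[::2, ::2]      # decimate to the next octave
%%%     bands.append(current)                # low-frequency residual
%%%     return bands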

@Article{Nees:2008:DDT,
  author =       "Michael A. Nees and Bruce N. Walker",
  title =        "Data density and trend reversals in auditory graphs:
                 Effects on point-estimation and trend-identification
                 tasks",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1402236.1402237",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Auditory graphs --- displays that represent
                 quantitative information with sound --- have the
                 potential to make data (and therefore science) more
                 accessible for diverse user populations. No research to
                 date, however, has systematically addressed the
                 attributes of data that contribute to the complexity
                 (the ease or difficulty of comprehension) of auditory
                 graphs. A pair of studies examined the role of data
                 density (i.e., the number of discrete data points
                 presented per second) and the number of trend reversals
                 for both point-estimation and trend-identification
                 tasks with auditory graphs. For the point-estimation
                 task, more trend reversals led to performance
                 decrements. For the trend-identification task, a large
                 main effect was again observed for trend reversals, but
                 an interaction suggested that the effect of the number
                 of trend reversals was different across lower data
                 densities (i.e., as density increased from 1 to 2 data
                 points per second). Results are discussed in terms of
                 data sonification applications and rhythmic theories of
                 auditory pattern perception.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory display; Auditory graphs; sonification",
}
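
%%% An auditory graph in the sense used above maps data values to pitch
%%% and plays them at a fixed data density.  A sketch (the frequency
%%% range and defaults are illustrative; the study varied density between
%%% 1 and 2 points per second):
%%%
%%% import numpy as np
%%%
%%% def sonify(values, points_per_second=2.0, sr=44100,
%%%            fmin=220.0, fmax=880.0):
%%%     """Return a mono waveform with one tone per data point."""
%%%     lo, hi = min(values), max(values)
%%%     t = np.arange(int(sr / points_per_second)) / sr
%%%     tones = [np.sin(2 * np.pi * t *
%%%                     (fmin + (fmax - fmin) * (v - lo) / (hi - lo + 1e-9)))
%%%              for v in values]
%%%     return np.concatenate(tones)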

@Article{Lecuyer:2008:SMS,
  author =       "Anatole L{\'e}cuyer and Jean-Marie Burkhardt and
                 Chee-Hian Tan",
  title =        "A study of the modification of the speed and size of
                 the cursor for simulating pseudo-haptic bumps and
                 holes",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1402236.1402238",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In previous work on so-called pseudo-haptic textures,
                 we investigated the possibility of simulating
                 sensations of texture without haptic devices by using
                 the sole manipulation of the speed of a mouse cursor (a
                  technique called the speed technique). In this paper, we
                  describe another technique (called the size technique) to
                  enhance the speed technique and simulate texture
                 sensations by varying the size of the cursor according
                 to the local height of the texture displayed on the
                 computer screen. With the size technique, the user
                 would see an increase (decrease) in cursor size
                 corresponding to a positive (negative) slope of the
                 texture. We have conducted a series of experiments to
                 study and compare the use of both the size and speed
                 technique for simulating simple shapes like bumps and
                 holes. In Experiment 1, our results showed that
                 participants could successfully identify bumps and
                 holes using the size technique alone. Performances
                 obtained with the size technique reached a similar
                 level of accuracy as found previously with the speed
                 technique alone. In Experiment 2, we determined a point
                 of subjective equality between bumps simulated by each
                 technique separately, which suggests that the two
                 techniques provide information that can be perceptually
                 equivalent. In Experiment 3, using paradoxical
                 situations of conflict between the two techniques, we
                 have found that participants' answers were more
                 influenced by the size technique, suggesting a
                 dominance of the size over the speed technique.
                 Furthermore, we have found a mutual reinforcement of
                 the techniques, i.e., when the two techniques were
                 consistently combined, the participants were more
                 efficient in identifying the simulated shapes. In
                 Experiment 4, we further observed the complex
                 interactions between the information associated with
                 the two techniques in the perception and in the
                 decision process related to the accurate identification
                 of bumps and holes. Taken together, our results promote
                 the use of both techniques for the low-cost simulation
                  of texture sensations in applications such as
                  videogames, the internet, and graphical user interfaces.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "bump; control/display ratio; cursor; hole;
                 Pseudo-haptic; size; speed; texture",
}
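
%%% The two techniques compared above have a simple core: modulate cursor
%%% speed by the local slope of a height field (speed technique) and
%%% cursor size by the local height (size technique).  The gains here are
%%% illustrative assumptions:
%%%
%%% def pseudo_haptic_cursor(base_speed, base_size, height, slope,
%%%                          k_speed=0.5, k_size=0.3):
%%%     """Uphill slopes slow the cursor; higher texture enlarges it."""
%%%     speed = base_speed * max(0.1, 1.0 - k_speed * slope)
%%%     size = base_size * (1.0 + k_size * height)
%%%     return speed, size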

@Article{Amemiya:2008:LMI,
  author =       "Tomohiro Amemiya and Hideyuki Ando and Taro Maeda",
  title =        "Lead-me interface for a pulling sensation from
                 hand-held devices",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1402236.1402239",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When a small mass in a hand-held device oscillates
                 along a single axis with asymmetric acceleration
                 (strongly peaked in one direction and diffuse in the
                 other), the holder typically experiences a kinesthetic
                 illusion characterized by the sensation of being
                 continuously pushed or pulled by the device. This
                 effect was investigated because of its potential
                 application to a hand-held, nongrounded, haptic device
                 that can convey a sense of a continuous translational
                 force in one direction, which is a key missing piece in
                 haptic research. A 1 degree-of-freedom (DOF) haptic
                 device based on a crank-slider mechanism was
                 constructed. The device converts the constant rotation
                 of an electric motor into the constrained movement of a
                 small mass with asymmetric acceleration. The frequency
                 that maximizes the perceived movement offered by the
                 haptic device was investigated. Tests using three
                 subjects showed that for the prototype, the best
                 frequencies were 5 and 10 cycles per second.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Haptic perception; interface using illusionary
                 sensation; mobile device",
}
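
%%% The pulling illusion relies on an oscillation whose acceleration is
%%% strongly peaked in one direction and weak and prolonged in the other,
%%% with zero mean.  A toy profile (the 5 and 10 Hz best frequencies are
%%% from the paper; the pulse shape here is an assumption):
%%%
%%% import numpy as np
%%%
%%% def asymmetric_accel(freq=5.0, peak_fraction=0.2, sr=1000, cycles=3):
%%%     """Short strong pulse, long weak return; each cycle sums to zero."""
%%%     n = int(sr / freq)
%%%     k = max(1, int(n * peak_fraction))
%%%     cycle = np.empty(n)
%%%     cycle[:k] = 1.0 / peak_fraction            # brief, strong push
%%%     cycle[k:] = -1.0 / (1.0 - peak_fraction)   # sustained, weak return
%%%     return np.tile(cycle, cycles)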

@Article{Fontana:2008:ADP,
  author =       "Federico Fontana and Davide Rocchesso",
  title =        "Auditory distance perception in an acoustic pipe",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1402236.1402240",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In a study of auditory distance perception, we
                  investigated the effects of exaggerating the acoustic
                  cue of reverberation while the intensity of sound did
                 not vary noticeably. The set of stimuli was obtained by
                 moving a sound source inside a 10.2-m long pipe having
                 a 0.3-m diameter. Twelve subjects were asked to listen
                 to a speech sound while keeping their head inside the
                 pipe and then to estimate the egocentric distance from
                 the sound source using a magnitude production
                 procedure. The procedure was repeated eighteen times
                 using six different positions of the sound source.
                 Results show that the point at which perceived distance
                 equals physical distance is located approximately 3.5 m
                 away from the listening point, with an average range of
                 distance estimates of approximately 3.3 m, i.e., 1.65
                 to 4.9 m. The absence of intensity cues makes the
                 acoustic pipe a potentially interesting modeling
                 paradigm for the design of auditory interfaces in which
                 distance is rendered independently of loudness. The
                 proposed acoustic environment also confirms the known
                 unreliability of certain distance cues.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Acoustic pipe; auditory display; distance perception",
}

@Article{Kuhl:2008:RRL,
  author =       "Scott A. Kuhl and Sarah H. Creem-Regehr and William B.
                 Thompson",
  title =        "Recalibration of rotational locomotion in immersive
                 virtual environments",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1402236.1402241",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This work uses an immersive virtual environment (IVE)
                 to examine how people maintain a calibration between
                 biomechanical and visual information for rotational
                 self-motion. First, we show that no rotational
                 recalibration occurs when visual and biomechanical
                 rates of rotation are matched. Next, we demonstrate
                 that mismatched physical and visual rotation rates
                 cause rotational recalibration. Although previous work
                 has shown that rotational locomotion can be
                 recalibrated in real environments, this work extends
                 the finding to virtual environments. We further show
                 that people do not completely recalibrate left and
                 right rotations independently when different
                 visual--biomechanical discrepancies are used for left
                 and right rotations during a recalibration phase.
                 Finally, since the majority of participants did not
                 notice mismatched physical and visual rotation rates,
                 we discuss the implications of using such mismatches to
                 enable IVE users to explore a virtual space larger than
                 the physical space they are in.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Perception; recalibration; rotation; virtual
                 environments",
}
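
%%% The mismatches discussed above amount to a rotation gain between
%%% physical and rendered yaw; gains slightly above or below 1 let a
%%% virtual scene exceed the tracked space.  A minimal sketch (the gain
%%% value is illustrative):
%%%
%%% def apply_rotation_gain(physical_yaw_delta_deg, gain=1.1):
%%%     """Render more (gain > 1) or less (gain < 1) rotation than the
%%%     user physically performs."""
%%%     return gain * physical_yaw_delta_deg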

@Article{Fleming:2009:GES,
  author =       "Roland Fleming and Michael Langer",
  title =        "Guest editorial: Special issue on {Applied Perception
                 in Graphics and Visualization (APGV07)}",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462049",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Morvan:2009:PAT,
  author =       "Yann Morvan and Carol O'Sullivan",
  title =        "A perceptual approach to trimming and tuning
                 unstructured lumigraphs",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462050",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a novel perceptual method to reduce the
                 visual redundancy of unstructured lumigraphs, an
                 image-based representation designed for interactive
                 rendering. We combine features of the unstructured
                 lumigraph algorithm and image fidelity metrics to
                 efficiently rank the perceptual impact of the removal
                 of subregions of input views ({\em subviews\/}). We use
                 a greedy approach to estimate the order in which
                 subviews should be pruned to minimize perceptual
                 degradation at each step. Renderings using varying
                 numbers of subviews can then be easily visualized with
                 confidence that the retained subviews are well chosen,
                 thus facilitating the choice of how many to retain. The
                 regions of the input views that are left are repacked
                 into a texture atlas. Our method takes advantage of any
                 scene geometry information available but only requires
                 a very coarse approximation. We perform a user study to
                 validate its behaviour and to investigate the impact of
                 both the choice of image fidelity metric and that of
                 the user parameters. The three metrics
                 considered fall in the physical, statistical and
                 perceptual categories. The overall benefit of our
                 method is the semiautomation of the view selection
                 process, resulting in unstructured lumigraphs that are
                 thriftier in texture memory use and faster to render.
                 Using the same framework, we adjust the parameters of
                 the unstructured lumigraph algorithm to optimise it on
                 a scene-by-scene basis.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Image-based rendering; perceptual metrics",
}
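
%%% The greedy subview-pruning loop described in the abstract above can
%%% be sketched in a few lines of Python. This is an illustrative
%%% reconstruction under stated assumptions: `render_without' and
%%% `fidelity_error' are hypothetical stand-ins for the
%%% unstructured-lumigraph renderer and the chosen image fidelity
%%% metric, neither of which is specified here.
%%%
%%%     def greedy_prune_order(subviews, render_without, fidelity_error,
%%%                            reference):
%%%         """Return the subviews in pruning order, smallest perceptual
%%%         impact first, re-ranking greedily after each removal.
%%%         render_without(views, s) renders using `views' minus `s';
%%%         fidelity_error(image, reference) scores the degradation."""
%%%         remaining = list(subviews)
%%%         order = []
%%%         while remaining:
%%%             # Rank each candidate by the perceptual cost of removing it.
%%%             best = min(remaining,
%%%                        key=lambda s: fidelity_error(
%%%                            render_without(remaining, s), reference))
%%%             remaining.remove(best)
%%%             order.append(best)
%%%         return order
%%%
%%% A rendering that retains k subviews then keeps the last k entries of
%%% the returned order, i.e., the subviews whose removal hurt most.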

@Article{McDonnell:2009:EEM,
  author =       "Rachel McDonnell and Sophie J{\"o}rg and Jessica K.
                 Hodgins and Fiona Newell and Carol O'Sullivan",
  title =        "Evaluating the effect of motion and body shape on the
                 perceived sex of virtual characters",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462051",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this paper, our aim is to determine factors that
                 influence the perceived sex of virtual characters. In
                 Experiment 1, four different model types were used:
                 highly realistic male and female models, an androgynous
                 character, and a point light walker. Three different
                 types of motion were applied to all models: motion
                 captured male and female walks, and neutral synthetic
                 walks. We found that both form and motion influence sex
                 perception for these characters: for neutral synthetic
                 motions, form determines perceived sex, whereas natural
                 motion affects the perceived sex of both androgynous
                 and realistic forms. These results indicate that the
                 use of neutral walks is better than creating ambiguity
                 by assigning an incongruent motion. In Experiment 2 we
                 investigated further the influence of body shape and
                 motion on realistic male and female models and found
                 that adding stereotypical indicators of sex to the body
                 shapes influenced sex perception, and that exaggerated
                 female body shapes influence sex judgements more than
                 exaggerated male shapes do. These
                 results have implications for variety and realism when
                 simulating large crowds of virtual characters.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Lavoue:2009:LRM,
  author =       "Guillaume Lavou{\'e}",
  title =        "A local roughness measure for {$3$D} meshes and its
                 application to visual masking",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462052",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "3D models are subject to a wide variety of processing
                 operations such as compression, simplification or
                 watermarking, which may introduce some geometric
                 artifacts on the shape. The main issue is to maximize
                 the compression/simplification ratio or the watermark
                 strength while minimizing these visual degradations.
                 However few algorithms exploit the human visual system
                 to {\em hide\/} these degradations, while perceptual
                 attributes could be quite relevant for this task.
                 Particularly, the {\em masking effect\/} defines the
                 fact that one visual pattern can hide the visibility of
                 another. In this context we introduce an algorithm for
                 estimating the {\em roughness\/} of a 3D mesh, as a
                 local measure of geometric noise on the surface.
                 Indeed, a textured (or {\em rough\/}) region is able to
                 hide geometric distortions much better than a smooth
                 one. Our measure is based on curvature analysis on
                 local windows of the mesh and is independent of the
                 resolution/connectivity of the object. The accuracy and
                 robustness of our measure, together with its relevance
                 to visual masking, have been demonstrated through
                 extensive comparisons with the state of the art and
                 through a subjective experiment. Two applications are
                 also presented, in which the roughness measure is used
                 to guide (and improve) compression and watermarking
                 algorithms, respectively.
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "3D mesh; Curvature; Masking; Roughness; subjective
                 evaluation",
}
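
%%% A minimal sketch, in Python, of a curvature-based local roughness
%%% measure in the spirit of the abstract above: roughness at a vertex
%%% is taken to be the variability of curvature over a small
%%% topological window. The windowing scheme and the statistic are
%%% illustrative assumptions, not the paper's exact construction.
%%%
%%%     import numpy as np
%%%
%%%     def local_roughness(curvatures, neighbors, rings=2):
%%%         """curvatures: per-vertex curvature values (floats).
%%%         neighbors: adjacency list; neighbors[i] lists the vertices
%%%         adjacent to vertex i. Returns, for each vertex, the standard
%%%         deviation of curvature over a window grown by `rings'
%%%         one-ring expansions."""
%%%         rough = np.zeros(len(curvatures))
%%%         for v in range(len(curvatures)):
%%%             window, frontier = {v}, {v}
%%%             for _ in range(rings):
%%%                 frontier = {u for w in frontier
%%%                             for u in neighbors[w]} - window
%%%                 window |= frontier
%%%             rough[v] = np.std([curvatures[u] for u in window])
%%%         return rough
%%%
%%% Under the masking effect, a high-roughness region would tolerate a
%%% stronger watermark or coarser compression than a smooth one.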

@Article{Murphy:2009:HIM,
  author =       "Hunter A. Murphy and Andrew T. Duchowski and Richard
                 A. Tyrrell",
  title =        "Hybrid image\slash model-based gaze-contingent
                 rendering",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462053",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A nonisotropic hybrid image/model-based
                 gaze-contingent rendering technique utilizing ray
                 casting on a GPU is discussed. Empirical evidence
                 derived from human subject experiments indicates an
                 inverse relationship between a peripherally degraded
                 scene's high-resolution inset size and mean search
                 time, a trend consistent with existing image-based and
                 model-based techniques. In addition, the data suggest
                 that maintaining a target's silhouette edges decreases
                 search times when compared to targets with degraded
                 edges. However, analysis suggests a point of
                 diminishing returns with an inset larger than
                 $15^\circ$ when target discrimination is a component of
                 visual search. Benefits of the hybrid technique include
                 simplicity of design and parallelizability, both
                 conducive to GPU implementation.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Eye tracking; Level of Detail",
}

@Article{Boucheny:2009:PEV,
  author =       "Christian Boucheny and Georges-Pierre Bonneau and
                 Jacques Droulez and Guillaume Thibault and Stephane
                 Ploix",
  title =        "A perceptive evaluation of volume rendering
                 techniques",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1462048.1462054",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The display of space filling data is still a challenge
                 for the community of visualization. Direct volume
                 rendering (DVR) is one of the most important techniques
                 developed to achieve direct perception of such
                 volumetric data. It is based on semitransparent
                 representations, where the data are accumulated in a
                 depth-dependent order. However, it produces images that
                 may be difficult to understand, and thus several
                 techniques have been proposed so as to improve its
                 effectiveness, using for instance lighting models or
                 simpler representations (e.g., maximum intensity
                 projection). In this article, we present three
                 perceptual studies that examine how DVR meets its
                 goals, in either a static or a dynamic context. We show
                 that a static representation is highly ambiguous, even
                 in simple cases, but this can be counterbalanced by use
                 of dynamic cues (i.e., motion parallax) provided that
                 the rendering parameters are correctly tuned. In
                 addition, perspective projections are demonstrated to
                 provide relevant information to disambiguate depth
                 perception in dynamic displays.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Direct volume rendering; perception of transparency;
                 perspective projection; structure from motion",
}

@Article{Feixas:2009:UIT,
  author =       "Miquel Feixas and Mateu Sbert and Francisco
                 Gonz{\'a}lez",
  title =        "A unified information-theoretic framework for
                 viewpoint selection and mesh saliency",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hattenberger:2009:PIG,
  author =       "Timothy J. Hattenberger and Mark D. Fairchild and
                 Garrett M. Johnson and Carl Salvaggio",
  title =        "A psychophysical investigation of global illumination
                 algorithms used in augmented reality",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2009:NEF,
  author =       "Yanfang Li and Volkan Patoglu and Marcia K.
                 O'Malley",
  title =        "Negative efficacy of fixed gain error reducing shared
                 control for training in virtual environments",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gray:2009:SRC,
  author =       "Rob Gray and Rayka Mohebbi and Hong Z. Tan",
  title =        "The spatial resolution of crossmodal attention:
                 Implications for the design of multimodal interfaces",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2009:PIM,
  author =       "Li Li and Bernard D. Adelstein and Stephen R. Ellis",
  title =        "Perception of image motion during head movement",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Khan:2009:CPE,
  author =       "Masood Mehmood Khan and Robert D. Ward and Michael
                 Ingleby",
  title =        "Classifying pretended and evoked facial expressions of
                 positive and negative affective states using infrared
                 measurement of skin temperature",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2009:MSE,
  author =       "Bernhard E. Riecke and Aleksander V{\"a}ljam{\"a}e and
                 J{\"o}rg Schulte-Pelkum",
  title =        "Moving sounds enhance the visually-induced self-motion
                 illusion (circular vection) in virtual reality",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498700.1498701",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "While rotating visual and auditory stimuli have long
                 been known to elicit self-motion illusions (``circular
                 vection''), audiovisual interactions have hardly been
                 investigated. Here, two experiments investigated
                 whether visually induced circular vection can be
                 enhanced by concurrently rotating auditory cues that
                 match visual landmarks (e.g., a fountain sound).
                 Participants sat behind a curved projection screen
                 displaying rotating panoramic renderings of a market
                 place. Apart from a no-sound condition, headphone-based
                 auditory stimuli consisted of mono sound, ambient
                 sound, or low-/high-spatial resolution auralizations
                 using generic head-related transfer functions (HRTFs).
                 While merely adding nonrotating (mono or ambient) sound
                 showed no effects, moving sound stimuli facilitated
                 both vection and presence in the virtual environment.
                 This spatialization benefit was maximal for a medium
                 ($20^\circ \times 15^\circ$) FOV, reduced for a larger
                 ($54^\circ \times 45^\circ$) FOV and unexpectedly
                 absent for the smallest ($10^\circ \times 7.5^\circ$)
                 FOV. Increasing auralization spatial fidelity (from
                 low, comparable to five-channel home theatre systems,
                 to high, $5^\circ$ resolution) provided no further
                 benefit, suggesting a ceiling effect. In conclusion,
                 both self-motion perception and presence can benefit
                 from adding moving auditory stimuli. This has important
                 implications both for multimodal cue integration
                 theories and the applied challenge of building
                 affordable yet effective motion simulators.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Audiovisual interactions; presence; psychophysics;
                 self-motion simulation; spatial sound; vection; virtual
                 reality",
}

@Article{Willemsen:2009:EHM,
  author =       "Peter Willemsen and Mark B. Colton and Sarah H.
                 Creem-Regehr and William B. Thompson",
  title =        "The effects of head-mounted display mechanical
                 properties and field of view on distance judgments in
                 virtual environments",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498700.1498702",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Research has shown that people are able to judge
                 distances accurately in full-cue, real-world
                 environments using visually directed actions. However,
                 in virtual environments viewed with head-mounted
                 display (HMD) systems, there is evidence that people
                 act as though the virtual space is smaller than
                 intended. This is a surprising result given how well
                 people act in real environments. The behavior in the
                 virtual setting may be linked to distortions in the
                 available visual cues or to a person's ability to
                 locomote without vision. Either could result from
                 issues related to added mass, moments of inertia, and
                 restricted field of view in HMDs. This article
                 describes an experiment in which distance judgments
                 based on normal real-world and HMD viewing are compared
                 with judgments based on real-world viewing while
                 wearing two specialized devices. One is a mock HMD that
                 replicated the mass, moments of inertia, and field of
                 view of the HMD; the other is an inertial headband
                 designed to replicate the mass and moments of inertia
                 of the HMD, but constructed so as not to restrict the
                 observer's field of view or otherwise feel like
                 wearing a helmet. Distance judgments using the mock HMD
                 showed a statistically significant underestimation
                 relative to the no restriction condition but not of a
                 magnitude sufficient to account for all the distance
                 compression seen in the HMD. Indicated distances with
                 the inertial headband were not significantly smaller
                 than those made with no restrictions.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "distance judgments; head-mounted displays;
                 Perception",
}

@Article{Duchowski:2009:SVS,
  author =       "Andrew T. Duchowski and David Bate and Paris
                 Stringfellow and Kaveri Thakur and Brian J. Melloy and
                 Anand K. Gramopadhye",
  title =        "On spatiochromatic visual sensitivity and peripheral
                 color {LOD} management",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498700.1498703",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Empirical findings from a gaze-contingent color
                 degradation study report the effects of artificial
                 reduction of the human visual system's sensitivity to
                 peripheral chromaticity on visual search performance.
                 To our knowledge, this is the first such investigation
                 of peripheral color reduction. For unimpeded
                 performance, results suggest that, unlike
                 spatiotemporal content, peripheral chromaticity cannot
                 be reduced within the central $20^\circ$ visual angle.
                 Somewhat analogous to dark adaptation, reduction of
                 peripheral color tends to simulate scotopic viewing
                 conditions. This holds significant implications for
                 chromatic Level Of Detail management. Specifically,
                 while peripheral spatiotemporal detail can be
                 attenuated without affecting visual search, often
                 dramatically (e.g., spatial detail can be reduced by up
                 to 50\% at about $5^\circ$), peripheral chromatic
                 reduction is likely to be noticed much sooner.
                 Therefore, color LOD reduction (e.g., via compression)
                 should be maintained isotropically across the central
                 $20^\circ$ visual field.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Gaze-contingent displays",
}

@Article{Harper:2009:TDV,
  author =       "Simon Harper and Eleni Michailidou and Robert
                 Stevens",
  title =        "Toward a definition of visual complexity as an
                 implicit measure of cognitive load",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498700.1498704",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The visual complexity of Web pages is much talked
                 about; ``complex Web pages are difficult to use,'' but
                 often regarded as a subjective decision by the user.
                 This subjective decision is of limited use if we wish
                 to understand the importance of visual complexity, what
                 it means, and how it can be used. We theorize that by
                 understanding a user's visual perception of Web page
                 complexity, we can understand the cognitive effort
                 required for interaction with that page. This is
                 important because by using an easily identifiable
                 measure, such as visual complexity, as an implicit
                 marker of cognitive load, we can design Web pages which
                 are easier to interact with. We have devised an initial
                 empirical experiment, using card sorting and triadic
                 elicitation, to test our theories and assumptions, and
                 have built an initial baseline sequence of 20 Web pages
                 along with a library of qualitative and anecdotal
                 feedback. Using this library, we define visual
                 complexity, ergo perceived interaction complexity, and
                 by taking these pages as ``prototypes'' and ranking
                 them into a sequence of complexity, we are able to
                 group them into: simple, neutral, and complex. This
                 means we can now work toward a definition of visual
                 complexity as an implicit measure of cognitive load.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "knowledge elicitation; semantic Web; visual
                 complexity; visual impairment; Web accessibility",
}

@Article{Canosa:2009:RWV,
  author =       "Roxanne L. Canosa",
  title =        "Real-world vision: Selective perception and task",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498700.1498705",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual perception is an inherently selective process.
                 To understand when and why a particular region of a
                 scene is selected, it is imperative to observe and
                 describe the eye movements of individuals as they go
                 about performing specific tasks. In this sense, vision
                 is an active process that integrates scene properties
                 with specific, goal-oriented oculomotor behavior. This
                 study is an investigation of how task influences the
                 visual selection of stimuli from a scene. Four eye
                 tracking experiments were designed and conducted to
                 determine how everyday tasks affect oculomotor
                 behavior. A portable eye tracker was created for the
                 specific purpose of bringing the experiments out of the
                 laboratory and into the real world, where natural
                 behavior is most likely to occur. The experiments
                 provide evidence that the human visual system is not a
                 passive collector of salient environmental stimuli, nor
                 is vision general-purpose. Rather, vision is active and
                 specific, tightly coupled to the requirements of a task
                 and a plan of action. The experiments support the
                 hypothesis that the purpose of selective attention is
                 to maximize task efficiency by fixating relevant
                 objects in the scene. A computational model of visual
                 attention is presented that imposes a high-level
                 constraint on the bottom-up salient properties of a
                 scene for the purpose of locating regions that are
                 likely to correspond to foreground objects rather than
                 background or other salient nonobject stimuli. In
                 addition to improving the correlation to human subject
                 fixation densities over a strictly bottom-up model
                 [Itti et al. 1998; Parkhurst et al. 2002], this model
                 predicts a central fixation tendency when that tendency
                 is warranted, and not as an artificially primed
                 location bias.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Active vision; eye-tracking; saliency modeling",
}

@Article{Creem-Regehr:2009:GE,
  author =       "Sarah Creem-Regehr and Karol Myszkowski",
  title =        "Guest editorial",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577756",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McDonnell:2009:IRB,
  author =       "Rachel McDonnell and Sophie J{\"o}rg and Joanna McHugh
                 and Fiona N. Newell and Carol O'Sullivan",
  title =        "Investigating the role of body shape on the perception
                 of emotion",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577757",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In order to analyze the emotional content of motions
                 portrayed by different characters, we created real and
                 virtual replicas of an actor exhibiting six basic
                 emotions: sadness, happiness, surprise, fear, anger,
                 and disgust. In addition to the video of the real
                 actor, his actions were applied to five virtual body
                 shapes: a low- and high-resolution virtual counterpart,
                 a cartoon-like character, a wooden mannequin, and a
                 zombie-like character (Figures 1 and 2). In a point
                 light condition, we also tested whether the absence of
                 a body affected the perceived emotion of the movements.
                 Participants were asked to rate the actions based on a
                 list of 41 more complex emotions. We found that the
                 perception of emotional actions is highly robust and
                 for the most part independent of the character's body, so
                 long as form is present. When motion alone is present,
                 emotions were generally perceived as less intense than
                 in the cases where form was present.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Reitsma:2009:ESP,
  author =       "Paul S. A. Reitsma and Carol O'Sullivan",
  title =        "Effect of scenario on perceptual sensitivity to errors
                 in animation",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577758",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A deeper understanding of what makes animation
                 perceptually plausible would benefit a number of
                 applications, such as approximate collision detection
                 and goal-directed animation. In a series of
                 psychophysical experiments, we examine how measurements
                 of perceptual sensitivity in realistic physical
                 simulations compare to similar measurements done in
                 more abstract settings. We find that participant
                 tolerance for certain types of errors is significantly
                 higher in a realistic snooker scenario than in the
                 abstract test settings previously used to examine those
                 errors. By contrast, we find tolerance for errors
                 displayed in realistic but more neutral environments
                 was not different from tolerance for those errors in
                 abstract settings. Additionally, we examine the
                 interaction of auditory and visual cues in determining
                 participant sensitivity to spatiotemporal errors in
                 rigid body collisions. We find that participants are
                 predominantly affected by visual cues. Finally, we find
                 that tolerance for spatial gaps during collision events
                 is constant for a wide range of viewing angles if the
                 effect of foreshortening and occlusion caused by the
                 viewing angle is taken into account.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Animation; graphics; perception; psychophysics",
}

@Article{Munn:2009:FAI,
  author =       "Susan M. Munn and Jeff B. Pelz",
  title =        "{FixTag}: An algorithm for identifying and tagging
                 fixations to simplify the analysis of data collected by
                 portable eye trackers",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577759",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Video-based eye trackers produce an output video
                 showing where a subject is looking, the subject's
                 Point-of-Regard (POR), for each frame of a video of the
                 scene. This information can be extremely valuable, but
                 its analysis can be overwhelming. Analysis of
                 eye-tracked data from portable (wearable) eye trackers
                 is especially daunting, as the scene video may be
                 constantly changing, rendering automatic analysis more
                 difficult. A common way to begin analysis of POR data
                 is to group these data into fixations. In a previous
                 article, we compared the fixations identified (i.e.,
                 start and end marked) automatically by an algorithm to
                 those identified manually by users (i.e., manual
                 coders). Here, we extend this automatic identification
                 of fixations to tagging each fixation to a
                 Region-of-Interest (ROI). Our fixation tagging
                 algorithm, FixTag, requires the relative 3D positions
                 of the vertices of ROIs and calibration of the scene
                 camera. Fixation tagging is performed by first
                 calculating the camera projection matrices for
                 keyframes of the scene video (captured by the eye
                 tracker) via an iterative structure and motion recovery
                 algorithm. These matrices are then used to project 3D
                 ROI vertices into the keyframes. A POR for each
                 fixation is matched to a point in the closest keyframe,
                 which is then checked against the 2D projected ROI
                 vertices for tagging. Our fixation tags were compared
                 to those produced by three manual coders tagging the
                 automatically identified fixations for two different
                 scenarios. For each scenario, eight ROIs were defined
                 along with the 3D positions of eight calibration
                 points. Therefore, 17 tags were available for each
                 fixation: 8 for ROIs, 8 for calibration points, and 1
                 for ``other.'' For the first scenario, a subject was
                 tracked looking through products on four store shelves,
                 resulting in 182 automatically identified fixations.
                 Our automatic tagging algorithm produced tags that
                 matched those produced by at least one manual coder for
                 181 out of the 182 fixations (99.5\% agreement). For
                 the second scenario, a subject was tracked looking at
                 two posters on adjoining walls of a room. Our algorithm
                 matched at least one manual coder's tag for 169
                 fixations out of 172 automatically identified (98.3\%
                 agreement).",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "coding; eye tracking; Fixations; portable; wearable",
}
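
%%% The tagging step described above reduces to projecting the 3D ROI
%%% vertices into a keyframe and testing whether the fixation's POR
%%% falls inside the projected polygon. The Python sketch below assumes
%%% the camera matrix recovery and keyframe matching have already been
%%% done; it is an illustration, not the authors' implementation.
%%%
%%%     import numpy as np
%%%
%%%     def project(P, X):
%%%         """Project 3D points X (n x 3) with a 3 x 4 camera matrix P."""
%%%         Xh = np.hstack([X, np.ones((len(X), 1))])
%%%         x = (P @ Xh.T).T
%%%         return x[:, :2] / x[:, 2:3]
%%%
%%%     def point_in_polygon(pt, poly):
%%%         """Even-odd-rule point-in-polygon test in 2D."""
%%%         x, y = pt
%%%         inside = False
%%%         for (x1, y1), (x2, y2) in zip(poly, np.roll(poly, -1, axis=0)):
%%%             if ((y1 > y) != (y2 > y) and
%%%                     x < (x2 - x1) * (y - y1) / (y2 - y1) + x1):
%%%                 inside = not inside
%%%         return inside
%%%
%%%     def tag_fixation(por, P, rois):
%%%         """rois maps an ROI name to its (k x 3) array of 3D vertices.
%%%         Returns the first ROI whose projection contains the POR,
%%%         else 'other'."""
%%%         for name, verts in rois.items():
%%%             if point_in_polygon(por, project(P, verts)):
%%%                 return name
%%%         return "other"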

@Article{McNamara:2009:STP,
  author =       "Ann McNamara and Reynold Bailey and Cindy Grimm",
  title =        "Search task performance using subtle gaze direction
                 with the presence of distractions",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577760",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A new experiment is presented that demonstrates the
                 usefulness of an image space modulation technique
                 called subtle gaze direction (SGD) for guiding the user
                 in a simple searching task. SGD uses image space
                 modulations in the luminance channel to guide a
                 viewer's gaze about a scene without interrupting their
                 visual experience. The goal of SGD is to direct a
                 viewer's gaze to certain regions of a scene without
                 introducing noticeable changes in the image. Using a
                 simple searching task, we compared performance using no
                 modulation, using subtle modulation, and using obvious
                 modulation. Results from the experiments show improved
                 performance when using subtle gaze direction, without
                 affecting the user's perception of the image. We then
                 extend the experiment to evaluate performance with the
                 presence of distractors. The distractors took the form
                 of extra modulations, which do not correspond to a
                 target in the image. Experimentation shows that, even
                 in the presence of distractors, more accurate results
                 are returned on a simple search task using SGD, as
                 compared to results returned when no modulation at all
                 is used. Results establish the potential of the method
                 for a wide range of applications including gaming,
                 perceptually based rendering, navigation in virtual
                 environments, and medical search tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Eye tracking; gaze direction; image manipulation;
                 luminance; psychophysics",
}
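
%%% The luminance modulation at the heart of SGD is easy to sketch: a
%%% brief, low-amplitude pulse is added to the luminance channel inside
%%% a small disc at the target location (and, in the running system,
%%% terminated as soon as a saccade heads toward it). The amplitude and
%%% radius below are illustrative guesses, not the authors' values.
%%%
%%%     import numpy as np
%%%
%%%     def sgd_pulse(img, center, radius=20, amplitude=0.1, phase=0.0):
%%%         """img: H x W x 3 float RGB in [0, 1]. Adds a sinusoidal
%%%         luminance offset inside a disc about `center' = (x, y)."""
%%%         h, w = img.shape[:2]
%%%         yy, xx = np.mgrid[0:h, 0:w]
%%%         mask = ((xx - center[0]) ** 2 +
%%%                 (yy - center[1]) ** 2) <= radius ** 2
%%%         out = img.copy()
%%%         # Shift all three channels equally to approximate a pure
%%%         # luminance change within the disc.
%%%         out[mask] = np.clip(out[mask] + amplitude * np.sin(phase),
%%%                             0.0, 1.0)
%%%         return out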

@Article{Filip:2009:URG,
  author =       "Ji{\v{r}}{\'\i} Filip and Michael J. Chantler and
                 Michal Haindl",
  title =        "On uniform resampling and gaze analysis of
                 bidirectional texture functions",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577761",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The use of illumination and view-dependent texture
                 information is recently the best way to capture the
                 appearance of real-world materials accurately. One
                 example is the Bidirectional Texture Function. The main
                 disadvantage of these data is their massive size. In
                 this article, we employ perceptually-based methods to
                 allow more efficient handling of these data. In the
                 first step, we analyse different uniform resamplings by
                 means of a psychophysical study with 11 subjects,
                 comparing original data with rendering of a uniformly
                 resampled version over the hemisphere of illumination
                 and view-dependent textural measurements. We have found
                 that down-sampling in view and illumination azimuthal
                 angles is less apparent than in elevation angles and
                 that illumination directions can be down-sampled more
                 than view directions without loss of visual accuracy.
                 In the second step, we analyzed subjects' gaze
                 fixations during the experiment. The gaze analysis
                 confirmed results from the experiment and revealed that
                 subjects fixated at locations aligned with the
                 direction of the main gradient in the rendered stimuli.
                 As this gradient was mostly aligned with the
                 illumination gradient, we conclude that subjects
                 observed the materials mainly in the direction of the
                 illumination gradient. Our results provide interesting
                 insights into human perception of real materials and
                 have promising consequences for the development of more
                 efficient compression and rendering algorithms for this
                 kind of massive data.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "BTF; eye tracking; phychophysical experiment; texture
                 compression; uniform resampling; visual degradation",
}

@Article{Kuhl:2009:HCE,
  author =       "Scott A. Kuhl and William B. Thompson and Sarah H.
                 Creem-Regehr",
  title =        "{HMD} calibration and its effects on distance
                 judgments",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577762",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most head-mounted displays (HMDs) suffer from
                 substantial optical distortion, and vendor-supplied
                 specifications for field-of-view often are at variance
                 with reality. Unless corrected, such displays do not
                 present perspective-related visual cues in a
                 geometrically correct manner. Distorted geometry has
                 the potential to affect applications of HMDs, which
                 depend on precise spatial perception. This article
                 provides empirical evidence for the degree to which
                 common geometric distortions affect one type of spatial
                 judgment in virtual environments. We show that
                 minification or magnification in the HMD that would
                 occur from misstated HMD field of view causes
                 significant changes in distance judgments. Incorrectly
                 calibrated pitch and pincushion distortion, however, do
                 not cause statistically significant changes in distance
                 judgments for the degree of distortions examined. While
                 the means for determining the optical distortion of
                 display systems are well known, they are often not used
                 in non-see-through HMDs due to problems in measuring
                 and correcting for distortion. As a result, we also
                 provide practical guidelines for creating geometrically
                 calibrated systems.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "field of view; Immersive virtual environment;
                 minification; perception; pincushion distortion;
                 pitch",
}
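
%%% The minification/magnification effect discussed above has a simple
%%% first-order form: if an image rendered with geometric field of view
%%% g is shown on a display whose true field of view is d, angular sizes
%%% near the image center scale by roughly tan(d/2) / tan(g/2). The
%%% Python snippet below is a worked illustration of that approximation
%%% (with made-up numbers), not a calibration procedure from the
%%% article.
%%%
%%%     import math
%%%
%%%     def central_scale(display_fov_deg, render_fov_deg):
%%%         """Approximate angular scale factor at the image center when
%%%         an image rendered with `render_fov_deg' is shown on a
%%%         display whose physical FOV is `display_fov_deg'."""
%%%         d = math.radians(display_fov_deg) / 2.0
%%%         g = math.radians(render_fov_deg) / 2.0
%%%         return math.tan(d) / math.tan(g)
%%%
%%%     # Overstating a 48-degree display FOV by 10% when rendering
%%%     # minifies central angular sizes by roughly 10%:
%%%     print(central_scale(48.0, 52.8))  # ~0.90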

@Article{Riecke:2009:ASM,
  author =       "Bernhard E. Riecke and Daniel Feuereissen and John J.
                 Rieser",
  title =        "Auditory self-motion simulation is facilitated by
                 haptic and vibrational cues suggesting the possibility
                 of actual motion",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1577755.1577763",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Sound fields rotating around stationary blindfolded
                 listeners sometimes elicit auditory circular vection,
                 the illusion that the listener is physically rotating.
                 Experiment 1 investigated whether auditory circular
                 vection depends on participants' situational awareness
                 of ``movability,'' that is, whether they sense/know
                 that actual motion is possible or not. While previous
                 studies often seated participants on movable chairs to
                 suspend the disbelief of self-motion, it has never been
                 investigated whether this does, in fact, facilitate
                 auditory vection. To this end, 23 blindfolded
                 participants were seated on a hammock chair with their
                 feet either on solid ground (``movement impossible'')
                 or suspended (``movement possible'') while listening to
                 individualized binaural recordings of two sound sources
                 rotating synchronously at $60^\circ / s$. Although
                 participants never physically moved, situational
                 awareness of movability facilitated auditory vection.
                 Moreover, adding slight vibrations like the ones
                 resulting from actual chair rotation increased the
                 frequency and intensity of vection. Experiment 2
                 extended these findings and showed that
                 nonindividualized binaural recordings were as effective
                 in inducing auditory circular vection as individualized
                 recordings. These results have important implications
                 both for our theoretical understanding of self-motion
                 perception and for the applied field of self-motion
                 simulations, where vibrations, nonindividualized
                 binaural sound, and the cognitive/perceptual framework
                 of movability can typically be provided at minimal cost
                 and effort.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory vection; circular vection; cue-integration;
                 higher-level/cognitive influences; HRTF; human factors;
                 individualized binaural recordings; psychophysics;
                 Self-motion illusions; self-motion simulation; spatial
                 sound; vibrations; virtual reality",
}

@Article{Bodenheimer:2009:GE,
  author =       "Bobby Bodenheimer and Carol O'Sullivan",
  title =        "Guest editorial",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "21:1--21:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609968",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McDonnell:2009:TBS,
  author =       "Rachel McDonnell and Cathy Ennis and Simon Dobbyn and
                 Carol O'Sullivan",
  title =        "Talking bodies: Sensitivity to desynchronization of
                 conversations",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "22:1--22:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we investigate human sensitivity to
                 the coordination and timing of conversational body
                 language for virtual characters. First, we captured the
                 full body motions (excluding faces and hands) of three
                 actors conversing about a range of topics, in either a
                 polite (i.e., one person talking at a time) or
                 debate/argument style. Stimuli were then created by
                 applying the motion-captured conversations from the
                 actors to virtual characters. In a 2AFC experiment,
                 participants viewed paired sequences of synchronized
                 and desynchronized conversations and were asked to
                 guess which was the real one. Detection performance was
                 above chance for both conversation styles but more so
                 for the polite conversations, where desynchronization
                 was more noticeable.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Jimenez:2009:SSP,
  author =       "Jorge Jimenez and Veronica Sundstedt and Diego
                 Gutierrez",
  title =        "Screen-space perceptual rendering of human skin",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "23:1--23:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609970",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We propose a novel skin shader which translates the
                 simulation of subsurface scattering from texture space
                 to a screen-space diffusion approximation. It naturally
                 scales well while maintaining a perceptually plausible
                 result. This technique allows us to ensure real-time
                 performance even when several characters may appear on
                 screen at the same time. The visual realism of the
                 resulting images is validated using a subjective
                 psychophysical preference experiment. Our results show
                 that, independent of distance and light position, the
                 images rendered using our novel shader have visual
                 realism as high as that of images rendered with a
                 previously developed physically-based shader.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "perception; psychophysics; Real-time skin rendering",
}

@Article{Yu:2009:PIA,
  author =       "Insu Yu and Andrew Cox and Min H. Kim and Tobias
                 Ritschel and Thorsten Grosch and Carsten Dachsbacher
                 and Jan Kautz",
  title =        "Perceptual influence of approximate visibility in
                 indirect illumination",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "24:1--24:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609971",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we evaluate the use of approximate
                 visibility for efficient global illumination.
                 Traditionally, accurate visibility is used in light
                 transport. However, the indirect illumination we
                 perceive on a daily basis is rarely of high-frequency
                 nature, as the most significant aspect of light
                 transport in real-world scenes is diffuse, and thus
                 displays a smooth gradation. This raises the question
                 of whether accurate visibility is perceptually
                 necessary in this case. To answer this question, we
                 conduct a psychophysical study on the perceptual
                 influence of approximate visibility on indirect
                 illumination. This study reveals that accurate
                 visibility is not required and that certain
                 approximations may be introduced.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Global illumination; perception; visibility",
}

@Article{Morvan:2009:HOT,
  author =       "Yann Morvan and Carol O'Sullivan",
  title =        "Handling occluders in transitions from panoramic
                 images: a perceptual study",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "25:1--25:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609972",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Panoramic images are very effective at conveying a
                 visual sense of presence at very low cost and with
                 great ease of authoring. They are, however, limited
                 in the
                 navigation options they offer, unlike 3D
                 representations. It is therefore desirable to provide
                 pleasing transitions from one panorama to another, or
                 from a panorama to a 3D model. We focus on motions
                 where the viewers move toward an area of interest, and
                 on the problem of dealing with occluders in their path.
                 We discuss existing transition approaches, with
                 emphasis on the additional information they require and
                 on the constraints they place on the authoring process.
                 We propose a compromise approach based on faking the
                 parallax effect with occluder mattes. We conduct a user
                 study to determine whether additional information does
                 in fact increase the visual appeal of transitions. We
                 observe that the creation of occluder mattes alone is
                 only justified if the fake parallax effect can be
                 synchronized with the camera motion (but not
                 necessarily consistent with it), and if viewpoint
                 discrepancies at occlusion boundaries are small. The
                 faster the transition, the less perceptual value there
                 is in creating mattes. Information on view alignment is
                 always useful, as a dissolve effect is always preferred
                 to fading to black and back.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "content mixing; occlusion; panorama; transitioning;
                 User study",
}

@Article{To:2009:PDN,
  author =       "M. P. S. To and I. D. Gilchrist and T. Troscianko and
                 J. S. B. Kho and D. J. Tolhurst",
  title =        "Perception of differences in natural-image stimuli:
                 Why is peripheral viewing poorer than foveal?",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "26:1--26:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1609967.1609973",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual Difference Predictor (VDP) models have played a
                 key role in digital image applications such as the
                 development of image quality metrics. However, little
                 attention has been paid to their applicability to
                 peripheral vision. Central (i.e., foveal) vision is
                 extremely sensitive for the contrast detection of
                 simple stimuli such as sinusoidal gratings, but
                 peripheral vision is less sensitive. Furthermore,
                 crowding is a well-documented phenomenon whereby
                 differences in suprathreshold peripherally viewed
                 target objects (such as individual letters or patches
                 of sinusoidal grating) become more difficult to
                 discriminate when surrounded by other objects
                 (flankers). We examine three factors that might
                 influence the degree of crowding with natural-scene
                 stimuli (cropped from photographs of natural scenes):
                 (1) location in the visual field, (2) distance between
                 target and flankers, and (3) flanker-target similarity.
                 We ask how these factors affect crowding in a
                 suprathreshold discrimination experiment where
                 observers rate the perceived differences between two
                 sequentially presented target patches of natural
                 images. The targets might differ in the shape, size,
                 arrangement, or color of items in the scenes. Changes
                 in uncrowded peripheral targets are perceived to be
                 less than for the same changes viewed foveally.
                 Consistent with previous research on simple stimuli, we
                 find that crowding in the periphery (but not in the
                 fovea) reduces the magnitudes of perceived changes even
                 further, especially when the flankers are closer and
                 more similar to the target. We have tested VDP models
                 based on the response behavior of neurons in visual
                 cortex and the inhibitory interactions between them.
                 The models do not explain the lower ratings for
                 peripherally viewed changes even when the lower
                 peripheral contrast sensitivity was accounted for; nor
                 could they explain the effects of crowding, which
                 others have suggested might arise from errors in the
                 spatial localization of features in the peripheral
                 image. This suggests that conventional VDP models do
                 not port well to peripheral vision.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "crowding; image difference metrics; peripheral vision;
                 Peripheral vision; psychophysical testing; VDP models",
}

@Article{Bonneel:2010:BPA,
  author =       "Nicolas Bonneel and Clara Suied and Isabelle
                 Viaud-Delmon and George Drettakis",
  title =        "Bimodal perception of audio-visual material properties
                 for virtual environments",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rienks:2010:DHO,
  author =       "Rutger Rienks and Ronald Poppe and Dirk Heylen",
  title =        "Differences in head orientation behavior for speakers
                 and listeners: An experiment in a virtual environment",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Giudice:2010:SLN,
  author =       "Nicholas A. Giudice and Jonathan Z. Bakdash and Gordon
                 E. Legge and Rudrava Roy",
  title =        "Spatial learning and navigation using a virtual verbal
                 display",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lu:2010:VCE,
  author =       "Aidong Lu and Ross Maciejewski and David S. Ebert",
  title =        "Volume composition and evaluation using eye-tracking
                 data",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Berger:2010:SBF,
  author =       "Daniel R. Berger and J{\"o}rg Schulte-Pelkum and
                 Heinrich H. B{\"u}lthoff",
  title =        "Simulating believable forward accelerations on a
                 {Stewart} motion platform",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frintrop:2010:CVA,
  author =       "Simone Frintrop and Erich Rome and Henrik I.
                 Christensen",
  title =        "Computational visual attention systems and their
                 cognitive foundations: a survey",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cooke:2010:MSA,
  author =       "Theresa Cooke and Christian Wallraven and Heinrich H.
                 B{\"u}lthoff",
  title =        "Multidimensional scaling analysis of haptic
                 exploratory procedures",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shamir:2010:IES,
  author =       "Lior Shamir and Tomasz Macura and Nikita Orlov and D.
                 Mark Eckley and Ilya G. Goldberg",
  title =        "Impressionism, expressionism, surrealism: Automated
                 recognition of painters and schools of art",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mania:2010:CTS,
  author =       "Katerina Mania and Shahrul Badariah and Matthew Coxon
                 and Phil Watten",
  title =        "Cognitive transfer of spatial awareness states from
                 immersive virtual environments to reality",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{VanMensvoort:2010:PMO,
  author =       "Koert {Van Mensvoort} and Peter Vos and Dik J. Hermes
                 and Robert {Van Liere}",
  title =        "Perception of mechanically and optically simulated
                 bumps and holes",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Souman:2010:MVW,
  author =       "Jan L. Souman and Paolo Robuffo Giordano and Ilja
                 Frissen and Alessandro De Luca and Marc O. Ernst",
  title =        "Making virtual walking real: Perceptual evaluation of
                 a new treadmill control algorithm",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kim:2010:MSH,
  author =       "Youngmin Kim and Amitabh Varshney and David W. Jacobs
                 and Fran{\c{c}}ois Guimbreti{\`e}re",
  title =        "Mesh saliency and human eye fixations",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Newsham:2010:CLQ,
  author =       "Guy R. Newsham and Duygu Cetegen and Jennifer A.
                 Veitch and Lorne Whitehead",
  title =        "Comparing lighting quality evaluations of real scenes
                 with those from high dynamic range and conventional
                 images",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "13:1--13:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mion:2010:POA,
  author =       "Luca Mion and Giovanni {De Poli} and Ennio
                 Rapan{\`a}",
  title =        "Perceptual organization of affective and sensorial
                 expressive intentions in music performance",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "14:1--14:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Blank:2010:IRP,
  author =       "Amy Blank and Allison M. Okamura and Katherine J.
                 Kuchenbecker",
  title =        "Identifying the role of proprioception in upper-limb
                 prosthesis control: Studies on targeted motion",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773966",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Proprioception plays a crucial role in enabling humans
                 to move purposively and interact with their physical
                 surroundings. Current technology in upper-limb
                 prostheses, while beginning to incorporate some haptic
                 feedback, does not provide amputees with proprioceptive
                 information about the state of the limb. Thus, the
                 wearer must visually monitor the limb, which is often
                 inconvenient or even impossible for some tasks. This
                 work seeks to quantify the potential benefits of
                 incorporating proprioceptive motion feedback into
                 upper-limb prosthesis designs. We apply a noninvasive
                 method for controlling the availability of
                 proprioceptive motion feedback in unimpaired
                 individuals in a human subject study to compare the
                 benefits of visual and proprioceptive motion feedback
                 in targeted motion tasks. Combined results of the
                 current study and our previous study using a different
                 task indicate that the addition of proprioceptive
                 motion feedback improves targeting accuracy under
                 nonsighted conditions and, for some tasks, under
                 sighted conditions as well. This work motivates the
                 development of methods for providing artificial
                 proprioceptive feedback to a prosthesis wearer.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Human psychophysics; motion control; proprioception;
                 prosthetic limb control; vision",
}

@Article{Radun:2010:EMV,
  author =       "Jenni Radun and Tuomas Leisti and Toni Virtanen and
                 Jukka H{\"a}kkinen and Tero Vuori and G{\"o}te Nyman",
  title =        "Evaluating the multivariate visual quality performance
                 of image-processing components",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773967",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The estimation of image quality is a demanding task,
                 especially when estimating different high-quality
                 imaging products or their components. The challenge is
                 the multivariate nature of image quality as well as the
                 need to use na{\"\i}ve observers as test subjects,
                 since they are the actual end-users of the products.
                 Here, we use a subjective approach suitable for
                 estimating the quality performance of different imaging
                 device components with na{\"\i}ve observers --- the
                 interpretation-based quality (IBQ) approach. From two
                 studies with 61 na{\"\i}ve observers, 17 natural
                 image contents, and 13 different camera image signal
                 processor pipelines, we determined the subjectively
                 crucial image quality attributes and dimensions, and
                 characterized each pipeline's perceived image quality
                 performance. We found that the subjectively most
                 important image quality dimensions were color
                 shift/naturalness, darkness, and sharpness. The first
                 dimension, which was related to naturalness and colors,
                 distinguished the good-quality pipelines from the
                 middle- and low-quality groups, and the dimensions of
                 darkness and sharpness described why the quality failed
                 in the low-quality pipelines. The study suggests that
                 the high-level concept of naturalness is a requirement
                 for
                 high-quality images, whereas quality can fail for other
                 reasons in low-quality images, and this failure can be
                 described by low-level concepts, such as darkness and
                 sharpness.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "correspondence analysis; Image quality; qualitative
                 methodology; quality dimensions; subjective
                 measurements",
}

@Article{Andersen:2010:WME,
  author =       "Tue Haste Andersen and Shumin Zhai",
  title =        "``Writing with music'': Exploring the use of auditory
                 feedback in gesture interfaces",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773968",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigate the use of auditory feedback in
                 pen-gesture interfaces in a series of informal and
                 formal experiments. Initial iterative exploration
                 showed that gaining performance advantage with auditory
                 feedback was possible using absolute cues and state
                 feedback after the gesture was produced and recognized.
                 However, gaining learning or performance advantage from
                 auditory feedback tightly coupled with the pen-gesture
                 articulation and recognition process was more
                 difficult. To establish a systematic baseline,
                 Experiment 1 formally evaluated gesture production
                 accuracy as a function of auditory and visual feedback.
                 Size of gestures and the aperture of the closed
                 gestures were influenced by the visual or auditory
                 feedback, while other measures such as shape distance
                 and directional difference were not, supporting the
                 theory that feedback is too slow to strongly influence
                 the production of pen stroke gestures. Experiment 2
                 focused on the subjective aspects of auditory feedback
                 in pen-gesture interfaces. Participants' rating on the
                 dimensions of being wonderful and stimulating was
                 significantly higher with musical auditory feedback.
                 Several lessons regarding pen gestures and auditory
                 feedback are drawn from our exploration: a few simple
                 functions such as indicating the pen-gesture
                 recognition results can be achieved, gaining
                 performance and learning advantage through tightly
                 coupled process-based auditory feedback is difficult,
                 pen-gesture sets and their recognizers can be designed
                 to minimize visual dependence, and people's subjective
                 experience of gesture interaction can be influenced
                 using musical auditory feedback. These lessons may
                 serve as references and stepping stones toward future
                 research and development in pen-gesture interfaces with
                 auditory feedback.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Audio; auditory interface; feedback; gesture; music;
                 pen; sound; text input",
}

@Article{Kim:2010:PGG,
  author =       "Juno Kim and Stephen A. Palmisano and April Ash and
                 Robert S. Allison",
  title =        "Pilot gaze and glideslope control",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We examined the eye movements of pilots as they
                 carried out simulated aircraft landings under day and
                 night lighting conditions. Our five students and five
                 certified pilots were instructed to quickly achieve and
                 then maintain a constant 3-degree glideslope relative
                 to the runway. However, both groups of pilots were
                 found to make significant glideslope control errors,
                 especially during simulated night approaches. We found
                 that pilot gaze was directed most often toward the
                 runway and to the ground region located immediately in
                 front of the runway, compared to other visual scene
                 features. In general, their gaze was skewed toward the
                 near half of the runway and tended to follow the runway
                 threshold as it moved on the screen. Contrary to
                 expectations, pilot gaze was not consistently directed
                 at the aircraft's simulated aimpoint (i.e., its
                 predicted future touchdown point based on scene
                 motion). However, pilots did tend to fly the aircraft
                 so that this point was aligned with the runway
                 threshold. We conclude that the supplementary
                 out-of-cockpit visual cues available during day landing
                 conditions facilitated glideslope control performance.
                 The available evidence suggests that these
                 supplementary visual cues are acquired through
                 peripheral vision, without the need for active
                 fixation.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "aviation; gaze; glideslope control; landing; Vision",
}

@Article{Kjellin:2010:EVS,
  author =       "Andreas Kjellin and Lars Winkler Pettersson and Stefan
                 Seipel and Mats Lind",
  title =        "Evaluating {$2$D} and {$3$D} visualizations of
                 spatiotemporal information",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773970",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Time-varying geospatial data presents some specific
                 challenges for visualization. Here, we report the
                 results of three experiments aiming at evaluating the
                 relative efficiency of three existing visualization
                 techniques for a class of such data. The class chosen
                 was that of object movement, especially the movements
                 of vehicles in a fictitious landscape. Two different
                 tasks were also chosen. One was to predict where three
                 vehicles will meet in the future given a visualization
                 of their past movement history. The second task was to
                 estimate the order in which four vehicles arrived at a
                 specific place. Our results reveal that previous
                 findings had overgeneralized human perception in these
                 situations and that large differences in user
                 efficiency exist for a given task between different
                 types of visualizations depicting the same data.
                 Furthermore, our results are in line with earlier
                 general findings on the nature of human perception of
                 both object shape and scene changes. Finally, the need
                 for new taxonomies of data and tasks based on results
                 from perception research is discussed.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "2D; 3D; animation; space--time cube; spatiotemporal;
                 user studies",
}

@Article{Pineo:2010:NMF,
  author =       "Daniel Pineo and Colin Ware",
  title =        "Neural modeling of flow rendering effectiveness",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "20:1--20:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1773965.1773971",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It has been previously proposed that understanding the
                 mechanisms of contour perception can provide a theory
                 for why some flow rendering methods allow for better
                 judgments of advection pathways than others. In this
                 article, we develop this theory through a numerical
                 model of the primary visual cortex of the brain (Visual
                 Area 1) where contour enhancement is understood to
                 occur according to most neurological theories. We apply
                 a two-stage model of contour perception to various
                 visual representations of flow fields evaluated using
                 the advection task of Laidlaw et al. In the first
                 stage, contour {\em enhancement\/} is modeled based on
                 Li's cortical model. In the second stage, a model of
                 streamline {\em tracing\/} is proposed, designed to
                 support the advection task. We examine the predictive
                 power of the model by comparing its performance to that
                 of human subjects on the advection task with four
                 different visualizations. The results show the same
                 overall pattern for humans and the model. In both
                 cases, the best performance was obtained with an
                 aligned streamline based method, which tied with a
                 LIC-based method. Using a regular or jittered grid of
                 arrows produced worse results. The model yields
                 insights into the relative strengths of different flow
                 visualization methods for the task of visualizing
                 advection pathways.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Contour perception; flow visualization; perceptual
                 theory; visual cortex; visualization",
}

@Article{Mania:2010:EAS,
  author =       "Katerina Mania and Martin S. Banks",
  title =        "Editorial -- {APGV 2010} special issue",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823739",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hodgins:2010:SAA,
  author =       "Jessica Hodgins and Sophie J{\"o}rg and Carol
                 O'Sullivan and Sang Il Park and Moshe Mahler",
  title =        "The saliency of anomalies in animated human
                 characters",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823740",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual characters are much in demand for animated
                 movies, games, and other applications. Rapid advances
                 in performance capture and advanced rendering
                 techniques have allowed the movie industry in
                 particular to create characters that appear very
                 human-like. However, with these new capabilities has
                 come the realization that such characters are not yet
                 quite ``right.'' One possible hypothesis is that these
                 virtual humans fall into an ``Uncanny Valley'', where
                 the viewer's emotional response is repulsion or
                 rejection, rather than the empathy or emotional
                 engagement that their creators had hoped for. To
                 explore these issues, we created three animated
                 vignettes of an arguing couple with detailed motion for
                 the face, eyes, hair, and body. In a set of perceptual
                 experiments, we explore the relative importance of
                 different anomalies using two different methods: a
                 questionnaire to determine the emotional response to
                 the full-length vignettes, with and without facial
                 motion and audio; and a 2AFC (two alternative forced
                 choice) task to compare the performance of a virtual
                 ``actor'' in short clips (extracts from the vignettes)
                 depicting a range of different facial and body
                 anomalies. We found that the facial anomalies are
                 particularly salient, even when very significant body
                 animation anomalies are present.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "eye tracking; Human animation; motion capture;
                 perception of human motion; virtual characters",
}

@Article{Carter:2010:PMG,
  author =       "Elizabeth J. Carter and Lavanya Sharan and Laura
                 Trutoiu and Iain Matthews and Jessica K. Hodgins",
  title =        "Perceptually motivated guidelines for voice
                 synchronization in film",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823741",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We consume video content in a multitude of ways,
                 including in movie theaters, on television, on DVDs and
                 Blu-rays, online, on smart phones, and on portable
                 media players. For quality control purposes, it is
                 important to have a uniform viewing experience across
                 these various platforms. In this work, we focus on
                 voice synchronization, an aspect of video quality that
                 is strongly affected by current post-production and
                 transmission practices. We examined the synchronization
                 of an actor's voice and lip movements in two distinct
                 scenarios. First, we simulated the temporal mismatch
                 between the audio and video tracks that can occur
                 during dubbing or during broadcast. Next, we recreated
                 the pitch changes that result from conversions between
                 formats with different frame rates. We show, for the
                 first time, that these audio visual mismatches affect
                 viewer enjoyment. When temporal synchronization is
                 noticeably absent, there is a decrease in the perceived
                 performance quality and the perceived emotional
                 intensity of a performance. For pitch changes, we find
                 that higher pitch voices are not preferred, especially
                 for male actors. Based on our findings, we advise that
                 mismatched audio and video signals negatively affect
                 viewer experience.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory perceptual research; human perception and
                 performance; Multisensory perception and integration;
                 visual psychophysics",
}

@Article{Wijntjes:2010:PPS,
  author =       "Maarten W. A. Wijntjes and Sylvia C. Pont",
  title =        "Pointing in pictorial space: Quantifying the perceived
                 relative depth structure in mono and stereo images of
                 natural scenes",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "24:1--24:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823742",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Although there has recently been a large increase in
                 commercial 3D applications, relatively little is known
                 about the quantitative perceptual improvement from
                 binocular disparity. In this study we developed a
                 method to measure the perceived relative depth
                 structure of natural scenes. Observers were instructed
                 to adjust the direction of a virtual pointer from one
                 object to another. The pointing data was used to
                 reconstruct the relative logarithmic depths of the
                 objects in pictorial space. The results showed that the
                 relative depth structure is more similar between
                 observers for stereo images than for mono images in two
                 out of three scenes. A similar result was found for the
                 depth range: for the same two scenes the stereo images
                 were perceived as having more depth than the monocular
                 images. In addition, our method allowed us to determine
                 the subjective center of projection. We found that the
                 pointing settings fitted the reconstructed depth best
                 for substantially wider fields of view than the
                 veridical center of projection for both mono and stereo
                 images. The results indicate that the improvement from
                 binocular disparity depends on the scene content:
                 scenes with sufficient monocular information may not
                 profit much from binocular disparity.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "binocular disparity; Depth perception; natural
                 scenes",
}

@Article{Couture:2010:ADD,
  author =       "Vincent Couture and Michael S. Langer and
                 S{\'e}bastien Roy",
  title =        "Analysis of disparity distortions in omnistereoscopic
                 displays",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "25:1--25:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823743",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "An omnistereoscopic image is a pair of panoramic
                 images that enables stereoscopic depth perception all
                 around an observer. An omnistereo projection on a
                 cylindrical display does not require tracking of the
                 observer's viewing direction. However, such a display
                 introduces stereo distortions. In this article, we
                 investigate two projection models for rendering 3D
                 scenes in omnistereo. The first is designed to give
                 zero disparity errors at the center of the visual
                 field. The second is the well-known slit-camera model.
                 For both models, disparity errors are shown to increase
                 gradually in the periphery, as visual stereo acuity
                 decreases. We use available data on human stereoscopic
                 acuity limits to argue that depth distortions caused by
                 these models are so small that they cannot be
                 perceived.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "depth acuity; median plane; Panorama; perception;
                 stereo",
}

@Article{Grechkin:2010:HDP,
  author =       "Timofey Y. Grechkin and Tien Dat Nguyen and Jodie M.
                 Plumert and James F. Cremer and Joseph K. Kearney",
  title =        "How does presentation method and measurement protocol
                 affect distance estimation in real and virtual
                 environments?",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "26:1--26:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823744",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We conducted two experiments that compared distance
                 perception in real and virtual environments in six
                 visual presentation methods using either timed imagined
                 walking or direct blindfolded walking, while
                 controlling for several other factors that could
                 potentially impact distance perception. Our
                 presentation conditions included unencumbered real
                 world, real world seen through an HMD, virtual world
                 seen through an HMD, augmented reality seen through an
                 HMD, virtual world seen on multiple, large immersive
                 screens, and photo-based presentation of the real world
                 seen on multiple, large immersive screens. We found
                 that there was a similar degree of underestimation of
                 distance in the HMD and large-screen presentations of
                 virtual environments. We also found that while wearing
                 the HMD can cause some degree of distance
                 underestimation, this effect depends on the measurement
                 protocol used. Finally, we found that photo-based
                 presentation did not help to improve distance
                 perception in a large-screen immersive display system.
                 The discussion focuses on points of similarity and
                 difference with previous work on distance estimation in
                 real and virtual environments.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Distance estimation; egocentric depth perception;
                 head-mounted displays; large-screen immersive displays;
                 perception; virtual environments",
}

@Article{Aydin:2010:VSE,
  author =       "Tun{\c{c}} Ozan Aydin and Martin {\v{C}}ad{\'\i}k and
                 Karol Myszkowski and Hans-Peter Seidel",
  title =        "Visually significant edges",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "27:1--27:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1823738.1823745",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Numerous image processing and computer graphics
                 methods make use of either explicitly computed strength
                 of image edges, or an implicit edge strength definition
                 that is integrated into their algorithms. In both
                 cases, the end result is highly affected by the
                 computation of edge strength. We address several
                 shortcomings of the widely used gradient
                 magnitude-based edge strength model through the
                 computation of a hypothetical Human Visual System (HVS)
                 response at edge locations. Contrary to gradient
                 magnitude, the resulting ``visual significance'' values
                 account for various HVS mechanisms such as luminance
                 adaptation and visual masking, and are scaled in
                 perceptually linear units that are uniform across
                 images. The visual significance computation is
                 implemented in a fast multiscale second-generation
                 wavelet framework which we use to demonstrate the
                 differences in image retargeting, HDR image stitching,
                 and tone mapping applications with respect to the
                 gradient magnitude model. Our results suggest that
                 simple perceptual models provide qualitative
                 improvements on applications utilizing edge strength at
                 the cost of a modest computational burden.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Edge strength; HDR; visual perception",
}

@Article{Vicentini:2010:EFT,
  author =       "M. Vicentini and S. Galvan and D. Botturi and P.
                 Fiorini",
  title =        "Evaluation of force and torque magnitude
                 discrimination thresholds on the human hand-arm
                 system",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857894",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article reports on experiments about haptic
                 perception aimed at measuring the force/torque
                 differential thresholds applied to the hand-arm system.
                 The experimental work analyzes how force is sent back
                 to the user by means of a 6 degrees-of-freedom haptic
                 device. Our findings on force perception indicate that
                 the just-noticeable-difference is generally higher than
                 previously reported in the literature and not constant
                 along the stimulus continuum. We found evidence that
                 the thresholds change also among the different
                 directions. Furthermore, asymmetries in force
                 perceptions, which were not described in previous
                 reports, can be evinced for most of the directions.
                 These findings support our claim that human beings
                 perceive forces differently along different directions,
                 thus suggesting that perception can also be enhanced by
                 suitable signal processing, that is, with a
                 manipulation of the force signal before it reaches the
                 haptic device.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
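
%%% The Vicentini et al. entry above measures force-discrimination
%%% thresholds (JNDs).  A minimal sketch of the Weber-fraction
%%% computation such data support, with made-up numbers: a constant
%%% fraction would mean Weber's law holds, whereas the article reports
%%% thresholds that are not constant along the stimulus continuum.
%%%
%%%     reference_forces = [1.0, 2.0, 4.0, 8.0]      # N, assumed points
%%%     jnds             = [0.25, 0.42, 0.70, 1.60]  # N, made-up JNDs
%%%
%%%     for f, jnd in zip(reference_forces, jnds):
%%%         print(f"ref={f:4.1f} N  JND={jnd:4.2f} N  "
%%%               f"Weber fraction={jnd / f:.3f}")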

@Article{Mourkoussis:2010:QFV,
  author =       "Nicholaos Mourkoussis and Fiona M. Rivera and Tom
                 Troscianko and Tim Dixon and Rycharde Hawkes and
                 Katerina Mania",
  title =        "Quantifying fidelity for virtual environment
                 simulations employing memory schema assumptions",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857895",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In a virtual environment (VE), efficient techniques
                 are often needed to economize on rendering computation
                 without compromising the information transmitted. The
                 reported experiments devise a functional fidelity
                 metric by exploiting research on memory schemata.
                 According to the proposed measure, similar information
                 would be transmitted across synthetic and real-world
                 scenes depicting a specific schema. This would
                 ultimately indicate which areas in a VE could be
                 rendered in lower quality without affecting information
                 uptake. We examine whether computationally more
                 expensive scenes of greater visual fidelity affect
                 memory performance after exposure to immersive VEs, or
                 whether they are merely more aesthetically pleasing
                 than their diminished visual quality counterparts.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Llobera:2010:PMD,
  author =       "Joan Llobera and Bernhard Spanlang and Giulio Ruffini
                 and Mel Slater",
  title =        "Proxemics with multiple dynamic characters in an
                 immersive virtual environment",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857896",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "An experiment was carried out to examine the impact on
                 electrodermal activity of people when approached by
                 groups of one or four virtual characters at varying
                 distances. It was premised on the basis of proxemics
                 theory that the closer the approach of the virtual
                 characters to the participant, the greater the level of
                 physiological arousal. Physiological arousal was
                 measured by the number of skin conductance responses
                 within a short time period after the approach, and the
                 maximum change in skin conductance level 5 seconds
                 after the approach. The virtual characters were each
                 either female or a cylinder of human size, and one or
                 four characters approached each subject a total of 12
                 times.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
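
%%% The Llobera et al. entry above scores arousal by the number of skin
%%% conductance responses after an approach and the maximum level change
%%% within 5 seconds.  A minimal sketch of such scoring, assuming a
%%% uniformly sampled conductance trace; the amplitude criterion and the
%%% synthetic signal are illustrative, not the authors' pipeline:
%%%
%%%     import numpy as np
%%%
%%%     def score_event(scl, fs, t_event, window=5.0, min_rise=0.05):
%%%         seg = scl[int(t_event * fs): int((t_event + window) * fs)]
%%%         above = seg - seg[0] >= min_rise
%%%         # Upward crossings of the amplitude criterion ~ SCR count.
%%%         n_scr = int(np.sum(~above[:-1] & above[1:]))
%%%         return n_scr, float(seg.max() - seg[0])
%%%
%%%     fs = 32.0                                  # Hz, assumed rate
%%%     t = np.arange(0, 20, 1 / fs)
%%%     scl = 2.0 + 0.3 * np.exp(-(t - 11) ** 2)   # response at 11 s
%%%     print(score_event(scl, fs, t_event=10.0))  # -> (1, ~0.19)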

@Article{Bernhard:2010:EPD,
  author =       "Matthias Bernhard and Efstathios Stavrakis and Michael
                 Wimmer",
  title =        "An empirical pipeline to derive gaze prediction
                 heuristics for {$3$D} action games",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857897",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Gaze analysis and prediction in interactive virtual
                 environments, such as games, is a challenging topic
                 since the 3D perspective and variations of the
                 viewpoint as well as the current task introduce many
                 variables that affect the distribution of gaze. In this
                 article, we present a novel pipeline to study
                 eye-tracking data acquired from interactive 3D
                 applications. The result of the pipeline is an
                 importance map which scores the amount of gaze spent on
                 each object. This importance map is then used as a
                 heuristic to predict a user's visual attention
                 according to the object properties present at runtime.
                 The novelty of this approach is that the analysis is
                 performed in object space and the importance map is
                 defined in the feature space of high-level
                 properties.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
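
%%% The Bernhard et al. entry above scores gaze per object to form an
%%% importance map.  A minimal sketch of the accumulation step, assuming
%%% an eye-tracking log already resolved to object IDs (the sample
%%% tuples and sampling interval are hypothetical):
%%%
%%%     from collections import Counter
%%%
%%%     samples = [  # (timestamp_s, object_id_under_gaze), made-up log
%%%         (0.00, "enemy"), (0.02, "enemy"), (0.04, "door"),
%%%         (0.06, "enemy"), (0.08, "hud"),   (0.10, "enemy"),
%%%     ]
%%%     dt = 0.02  # assumed sampling interval, seconds
%%%
%%%     gaze_time = Counter()
%%%     for _, obj in samples:
%%%         gaze_time[obj] += dt
%%%     total = sum(gaze_time.values())
%%%     importance = {o: t / total for o, t in gaze_time.items()}
%%%     print(importance)  # ~{'enemy': 0.67, 'door': 0.17, 'hud': 0.17}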

@Article{Li:2010:SCS,
  author =       "Bing Li and Weihua Xiong and De Xu and Hong Bao",
  title =        "A supervised combination strategy for illumination
                 chromaticity estimation",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857898",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Color constancy is an important perceptual ability of
                 humans to recover the color of objects invariant of
                 light information. It is also necessary for a robust
                 machine vision system. Until now, a number of color
                 constancy algorithms have been proposed in the
                 literature. In particular, the edge-based color
                 constancy uses the edge of an image to estimate light
                 color. It is shown to be a rich framework that can
                 represent many existing illumination estimation
                 solutions with various parameter settings. However,
                 color constancy is an ill-posed problem; every
                 algorithm is derived under certain assumptions and
                 performs best only when those assumptions are
                 satisfied.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
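
%%% The Li et al. entry above builds on edge-based color constancy.  A
%%% minimal sketch of a first-order "gray-edge" illuminant estimate (one
%%% instance of the framework the abstract mentions; the image and all
%%% parameter choices here are illustrative):
%%%
%%%     import numpy as np
%%%
%%%     def gray_edge_illuminant(img):        # img: H x W x 3 floats
%%%         # Assume the average edge difference is achromatic, so the
%%%         # per-channel mean gradient magnitude estimates light color.
%%%         est = np.empty(3)
%%%         for c in range(3):
%%%             gy, gx = np.gradient(img[:, :, c])
%%%             est[c] = np.mean(np.hypot(gx, gy))
%%%         return est / np.linalg.norm(est)  # unit-length chromaticity
%%%
%%%     rng = np.random.default_rng(0)
%%%     img = rng.random((64, 64, 3)) * np.array([1.0, 0.8, 0.6])
%%%     print(gray_edge_illuminant(img))      # roughly (1, .8, .6)/norm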

@Article{Hasic:2010:PGH,
  author =       "Jasminka Hasic and Alan Chalmers and Elena Sikudova",
  title =        "Perceptually guided high-fidelity rendering exploiting
                 movement bias in visual attention",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857899",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A major obstacle for real-time rendering of
                 high-fidelity graphics is computational complexity. A
                 key point to consider in the pursuit of ``realism in
                 real time'' in computer graphics is that the Human
                 Visual System (HVS) is a fundamental part of the
                 rendering pipeline. The human eye is only capable of
                 sensing image detail in a $2^\circ$ foveal region,
                 relying on rapid eye movements, or saccades, to jump
                 between points of interest. These points of interest
                 are prioritized based on the saliency of the objects in
                 the scene or the task the user is performing. Such
                 ``glimpses'' of a scene are then assembled by the HVS
                 into a coherent, but inevitably imperfect, visual
                 perception of the environment.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hover:2010:UBE,
  author =       "Raphael H{\"o}ver and Massimiliano {Di Luca} and
                 Matthias Harders",
  title =        "User-based evaluation of data-driven haptic
                 rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857900",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, the data-driven haptic rendering
                 approach presented in our earlier work is assessed. The
                 approach relies on recordings from real objects from
                 which a data-driven model is derived that captures the
                 haptic properties of the object. We conducted two
                 studies. In the first study, the Just Noticeable
                 Difference (JND) for small forces, as encountered in
                 our set-up, was determined. JNDs were obtained for
                 both active and passive user interaction. A conservative
                 threshold curve was derived that was then used to guide
                 the model generation in the second study. The second
                 study examined the achievable rendering fidelity for
                 two objects with different stiffnesses.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
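
%%% The H{\"o}ver et al. entry above uses a conservative JND threshold
%%% curve to guide model generation.  A minimal sketch of how such a
%%% curve can bound rendering error (the linear Weber-style curve and
%%% its coefficients are assumptions, not the measured curve):
%%%
%%%     A, K = 0.03, 0.08   # assumed floor (N) and Weber-like slope
%%%
%%%     def below_threshold(rendered, reference):
%%%         # Error is taken as imperceptible if it stays under JND(f).
%%%         return abs(rendered - reference) < A + K * abs(reference)
%%%
%%%     print(below_threshold(1.02, 1.00))  # True: under the curve
%%%     print(below_threshold(1.20, 1.00))  # False: should be felt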

@Article{Hassaine:2010:IPP,
  author =       "Djamel Hassaine and Nicolas S. Holliman and Simon P.
                 Liversedge",
  title =        "Investigating the performance of path-searching tasks
                 in depth on multiview displays",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1857893.1857901",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Multiview auto-stereoscopic displays support both
                 stereopsis and head motion parallax depth cues and
                 could be superior for certain tasks. Previous work
                 suggests that a high viewpoint density (100 views/10cm
                 at the eye) is required to convincingly support motion
                 parallax. However, it remains unclear how viewpoint
                 density affects task performance, and this factor is
                 critical in determining display and system design
                 requirements. Therefore, we present a simulated
                 multiview display apparatus to undertake experiments
                 using a path-searching task in which we control two
                 independent variables: the stereoscopic depth and the
                 viewpoint density. In the first experiment, we varied
                 both cues and found that even small amounts of stereo
                 depth (2cm) reliably improved task accuracy and reduced
                 latency, whereas there was no evidence of dependence on
                 viewpoint density.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wilkie:2011:MLC,
  author =       "Richard M. Wilkie and John P. Wann and Robert S.
                 Allison",
  title =        "Modeling locomotor control: The advantages of mobile
                 gaze",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870077",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In 1958, J. J. Gibson put forward proposals on the visual
                 control of locomotion. Research in the last 50 years
                 has served to clarify the sources of visual and
                 nonvisual information that contribute to successful
                 steering, but has yet to determine how this information
                 is optimally combined under conditions of uncertainty.
                 Here, we test the conditions under which a locomotor
                 robot with a mobile camera can steer effectively using
                 simple visual and extra-retinal parameters to examine
                 how such models cope with the noisy real-world visual
                 and motor estimates that are available to humans.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
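
%%% The Wilkie et al. entry above concerns steering models driven by
%%% simple visual parameters.  A minimal sketch of a generic
%%% point-attraction controller in that spirit (the control law and
%%% gains are illustrative stand-ins, not the authors' model):
%%%
%%%     import math
%%%
%%%     def steering_rate(theta, theta_dot, k1=2.0, k2=1.0):
%%%         # theta: target angle relative to heading (rad); assumed gains.
%%%         return k1 * theta + k2 * theta_dot
%%%
%%%     x, y, heading = 0.0, 0.0, 0.0
%%%     target, dt, prev = (5.0, 5.0), 0.1, None
%%%     for _ in range(200):
%%%         if math.hypot(target[0] - x, target[1] - y) < 0.2:
%%%             break
%%%         theta = math.atan2(target[1] - y, target[0] - x) - heading
%%%         theta_dot = 0.0 if prev is None else (theta - prev) / dt
%%%         heading += steering_rate(theta, theta_dot) * dt
%%%         x += math.cos(heading) * dt
%%%         y += math.sin(heading) * dt
%%%         prev = theta
%%%     print(round(x, 2), round(y, 2))  # arrives near the target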

@Article{Ennis:2011:PES,
  author =       "Cathy Ennis and Christopher Peters and Carol
                 O'Sullivan",
  title =        "Perceptual effects of scene context and viewpoint for
                 virtual pedestrian crowds",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870078",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we evaluate the effects of position,
                 orientation, and camera viewpoint on the plausibility
                 of pedestrian formations. In a set of three perceptual
                 studies, we investigated how humans perceive
                 characteristics of virtual crowds in static scenes
                 reconstructed from annotated still images, where the
                 orientations and positions of the individuals have been
                 modified. We found that by applying rules based on the
                 contextual information of the scene, we improved the
                 perceived realism of the crowd formations when compared
                 to random formations. We also examined the effect of
                 camera viewpoint on the plausibility of virtual
                 pedestrian scenes, and we found that an eye-level
                 viewpoint is more effective for disguising random
                 behaviors, while from a canonical viewpoint these
                 behaviors are perceived as less realistic than from
                 an isometric or top-down viewpoint.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Stich:2011:PMI,
  author =       "Timo Stich and Christian Linz and Christian Wallraven
                 and Douglas Cunningham and Marcus Magnor",
  title =        "Perception-motivated interpolation of image
                 sequences",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870079",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a method for image interpolation that is
                 able to create high-quality, perceptually convincing
                 transitions between recorded images. By implementing
                 concepts derived from human vision, the problem of a
                 physically correct image interpolation is relaxed to
                 that of image interpolation which is perceived as
                 visually correct by human observers. We find that it
                 suffices to focus on exact edge correspondences,
                 homogeneous regions and coherent motion to compute
                 convincing results. A user study confirms the visual
                 quality of the proposed image interpolation approach.
                 We show how each aspect of our approach increases
                 perceived quality of the result.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rosenholtz:2011:DPV,
  author =       "Ruth Rosenholtz and Amal Dorai and Rosalind Freeman",
  title =        "Do predictions of visual perception aid design?",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870080",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Understanding and exploiting the abilities of the
                 human visual system is an important part of the design
                 of usable user interfaces and information
                 visualizations. Designers traditionally learn
                 qualitative rules of thumb for how to enable quick,
                 easy, and veridical perception of their design. More
                 recently, work in human and computer vision has
                 produced more quantitative models of human perception,
                 which take as input arbitrary, complex images of a
                 design. In this article, we ask whether models of
                 perception aid the design process, using our tool
                 DesignEye as a working example of a perceptual tool
                 incorporating such models. Through a series of
                 interactions with designers and design teams, we find
                 that the models can help, but in somewhat unexpected
                 ways.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Huckauf:2011:OSG,
  author =       "Anke Huckauf and Mario H. Urbina",
  title =        "Object selection in gaze controlled systems: What you
                 don't look at is what you get",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "13:1--13:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870081",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Controlling computers using eye movements can provide
                 a fast and efficient alternative to the computer mouse.
                 However, implementing object selection in
                 gaze-controlled systems is still a challenge. Dwell
                 times or fixations on a certain object typically used
                 to elicit the selection of this object show several
                 disadvantages. We studied deviations of critical
                 thresholds by an individual and task-specific
                 adaptation method. This demonstrated an enormous
                 variability of optimal dwell times. We developed an
                 alternative approach using antisaccades for selection.
                 For selection by antisaccades, a copy of the
                 highlighted object appears to one side of it. The
                 object is selected by fixating the side opposite that
                 copy, which requires inhibiting the automatic gaze
                 shift toward the newly appearing object.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Phillips:2011:ORE,
  author =       "P. Jonathon Phillips and Fang Jiang and Abhijit
                 Narvekar and Julianne Ayyad and Alice J. O'Toole",
  title =        "An other-race effect for face recognition algorithms",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "14:1--14:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870082",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Psychological research indicates that humans recognize
                 faces of their own race more accurately than faces of
                 other races. This ``other-race effect'' occurs for
                 algorithms tested in a recent international competition
                 for state-of-the-art face recognition algorithms. We
                 report results for a Western algorithm made by fusing
                 eight algorithms from Western countries and an East
                 Asian algorithm made by fusing five algorithms from
                 East Asian countries. At the low false accept rates
                 required for most security applications, the Western
                 algorithm recognized Caucasian faces more accurately
                 than East Asian faces and the East Asian algorithm
                 recognized East Asian faces more accurately than
                 Caucasian faces.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
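
%%% The Phillips et al. entry above fuses several face recognition
%%% algorithms into regional composites.  A minimal sketch of standard
%%% score-level fusion (z-normalize per algorithm, then average); the
%%% scores are made up, and this is only assumed to be of the same
%%% family as the fusion actually used:
%%%
%%%     import numpy as np
%%%
%%%     def fuse(scores):                 # (n_algorithms, n_face_pairs)
%%%         s = np.asarray(scores, float)
%%%         z = (s - s.mean(axis=1, keepdims=True)) \
%%%             / s.std(axis=1, keepdims=True)
%%%         return z.mean(axis=0)         # one fused score per pair
%%%
%%%     print(fuse([[0.9, 0.2, 0.4],      # algorithm A, made-up
%%%                 [0.7, 0.1, 0.6]]))    # algorithm B, made-up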

@Article{TenHolt:2011:HIS,
  author =       "Gineke A. {Ten Holt} and Andrea J. {Van Doorn} and
                 Marcel J. T. Reinders and Emile A. Hendriks and Huib
                 {De Ridder}",
  title =        "Human-inspired search for redundancy in automatic sign
                 language recognition",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "15:1--15:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1870076.1870083",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Human perception of sign language can serve as
                 inspiration for the improvement of automatic
                 recognition systems. Experiments with human signers
                 show that sign language signs contain redundancy over
                 time. In this article, experiments are conducted to
                 investigate whether comparable redundancies also exist
                 for an automatic sign language recognition system. Such
                 redundancies could be exploited, for example, by
                 reserving more processing resources for the more
                 informative phases of a sign, or by discarding
                 uninformative phases. In the experiments, an automatic
                 system is trained and tested on isolated fragments of
                 sign language signs. The stimuli used were similar to
                 those of the human signer experiments, allowing us to
                 compare the results.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Endres:2011:EHO,
  author =       "Dominik Endres and Andrea Christensen and Lars Omlor
                 and Martin A. Giese",
  title =        "Emulating human observers with {Bayesian} binning:
                 Segmentation of action streams",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2010325.2010326",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Natural body movements arise in the form of temporal
                 sequences of individual actions. During visual action
                 analysis, the human visual system must accomplish a
                 temporal segmentation of the action stream into
                 individual actions. Such temporal segmentation is also
                 essential to build hierarchical models for action
                 synthesis in computer animation. Ideally, such
                 segmentations should be computed automatically in an
                 unsupervised manner. We present an unsupervised
                 segmentation algorithm that is based on Bayesian
                 Binning (BB) and compare it to human segmentations
                 derived from psychophysical data. BB has the advantage
                 that the observation model can be easily exchanged.
                 Moreover, being an exact Bayesian method, BB allows for
                 the automatic determination of the number and positions
                 of segmentation points.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
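
%%% The Endres et al. entry above segments action streams with Bayesian
%%% Binning.  As a simpler stand-in for the same change-point idea, a
%%% minimal optimal-partitioning sketch that fits constant bins to a 1-D
%%% stream by minimizing squared error plus a per-bin penalty (penalty
%%% and data are illustrative; BB itself is exact Bayesian and also
%%% infers the number of bins):
%%%
%%%     import numpy as np
%%%
%%%     def segment(x, penalty=2.0):
%%%         n = len(x)
%%%         cost = np.full(n + 1, np.inf); cost[0] = 0.0
%%%         prev = np.zeros(n + 1, int)
%%%         cs = np.insert(np.cumsum(x), 0, 0.0)
%%%         cq = np.insert(np.cumsum(x ** 2), 0, 0.0)
%%%         for j in range(1, n + 1):
%%%             for i in range(j):        # candidate bin x[i:j]
%%%                 sse = cq[j] - cq[i] - (cs[j] - cs[i]) ** 2 / (j - i)
%%%                 if cost[i] + sse + penalty < cost[j]:
%%%                     cost[j], prev[j] = cost[i] + sse + penalty, i
%%%         cuts, j = [], n
%%%         while j > 0:
%%%             cuts.append(prev[j]); j = prev[j]
%%%         return sorted(cuts)[1:]       # interior change points
%%%
%%%     x = np.r_[np.zeros(20), 3 * np.ones(20), np.zeros(20)]
%%%     print(segment(x))                 # -> [20, 40]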

@Article{Trutoiu:2011:MAE,
  author =       "Laura C. Trutoiu and Elizabeth J. Carter and Iain
                 Matthews and Jessica K. Hodgins",
  title =        "Modeling and animating eye blinks",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2010325.2010327",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Facial animation often falls short in conveying the
                 nuances present in the facial dynamics of humans. In
                 this article, we investigate the subtleties of the
                 spatial and temporal aspects of eye blinks.
                 Conventional methods for eye blink animation generally
                 employ temporally and spatially symmetric sequences;
                 however, naturally occurring blinks in humans show a
                 pronounced asymmetry on both dimensions. We present an
                 analysis of naturally occurring blinks that was
                 performed by tracking data from high-speed video using
                 active appearance models. Based on this analysis, we
                 generate a set of key-frame parameters that closely
                 match naturally occurring blinks. We compare the
                 perceived naturalness of blinks that are animated based
                 on real data to those created using textbook animation
                 curves.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Napieralski:2011:NFD,
  author =       "Phillip E. Napieralski and Bliss M. Altenhoff and
                 Jeffrey W. Bertrand and Lindsay O. Long and Sabarish
                 V. Babu and Christopher C. Pagano and Justin Kern and
                 Timothy A. Davis",
  title =        "Near-field distance perception in real and virtual
                 environments using both verbal and action responses",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2010325.2010328",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Few experiments have been performed to investigate
                 near-field egocentric distance estimation in an
                 Immersive Virtual Environment (IVE) as compared to the
                 Real World (RW). This article investigates near-field
                 distance estimation in IVEs and RW conditions using
                 physical reach and verbal report measures, by using an
                 apparatus similar to that used by Bingham and Pagano
                 [1998]. Analysis of our experiment shows distance
                 compression in both the IVE and RW conditions in
                 participants' perceptual judgments to targets. This is
                 consistent with previous research in both action space
                 in an IVE and reach space with Augmented Reality (AR).
                 Analysis of verbal responses from participants revealed
                 that participants underestimated significantly less in
                 the virtual world as compared to the RW.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Williams:2011:EWP,
  author =       "Betsy Williams and Stephen Bailey and Gayathri
                 Narasimham and Muqun Li and Bobby Bodenheimer",
  title =        "Evaluation of walking in place on a {Wii} balance
                 board to explore a virtual environment",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2010325.2010329",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this work, we present a method of ``Walking In
                 Place'' (WIP) on the Nintendo Wii Fit Balance Board to
                 explore a virtual environment. We directly compare our
                 method to joystick locomotion and normal walking. The
                 joystick proves inferior to physically walking and to
                 WIP on the Wii Balance Board (WIP--Wii). Interestingly,
                 we find that physically exploring an environment on
                 foot is equivalent in terms of spatial orientation to
                 exploring an environment using our WIP--Wii method.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Navarro:2011:PCM,
  author =       "Fernando Navarro and Susana Castillo and Francisco J.
                 Ser{\'o}n and Diego Gutierrez",
  title =        "Perceptual considerations for motion blur rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2010325.2010330",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Motion blur is a frequent requirement for the
                 rendering of high-quality animated images. However, the
                 computational resources involved are usually higher
                 than those for images that have not been temporally
                 antialiased. In this article we study the influence of
                 high-level properties such as object material and
                 speed, shutter time, and antialiasing level. Based on
                 scenes containing variations of these parameters, we
                 design different psychophysical experiments to
                 determine how influential they are in the perception of
                 image quality. This work gives insights on the effects
                 these parameters have and exposes certain situations
                 where motion blurred stimuli may be indistinguishable
                 from a gold standard.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
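
%%% The Navarro et al. entry above concerns temporally antialiased
%%% (motion blurred) rendering.  A minimal 1-D sketch of the standard
%%% technique: average several instantaneous renders across the shutter
%%% interval (scene, speed, shutter, and sample count are illustrative):
%%%
%%%     import numpy as np
%%%
%%%     def render(t, width=24, speed=20.0):   # 1-D frame, moving box
%%%         frame = np.zeros(width)
%%%         x = int(speed * t)
%%%         frame[x:x + 4] = 1.0
%%%         return frame
%%%
%%%     def motion_blurred(t0, shutter=0.2, samples=8):
%%%         ts = t0 + np.linspace(0.0, shutter, samples)
%%%         return np.mean([render(t) for t in ts], axis=0)
%%%
%%%     print(np.round(motion_blurred(0.0), 2))  # box smeared over path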

@Article{Hodgson:2011:RWE,
  author =       "Eric Hodgson and Eric Bachmann and David Waller",
  title =        "Redirected walking to explore virtual environments:
                 Assessing the potential for spatial interference",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "22:1--22:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043604",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rosli:2011:AGC,
  author =       "Roslizawaty Mohd Rosli and Hong Z. Tan and Robert W.
                 Proctor and Rob Gray",
  title =        "Attentional gradient for crossmodal proximal-distal
                 tactile cueing of visual spatial attention",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "23:1--23:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043605",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bernhard:2011:BTF,
  author =       "Matthias Bernhard and Karl Grosse and Michael
                 Wimmer",
  title =        "Bimodal task-facilitation in a virtual traffic
                 scenario through spatialized sound rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "24:1--24:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043606",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Souman:2011:CEU,
  author =       "J. L. Souman and P. Robuffo Giordano and M. Schwaiger
                 and I. Frissen and T. Th{\"u}mmel and H. Ulbrich and
                 A. De Luca and H. H. B{\"u}lthoff and M. O. Ernst",
  title =        "{CyberWalk}: Enabling unconstrained omnidirectional
                 walking through virtual environments",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "25:1--25:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043607",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Nguyen:2011:ESC,
  author =       "Tien Dat Nguyen and Christine J. Ziemer and Timofey
                 Grechkin and Benjamin Chihak and Jodie M. Plumert and
                 James F. Cremer and Joseph K. Kearney",
  title =        "Effects of scale change on distance perception in
                 virtual environments",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "26:1--26:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043608",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Thumfart:2011:MHA,
  author =       "Stefan Thumfart and Richard H. A. H. Jacobs and Edwin
                 Lughofer and Christian Eitzinger and Frans
                 W. Cornelissen and Werner Groissboeck and Roland
                 Richter",
  title =        "Modeling human aesthetic perception of visual
                 textures",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "27:1--27:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043609",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Au:2011:IMV,
  author =       "Carmen E. Au and James J. Clark",
  title =        "Integrating multiple views with virtual mirrors to
                 facilitate scene understanding",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "28:1--28:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2043603.2043610",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "28",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vanhala:2012:VFA,
  author =       "Toni Vanhala and Veikko Surakka and Matthieu Courgeon
                 and Jean-Claude Martin",
  title =        "Voluntary facial activations regulate physiological
                 arousal and subjective experiences during virtual
                 social stimulation",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2134203.2134204",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Exposure to distressing computer-generated stimuli and
                 feedback of physiological changes during exposure have
                 been effective in the treatment of anxiety disorders
                 (e.g., social phobia). Here we studied voluntary facial
                 activations as a method for regulating more spontaneous
                 physiological changes during virtual social
                 stimulation. Twenty-four participants with a low or
                 high level of social anxiety activated either the
                 corrugator supercilii (used in frowning) or the
                 zygomaticus major (used in smiling) facial muscle to
                 keep a female or a male computer character walking
                 towards them. The more socially anxious participants
                 had a higher level of skin conductance throughout the
                 trials as compared to less anxious participants. Within
                 both groups, short-term skin conductance responses were
                 enhanced both during and after facial activations; and
                 corrugator supercilii activations facilitated longer
                 term electrodermal relaxation. Zygomaticus major
                 activations had opposite effects on subjective
                 emotional ratings of the less and the more socially
                 anxious. In sum, voluntary facial activations were
                 effective in regulating emotional arousal during
                 virtual social exposure. Corrugator supercilii
                 activation was found to be an especially promising
                 method for facilitating autonomic relaxation.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bulling:2012:MRR,
  author =       "Andreas Bulling and Jamie A. Ward and Hans Gellersen",
  title =        "Multimodal recognition of reading activity in transit
                 using body-worn sensors",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2134203.2134205",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Reading is one of the most well-studied visual
                 activities. Vision research traditionally focuses on
                 understanding the perceptual and cognitive processes
                 involved in reading. In this work we recognize reading
                 activity by jointly analyzing eye and head movements of
                 people in an everyday environment. Eye movements are
                 recorded using an electrooculography (EOG) system; body
                 movements using body-worn inertial measurement units.
                 We compare two approaches for continuous recognition of
                 reading: string matching (STR), which explicitly models
                 the characteristic horizontal saccades during reading,
                 and a support vector machine (SVM) that relies on 90
                 eye movement features extracted from the eye movement
                 data. We evaluate both methods in a study performed
                 with eight participants reading while sitting at a
                 desk, standing, walking indoors and outdoors, and
                 riding a tram. We introduce a method to segment reading
                 activity by exploiting the sensorimotor coordination of
                 eye and head movements during reading. Using
                 person-independent training, we obtain an average
                 precision for recognizing reading of 88.9\% (recall
                 72.3\%) using STR and of 87.7\% (recall 87.9\%) using
                 SVM over all participants. We show that the proposed
                 segmentation scheme improves the performance of
                 recognizing reading events by more than 24\%. Our work
                 demonstrates that the joint analysis of eye and body
                 movements is beneficial for reading recognition and
                 opens up discussion on the wider applicability of a
                 multimodal recognition approach to other visual and
                 physical activities.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
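
%%% The Bulling et al. entry above recognizes reading partly by string
%%% matching on the characteristic saccade pattern.  A minimal regex
%%% stand-in (the symbol alphabet, amplitude thresholds, and test
%%% sequences are assumptions, not the authors' STR implementation):
%%%
%%%     import re
%%%
%%%     def encode(dx_per_saccade):
%%%         # 'r' small rightward saccade, 'L' large leftward return
%%%         # sweep, '.' anything else; amplitudes in degrees, assumed.
%%%         sym = []
%%%         for dx in dx_per_saccade:
%%%             if 0.5 <= dx <= 3.0:  sym.append("r")
%%%             elif dx <= -6.0:      sym.append("L")
%%%             else:                 sym.append(".")
%%%         return "".join(sym)
%%%
%%%     def looks_like_reading(symbols):
%%%         # Runs of small right saccades ended by a return sweep.
%%%         return re.search(r"(r{3,}L){2,}", symbols) is not None
%%%
%%%     print(looks_like_reading(encode(
%%%         [1, 1, 2, 1, -8, 1, 2, 1, 1, -7])))    # True
%%%     print(looks_like_reading(encode(
%%%         [4, -2, 5, -5, 3, 0, -1, 2, 6, -3])))  # False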

@Article{Kastanis:2012:RLU,
  author =       "Iason Kastanis and Mel Slater",
  title =        "Reinforcement learning utilizes proxemics: An avatar
                 learns to manipulate the position of people in
                 immersive virtual reality",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2134203.2134206",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A reinforcement learning (RL) method was used to train
                 a virtual character to move participants to a specified
                 location. The virtual environment depicted an alleyway
                 displayed through a wide field-of-view head-tracked
                 stereo head-mounted display. Based on proxemics theory,
                 we predicted that when the character approached within
                 a personal or intimate distance to the participants,
                 they would be inclined to move backwards out of the
                 way. We carried out a between-groups experiment with 30
                 female participants, with 10 assigned arbitrarily to
                 each of the following three groups: In the Intimate
                 condition the character could approach within 0.38m and
                 in the Social condition no nearer than 1.2m. In the
                 Random condition the actions of the virtual character
                 were chosen randomly from among the same set as in the
                 RL method, and the virtual character could approach
                 within 0.38m. The experiment continued in each case
                 until the participant either reached the target or 7
                 minutes had elapsed. The distributions of the times
                 taken to reach the target showed significant
                 differences between the three groups, with 9 out of 10
                 in the Intimate condition reaching the target
                 significantly faster than the 6 out of 10 who reached
                 the target in the Social condition. Only 1 out of 10 in
                 the Random condition reached the target. The experiment
                 is an example of applied presence theory: we rely on
                 the many findings that people tend to respond
                 realistically in immersive virtual environments, and
                 use this to get people to achieve a task of which they
                 had been unaware. This method opens up the door for
                 many such applications where the virtual environment
                 adapts to the responses of the human participants with
                 the aim of achieving particular goals.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
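
%%% The Kastanis and Slater entry above trains a character with
%%% reinforcement learning to move a participant.  A minimal tabular
%%% Q-learning sketch of the same idea in a toy corridor (states,
%%% actions, probabilities, and rewards are all invented stand-ins for
%%% the actual experiment):
%%%
%%%     import random
%%%     random.seed(1)
%%%
%%%     N, TARGET = 6, 0            # corridor cells, target at cell 0
%%%     ACTIONS = ["close", "far"]  # intimate vs. social approach
%%%     Q = {(s, a): 0.0 for s in range(N) for a in ACTIONS}
%%%     alpha, gamma, eps = 0.5, 0.9, 0.1
%%%
%%%     def step(s, a):
%%%         # Per proxemics, a close approach is the more likely to
%%%         # make the participant back up one cell toward the target.
%%%         p = 0.8 if a == "close" else 0.2
%%%         s2 = max(s - 1, 0) if random.random() < p else s
%%%         return s2, (1.0 if s2 == TARGET else -0.05)
%%%
%%%     for _ in range(2000):
%%%         s = N - 1
%%%         while s != TARGET:
%%%             a = (random.choice(ACTIONS) if random.random() < eps
%%%                  else max(ACTIONS, key=lambda b: Q[(s, b)]))
%%%             s2, r = step(s, a)
%%%             best = max(Q[(s2, b)] for b in ACTIONS)
%%%             Q[(s, a)] += alpha * (r + gamma * best - Q[(s, a)])
%%%             s = s2
%%%
%%%     print({s: max(ACTIONS, key=lambda b: Q[(s, b)])
%%%            for s in range(1, N)})  # learns "close" everywhere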

@Article{Jerald:2012:SMT,
  author =       "Jason Jerald and Mary Whitton and Frederick P.
                 {Brooks, Jr.}",
  title =        "Scene-motion thresholds during head yaw for immersive
                 virtual environments",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2134203.2134207",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In order to better understand how scene motion is
                 perceived in immersive virtual environments, we
                 measured scene-motion thresholds under different
                 conditions across three experiments. Thresholds were
                 measured during quasi-sinusoidal head yaw, single
                 left-to-right or right-to-left head yaw, different
                 phases of head yaw, slow to fast head yaw, scene motion
                 relative to head yaw, and two scene-illumination
                 levels. We found that across various conditions (1)
                 thresholds are greater when the scene moves with head
                 yaw (corresponding to gain {$<$}1.0) than when the
                 scene moves against head yaw (corresponding to gain
                 {$>$}1.0), and (2) thresholds increase as head motion
                 increases.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
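
%%% The Jerald et al. entry above measures scene-motion thresholds.  A
%%% minimal sketch of a generic two-down/one-up staircase of the kind
%%% commonly used for such thresholds (the simulated observer, step
%%% size, and stopping rule are assumptions, not the authors'
%%% procedure):
%%%
%%%     import random
%%%     random.seed(0)
%%%
%%%     def detects(velocity, true_threshold=1.0):
%%%         # Hypothetical observer: detection odds rise with velocity.
%%%         return random.random() < min(1.0, velocity / (2 * true_threshold))
%%%
%%%     level, step, streak, direction, reversals = 4.0, 0.5, 0, -1, []
%%%     while len(reversals) < 8:
%%%         if detects(level):
%%%             streak += 1
%%%             if streak == 2:           # two detections -> harder
%%%                 streak = 0
%%%                 if direction == +1: reversals.append(level)
%%%                 direction, level = -1, max(level - step, 0.05)
%%%         else:
%%%             streak = 0
%%%             if direction == -1: reversals.append(level)
%%%             direction, level = +1, level + step
%%%     print(sum(reversals[2:]) / 6)     # ~70.7%-correct threshold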

@Article{Ziat:2012:EVM,
  author =       "Mounia Ziat and Carmen Au and Amin Haji Abolhassani
                 and James J. Clark",
  title =        "Enhancing visuospatial map learning through action on
                 cellphones",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2134203.2134208",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The visuospatial learning of a map on cellphone
                 displays was examined. The spatial knowledge of human
                 participants was assessed after they had learned the
                 relative positions of London Underground stations on a
                 map via passive, marginally active, or active
                 exploration. Following learning, the participants were
                 required to answer questions in relation to the spatial
                 representation and distribution of the stations on the
                 map. Performance was compared between conditions:
                 (1) no auditory cues versus continuous auditory cues;
                 (2) no auditory cues versus noncontinuous auditory
                 cues; and (3) continuous auditory cues versus
                 noncontinuous auditory cues. Results showed that the
                 participants performed better
                 following active and marginally-active explorations, as
                 compared to purely passive learning. These results also
                 suggest that under specific conditions (i.e.,
                 continuous sound with extremely fast tempo) there is no
                 benefit to spatial abilities from active exploration
                 over passive observation, whereas continuous sound
                 with a moderate to fast tempo is effective for simple
                 actions
                 (i.e., key press).",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Watanabe:2012:GCV,
  author =       "Junji Watanabe and Taro Maeda and Hideyuki Ando",
  title =        "Gaze-contingent visual presentation technique with
                 electro-ocular-graph-based saccade detection",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2207216.2207217",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When a single column of light sources flashes quickly
                 in a temporal pattern during a horizontal saccade eye
                 movement, two-dimensional images can be perceived in
                 the space neighboring the light source. This perceptual
                 phenomenon has been applied to light devices for visual
                 arts and entertainment. However, a serious drawback in
                 exploiting this perceptual phenomenon for a visual
                 information display is that a two-dimensional image
                 cannot be viewed if there is any discrepancy between
                 the ocular motility and the flicker timing. We overcame
                 this drawback by combining the saccade-based display
                 with an electro-ocular-graph-based sensor for detecting
                 the saccade. The saccade onset is measured with the
                 electro-ocular-graph-based sensor in real time and the
                 saccade-based display is activated instantaneously as
                 the saccade begins. The psychophysical experiments
                 described in this article demonstrate that our method
                 can detect saccades with low latency and allows the
                 saccade-based display to convey visual information
                 more effectively than light sources that blink
                 continuously regardless of the observer's eye
                 movements.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ziemek:2012:EEO,
  author =       "Tina Ziemek and Sarah Creem-Regehr and William
                 Thompson and Ross Whitaker",
  title =        "Evaluating the effectiveness of orientation indicators
                 with an awareness of individual differences",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2207216.2207218",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Understanding how users perceive 3D geometric objects
                 can provide a basis for creating more effective tools
                 for visualization in applications such as CAD or
                 medical imaging. This article examines how orientation
                 indicators affect users' accuracy in perceiving the
                 shape of a 3D object shown as multiple views. Multiple
                 views force users to infer the orientation of an object
                 and recognize corresponding features between distinct
                 vantage points. These are difficult tasks, and not all
                 users are able to carry them out accurately. We use a
                 cognitive experimental paradigm to evaluate the
                 effectiveness of two types of orientation indicators on
                 a person's ability to compare views of objects
                 presented in different orientations. The orientation
                 indicators implemented were either colocated (sharing
                 a center point with the 3D object) or noncolocated
                 (displaced from the 3D object). The study accounts for
                 additional factors including object complexity, axis of
                 rotation, and users' individual differences in spatial
                 abilities. Our results show that an orientation
                 indicator helps users in comparing multiple views, and
                 that the effect is influenced by the type of aid, a
                 person's spatial ability, and the difficulty of the
                 task. In addition to establishing an effect of an
                 orientation indicator, this article helps demonstrate
                 the application of a particular experimental paradigm
                 and analysis, as well as the importance of considering
                 individual differences when designing interface aids.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Laitinen:2012:PTF,
  author =       "Mikko-Ville Laitinen and Tapani Pihlajam{\"a}ki and
                 Cumhur Erkut and Ville Pulkki",
  title =        "Parametric time-frequency representation of spatial
                 sound in virtual worlds",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2207216.2207219",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Directional audio coding (DirAC) is a parametric
                 time-frequency domain method for processing spatial
                 audio based on psychophysical assumptions and on
                 energetic analysis of the sound field. Methods to use
                 DirAC in spatial sound synthesis for virtual worlds are
                 presented in this article. Formal listening tests are
                 used to show that DirAC can be used to position and to
                 control the spatial extent of virtual sound sources
                 with good audio quality. It is also shown that DirAC
                 can be used to generate reverberation for N-channel
                 horizontal listening with only two monophonic
                 reverberators, without a prominent loss in quality
                 compared with that obtained with N-channel
                 reverberators.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Leroy:2012:RTA,
  author =       "Laure Leroy and Philippe Fuchs and Guillaume Moreau",
  title =        "Real-time adaptive blur for reducing eye strain in
                 stereoscopic displays",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2207216.2207220",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Stereoscopic devices are widely used (immersion-based
                 working environments, stereoscopically viewed movies,
                 auto-stereoscopic screens). In some instances, exposure
                 to stereoscopic immersion techniques can be lengthy,
                 and so eye strain sets in. We propose a method for
                 reducing eye strain induced by stereoscopic vision.
                 After reviewing sources of eye strain linked to
                 stereoscopic vision, we focus on one of these sources:
                 images with high frequency content associated with
                 large disparities. We put forward an algorithm for
                 removing the irritating high frequencies in high
                 horizontal disparity zones (i.e., for virtual objects
                 appearing far from the real screen level). We elaborate
                 on our testing protocol to establish that our image
                 processing method reduces eye strain caused by
                 stereoscopic vision, both objectively and subjectively.
                 We subsequently quantify the positive effects of our
                 algorithm on the relief of eye strain and discuss
                 further research perspectives.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McDonnell:2012:ISI,
  author =       "Rachel McDonnell and Veronica Sundstedt",
  title =        "Introduction to special issue {SAP 2012}",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325723",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schumacher:2012:WFP,
  author =       "Matthaeus Schumacher and Volker Blanz",
  title =        "Which facial profile do humans expect after seeing a
                 frontal view? {A} comparison with a linear face model",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325724",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Manipulated versions of three-dimensional faces that
                 have different profiles, but almost the same appearance
                 in frontal views, provide a novel way to investigate
                 whether and how humans use class-specific knowledge
                 to infer depth from images of faces. After seeing a frontal
                 view, participants have to select the profile that
                 matches that view. The profiles are original (ground
                 truth), average, random other, and two solutions
                 computed with a linear face model (3D Morphable Model).
                 One solution is based on 2D vertex positions, the other
                 on pixel colors in the frontal view. The human
                 responses demonstrate that humans neither guess nor
                 just choose the average profile. The results also
                 indicate that humans actually use the information from
                 the front view, and not just rely on the plausibility
                 of the profiles per se. All our findings are perfectly
                 consistent with a correlation-based inference in a
                 linear face model. The results also verify that the 3D
                 reconstructions from our computational algorithms
                 (stimuli 4 and 5) are similar to what humans expect,
                 because they are chosen as the true profile as often
                 as the ground-truth profiles are. Our experiments
                 shed new light on the mechanisms of human face
                 perception and present a new quality measure for 3D
                 reconstruction algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mustafa:2012:STE,
  author =       "Maryam Mustafa and Stefan Guthe and Marcus Magnor",
  title =        "Single-trial {EEG} classification of artifacts in
                 videos",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325725",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we use an ElectroEncephaloGraph (EEG)
                 to explore the perception of artifacts that typically
                 appear during rendering and determine the perceptual
                 quality of a sequence of images. Although there is an
                 emerging interest in using an EEG for image quality
                 assessment, one of the main impediments to the use of
                 an EEG is the very low Signal-to-Noise Ratio (SNR)
                 which makes it exceedingly difficult to distinguish
                 neural responses from noise. Traditionally,
                 event-related potentials (ERPs) have been used for
                 analysis of EEG data. However, they rely on averaging
                 and so require a large number of participants and
                 trials to get meaningful data. Also, due to the low
                 SNR, ERPs are not suited for single-trial
                 classification. We
                 propose a novel wavelet-based approach for evaluating
                 EEG signals which allows us to predict the perceived
                 image quality from only a single trial. Our
                 wavelet-based algorithm is able to filter the EEG data
                 and remove noise, eliminating the need for many
                 participants or many trials. With this approach it is
                 possible to use data from only 10 electrode channels
                 for single-trial classification and predict the
                 presence of an artifact with an accuracy of 85\%. We
                 also show that it is possible to differentiate and
                 classify a trial based on the exact type of artifact
                 viewed. Our work is particularly useful for
                 understanding how the human visual system responds to
                 different types of degradations in images and videos.
                 An understanding of the perception of typical
                 image-based rendering artifacts forms the basis for the
                 optimization of rendering and masking algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Niu:2012:VES,
  author =       "Yaqing Niu and Rebecca M. Todd and Matthew Kyan and
                 Adam K. Anderson",
  title =        "Visual and emotional salience influence eye
                 movements",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325726",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In natural vision both stimulus features and
                 cognitive/affective factors influence an observer's
                 attention. However, the relationship between
                 stimulus-driven (bottom-up) and cognitive/affective
                 (top-down) factors remains controversial: How well does
                 the classic visual salience model account for gaze
                 locations? Can emotional salience counteract strong
                 visual stimulus signals and shift attention allocation
                 irrespective of bottom-up features? Here we compared
                 Itti and Koch's [2000] and the Spectral Residual (SR)
                 visual salience models and explored the impact of
                 visual salience and emotional salience on eye
                 movement behavior, to understand the competition
                 between visual salience and emotional salience and
                 how they affect gaze allocation in complex scene
                 viewing. Our results
                 show the insufficiency of visual salience models in
                 predicting fixation. Emotional salience can override
                 visual salience and can determine attention allocation
                 in complex scenes. These findings are consistent with
                 the hypothesis that cognitive/affective factors play a
                 dominant role in active gaze control.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhang:2012:MAV,
  author =       "Ruimin Zhang and Anthony Nordman and James Walker and
                 Scott A. Kuhl",
  title =        "Minification affects verbal- and action-based distance
                 judgments differently in head-mounted displays",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325727",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Numerous studies report that people underestimate
                 egocentric distances in Head-Mounted Display (HMD)
                 virtual environments compared to real environments as
                 measured by direct blind walking. Geometric
                 minification, or rendering graphics with a larger field
                 of view than the display's field of view, has been
                 shown to eliminate this underestimation in a virtual
                 hallway environment [Kuhl et al. 2006, 2009]. This
                 study demonstrates that minification affects blind
                 walking in a sparse classroom and does not influence
                 verbal reports of distance. Since verbal reports of
                 distance have been reported to be compressed in real
                 environments, we speculate that minification in an HMD
                 replicates people's real-world blind walking and verbal
                 report distance judgments. We also demonstrate a new
                 method for quantifying any unintentional miscalibration
                 in our experiments. This process involves using the HMD
                 in an augmented reality configuration and having each
                 participant indicate where the targets and horizon
                 appeared after each experiment. More work is necessary
                 to understand how and why minification changes verbal-
                 and walking-based egocentric distance judgments
                 differently.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Couture:2012:PBS,
  author =       "Vincent Couture and Michael S. Langer and
                 S{\'e}bastien Roy",
  title =        "Perception of blending in stereo motion panoramas",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2325722.2325728",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most methods for synthesizing panoramas assume that
                 the scene is static. A few methods have been proposed
                 for synthesizing stereo or motion panoramas, but there
                 has been little attempt to synthesize panoramas that
                 have both stereo and motion. One faces several
                 challenges in synthesizing stereo motion panoramas:
                 for example, ensuring temporal synchronization between
                 left and right views in each frame, avoiding spatial
                 distortion of moving objects, and continuously looping
                 the video in time. We have recently developed a stereo
                 motion panorama method that tries to address some of
                 these challenges. The method blends space-time regions
                 of a video XYT volume, such that the blending regions
                 are distinct and translate over time. This article
                 presents a perception experiment that evaluates certain
                 aspects of the method, namely how well observers can
                 detect such blending regions. We measure detection time
                 thresholds for different blending widths and for
                 different scenes, and for monoscopic versus
                 stereoscopic videos. Our results suggest that blending
                 may be more effective in image regions that do not
                 contain coherent moving objects that can be tracked
                 over time. For example, we found moving water and
                 partly transparent smoke were more effectively blended
                 than swaying branches. We also found that performance
                 in the task was roughly the same for mono versus stereo
                 videos.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{OToole:2012:CFR,
  author =       "Alice J. O'Toole and Xiaobo An and Joseph Dunlop and
                 Vaidehi Natu and P. Jonathon Phillips",
  title =        "Comparing face recognition algorithms to humans on
                 challenging tasks",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355599",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We compared face identification by humans and machines
                 using images taken under a variety of uncontrolled
                 illumination conditions in both indoor and outdoor
                 settings. Natural variations in a person's day-to-day
                 appearance (e.g., hair style, facial expression, hats,
                 glasses, etc.) contributed to the difficulty of the
                 task. Both humans and machines matched the identity of
                 people (same or different) in pairs of frontal view
                 face images. The degree of difficulty introduced by
                 photometric and appearance-based variability was
                 estimated using a face recognition algorithm created by
                 fusing three top-performing algorithms from a recent
                 international competition. The algorithm computed
                 similarity scores for a constant set of same-identity
                 and different-identity pairings from multiple images.
                 Image pairs were assigned to good, moderate, and poor
                 accuracy groups by ranking the similarity scores for
                 each identity pairing, and dividing these rankings into
                 three strata. This procedure isolated the role of
                 photometric variables from the effects of the
                 distinctiveness of particular identities. Algorithm
                 performance for these constant identity pairings varied
                 dramatically across the groups. In a series of
                 experiments, humans matched image pairs from the good,
                 moderate, and poor conditions, rating the likelihood
                 that the images were of the same person (1: sure same
                 --- 5: sure different). Algorithms were more accurate
                 than humans in the good and moderate conditions, but
                 were comparable to humans in the poor accuracy
                 condition. To date, these are the most variable
                 illumination- and appearance-based recognition
                 conditions on which humans and machines have been
                 compared. The finding that machines were never less
                 accurate than humans on these challenging frontal
                 images suggests that face recognition systems may be
                 ready for applications with comparable difficulty. We
                 speculate that the superiority of algorithms over
                 humans in the less challenging conditions may be due to
                 the algorithms' use of detailed, view-specific identity
                 information. Humans may consider this information less
                 important due to its limited potential for robust
                 generalization in suboptimal viewing conditions.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Alonso-Arevalo:2012:CSC,
  author =       "Miguel A. Alonso-Arevalo and Simon Shelley and Dik
                 Hermes and Jacqueline Hollowood and Michael Pettitt and
                 Sarah Sharples and Armin Kohlrausch",
  title =        "Curve shape and curvature perception through
                 interactive sonification",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355600",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we present an approach that uses sound
                 to communicate geometrical data related to a virtual
                 object. This has been developed in the framework of a
                 multimodal interface for product design. The interface
                 allows a designer to evaluate the quality of a 3-D
                 shape using touch, vision, and sound. Two important
                 considerations addressed in this article are the nature
                 of the data that is sonified and the haptic interaction
                 between the user and the interface, which in fact
                 triggers the sound and influences its characteristics.
                 Based on these considerations, we present a number of
                 sonification strategies that are designed to map the
                 geometrical data of interest into sound. The
                 fundamental frequency of various sounds was used to
                 convey the curve shape or the curvature to the
                 listeners. Two evaluation experiments are described:
                 one involved participants with varied backgrounds,
                 the other the intended users, i.e., participants with
                 a background in industrial design. The results show
                 that, independent of the sonification method used and
                 of whether the curve shape or the curvature was
                 sonified, the sonification was quite successful. In
                 the first experiment, participants had a success rate
                 of about 80\% in a multiple-choice task; in the
                 second experiment, it took the participants on
                 average less than 20 seconds to find the maximum,
                 minimum, or inflection points of the curvature of a
                 test curve.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rocchesso:2012:PRP,
  author =       "Davide Rocchesso and Stefano Delle Monache",
  title =        "Perception and replication of planar sonic gestures",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355601",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "As tables, boards, and walls become surfaces where
                 interaction can be supported by auditory displays, it
                 becomes important to know how accurately and
                 effectively a spatial gesture can be rendered by means
                 of an array of loudspeakers embedded in the surface.
                 Two experiments were designed and performed to assess:
                 (i) how sequences of sound pulses are perceived as
                 gestures when the pulses are distributed in space and
                 time along a line; (ii) how the timing of pulses
                 affects the perceived and reproduced continuity of
                 sequences; and (iii) how effectively a second parallel
                 row of speakers can extend sonic gestures to a
                 two-dimensional space. Results show that azimuthal
                 trajectories can be effectively replicated and that
                 switching between discrete and continuous gestures
                 occurs within the range of inter-pulse intervals from
                 75 to 300 ms. The vertical component of sonic gestures
                 cannot be reliably replicated.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rebillat:2012:AVA,
  author =       "Marc R{\'e}billat and Xavier Boutillon and {\'E}tienne
                 Corteel and Brian F. G. Katz",
  title =        "Audio, visual, and audio-visual egocentric distance
                 perception by moving subjects in virtual environments",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355602",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a study on audio, visual, and audio-visual
                 egocentric distance perception by moving subjects in
                 virtual environments. Audio-visual rendering is
                 provided using tracked passive visual stereoscopy and
                 acoustic wave field synthesis (WFS). Distances are
                 estimated using indirect blind-walking (triangulation)
                 under each rendering condition. Experimental results
                 show that distances perceived in the virtual
                 environment are systematically overestimated for
                 rendered distances closer than the position of the
                 audio-visual rendering system and underestimated for
                 farther distances. Interestingly, subjects perceived
                 each virtual object at a modality-independent distance
                 when using the audio modality, the visual modality, or
                 the combination of both. WFS was able to synthesize
                 perceptually meaningful sound fields. Dynamic
                 audio-visual cues were used by subjects when estimating
                 the distances in the virtual world. Moving may have
                 provided subjects with a better visual distance
                 perception of close distances than if they were static.
                 No correlation between the feeling of presence and the
                 visual distance underestimation has been found. To
                 explain the observed perceptual distance compression,
                 it is proposed that, due to conflicting distance cues,
                 the audio-visual rendering system physically anchors
                 the virtual world to the real world. Virtual objects
                 are thus attracted by the physical audio-visual
                 rendering system.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Healey:2012:LRV,
  author =       "Christopher G. Healey and Amit P. Sawant",
  title =        "On the limits of resolution and visual angle in
                 visualization",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355603",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article describes a perceptual level-of-detail
                 approach for visualizing data. Properties of a dataset
                 that cannot be resolved in the current display
                 environment need not be shown, for example, when too
                 few pixels are used to render a data element, or when
                 the element's subtended visual angle falls below the
                 acuity limits of our visual system. To identify these
                 situations, we asked: (1) What type of information can
                 a human user perceive in a particular display
                 environment? (2) Can we design visualizations that
                 control what they represent relative to these limits?
                 and (3) Is it possible to dynamically update a
                 visualization as the display environment changes, to
                 continue to effectively utilize our perceptual
                 abilities? To answer these questions, we conducted
                 controlled experiments that identified the pixel
                 resolution and subtended visual angle needed to
                 distinguish different values of luminance, hue, size,
                 and orientation. This information is summarized in a
                 perceptual display hierarchy, a formalization
                 describing how many pixels (resolution) and how much
                 physical area on a viewer's retina (visual angle) are
                 required for an element's visual properties to be
                 readily seen. We demonstrate our theoretical results by
                 visualizing historical climatology data from the
                 Intergovernmental Panel on Climate Change.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Oulasvirta:2012:HRR,
  author =       "Antti Oulasvirta and Antti Nurminen and Tiia
                 Suomalainen",
  title =        "How real is real enough? {Optimal} reality sampling
                 for fast recognition of mobile imagery",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "21:1--21:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2355598.2355604",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present the first study to discover optimal reality
                 sampling for mobile imagery. In particular, we identify
                 the minimum information required for fast recognition
                 of images of directly perceivable real-world buildings
                 displayed on a mobile device. Resolution, image size,
                 and JPEG compression of images of fa{\c{c}}ades were
                 manipulated in a same--different recognition task
                 carried out in the field. Best-effort performance is
                 shown to be reachable with significantly lower detail
                 granularity than previously thought. For best user
                 performance, we recommend presenting images as large as
                 possible on the screen and decreasing resolution
                 accordingly.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Merer:2013:PCM,
  author =       "Adrien Merer and Mitsuko Aramaki and S{\o}lvi Ystad
                 and Richard Kronland-Martinet",
  title =        "Perceptual characterization of motion evoked by sounds
                 for synthesis control purposes",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422106",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article addresses the question of synthesis and
                 control of sound attributes from a perceptual point of
                 view. We focused on an attribute related to the general
                 concept of motion evoked by sounds. To investigate this
                 concept, we tested 40 monophonic abstract sounds on
                 listeners via a questionnaire and drawings, using a
                 parametrized custom interface. This original procedure,
                 which was defined with synthesis and control
                 perspectives in mind, provides an alternative means of
                 determining intuitive control parameters for
                 synthesizing sounds evoking motion. Results showed that
                 three main shape categories (linear, with regular
                 oscillations, and with circular oscillations) and three
                 types of direction (rising, descending, and horizontal)
                 were distinguished by the listeners. In addition, the
                 subjects were able to perceive the low-frequency
                 oscillations (below 8 Hz) quite accurately. Three size
                 categories (small, medium, and large) and three levels
                 of randomness (none, low amplitude irregularities, and
                 high amplitude irregularities) and speed (constant
                 speed and speeds showing medium and large variations)
                 were also observed in our analyses of the participants'
                 drawings. We further performed a perceptual test to
                 confirm the relevance of some of these variables,
                 using synthesized sounds combined with visual
                 trajectories. Based on these results, a general
                 typology of evoked motion was drawn up and an intuitive
                 control strategy was designed, based on a symbolic
                 representation of continuous trajectories (provided by
                 devices such as motion capture systems, pen tablets,
                 etc.). These generic tools could be used in a wide
                 range of applications such as sound design, virtual
                 reality, sonification, and music.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bojrab:2013:PIL,
  author =       "Micah Bojrab and Michel Abdul-Massih and Bedrich
                 Benes",
  title =        "Perceptual importance of lighting phenomena in
                 rendering of animated water",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422107",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Recent years have seen increasing research in
                 perceptually driven reductions in the costs of
                 realistically rendered imagery. Water is complex and
                 recognizable, and continues to be at the forefront of
                 research. However, the contribution of individual
                 lighting phenomena to the perceived realism of virtual
                 water has not been addressed. All these phenomena have
                 costs associated with their rendering, but does the
                 visual benefit outweigh these costs? This study
                 investigates the human perception of various
                 illumination components found in water-rich virtual
                 environments. The investigation uses a traditional
                 psychophysical analysis to examine viewer perception of
                 these lighting phenomena as they relate to the
                 rendering cost, and ultimately reveals common trends in
                 perceptual value. Five different scenes with a wide
                 range of water and lighting dynamics were tested for
                 perceptual value by one hundred participants. Our
                 results provide an importance comparison for lighting
                 phenomena in the rendering of water, and cost
                 reductions can be made with little or no effect on the
                 perceived quality of the imagery if viewed in a
                 scenario similar to that in our tests.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Selmanovic:2013:GSH,
  author =       "Elmedin Selmanovi{\'c} and Kurt Debattista and Thomas
                 Bashford-Rogers and Alan Chalmers",
  title =        "Generating stereoscopic {HDR} images using {HDR--LDR}
                 image pairs",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422108",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A number of novel imaging technologies have been
                 gaining popularity over the past few years. Foremost
                 among these are stereoscopy and high dynamic range
                 (HDR) imaging. While a large body of research has
                 looked into each of these imaging technologies
                 independently, very little work has attempted to
                 combine them. This is mostly due to the current
                 limitations in capture and display. In this article, we
                 mitigate the problems of capturing Stereoscopic HDR
                 (SHDR), which would otherwise require two HDR
                 cameras, by capturing an HDR and LDR pair and using
                 it to generate 3D stereoscopic HDR content. We ran a
                 detailed user
                 study to compare four different methods of generating
                 SHDR content. The methods investigated were the
                 following: two based on expanding the luminance of the
                 LDR image, and two utilizing stereo correspondence
                 methods, which were adapted for our purposes. Results
                 demonstrate that one of the stereo correspondence
                 methods may be considered perceptually
                 indistinguishable from the ground truth (image pair
                 captured using two HDR cameras), while the other
                 methods are all significantly distinct from the ground
                 truth.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gamper:2013:SSD,
  author =       "Hannes Gamper and Christina Dicke and Mark
                 Billinghurst and Kai Puolam{\"a}ki",
  title =        "Sound sample detection and numerosity estimation using
                 auditory display",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422109",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article investigates the effect of various design
                 parameters of auditory information display on user
                 performance in two basic information retrieval tasks.
                 We conducted a user test with 22 participants in which
                 sets of sound samples were presented. In the first
                 task, the test participants were asked to detect a
                 given sample among a set of samples. In the second
                 task, the test participants were asked to estimate the
                 relative number of instances of a given sample in two
                 sets of samples. We found that the stimulus onset
                 asynchrony (SOA) of the sound samples had a significant
                 effect on user performance in both tasks. For the
                 sample detection task, the average error rate was about
                 10\% with an SOA of 100 ms. For the numerosity
                 estimation task, an SOA of at least 200 ms was
                 necessary to yield average error rates lower than
                 30\%. Other parameters, including the samples' sound type
                 (synthesized speech or earcons) and spatial quality
                 (multichannel loudspeaker or diotic headphone
                 playback), had no substantial effect on user
                 performance. These results suggest that diotic, or
                 indeed monophonic, playback with appropriately chosen
                 SOA may be sufficient in practical applications for
                 users to perform the given information retrieval tasks,
                 if information about the sample location is not
                 relevant. If location information was provided through
                 spatial playback of the samples, test subjects were
                 able to simultaneously detect and localize a sample
                 with reasonable accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhao:2013:API,
  author =       "Mingtian Zhao and Song-Chun Zhu",
  title =        "Abstract painting with interactive control of
                 perceptual entropy",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422110",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article presents a framework for generating
                 abstract art from photographs. The aesthetics of
                 abstract art is largely attributed to its greater
                 perceptual ambiguity than photographs. According to
                 psychological theories [Berlyne 1971], the ambiguity
                 tends to invoke moderate mental effort in the viewer
                 for interpreting the underlying contents, and this
                 process is usually accompanied by subtle aesthetic
                 pleasure. We study this phenomenon through human
                 experiments comparing the subjects' interpretations of
                 abstract art and photographs, and quantitatively
                 verify the increased perceptual ambiguities in terms
                 of recognition accuracy and response time. Based on the
                 studies, we measure the level of perceptual ambiguity
                 using entropy, as it measures uncertainty levels in
                 information theory, and propose a painterly rendering
                 method with interactive control of the ambiguity
                 levels. Given an input photograph, we first segment it
                 into regions corresponding to different objects and
                 parts in an interactive manner and organize them into a
                 hierarchical parse tree representation. Then we execute
                 a painterly rendering process with image obscuring
                 operators to transfer the photograph into an abstract
                 painting style with increased perceptual ambiguities in
                 both the scene and individual objects. Finally, using
                 kernel density estimation and message-passing
                 algorithms, we compute and control the ambiguity levels
                 numerically to the desired levels, during which we may
                 predict and control the viewer's perceptual path among
                 the image contents by assigning different ambiguity
                 levels to different objects. We have evaluated the
                 rendering results using a second set of human
                 experiments, and verified that they achieve similar
                 abstract effects to original abstract paintings.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kyto:2013:IRD,
  author =       "Mikko Kyt{\"o} and Aleksi M{\"a}kinen and Jukka
                 H{\"a}kkinen and Pirkko Oittinen",
  title =        "Improving relative depth judgments in augmented
                 reality with auxiliary augmentations",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2422105.2422111",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Significant depth judgment errors are common in
                 augmented reality. This study presents a visualization
                 approach for improving relative depth judgments in
                 augmented reality. The approach uses auxiliary
                 augmented objects in addition to the main augmentation
                 to support ordinal and interval depth judgment tasks.
                 The auxiliary augmentations are positioned spatially
                 near real-world objects, and the location of the main
                 augmentation can be deduced based on the relative depth
                 cues between the augmented objects. In the experimental
                 part, the visualization approach was tested in the
                 ``X-ray'' visualization case with a video see-through
                 system. Two relative depth cues, in addition to motion
                 parallax, were used between graphical objects: relative
                 size and binocular disparity. The results show that the
                 presence of auxiliary objects significantly reduced
                 errors in depth judgment. Errors in judging the ordinal
                 location with respect to a wall (front, at, or behind)
                 and judging depth intervals were reduced. In addition
                 to reduced errors, the presence of auxiliary
                 augmentation increased the confidence in depth
                 judgments, and it was subjectively preferred. The
                 visualization approach did not have an effect on the
                 viewing time.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bouchara:2013:CMS,
  author =       "Tifanie Bouchara and Christian Jacquemin and Brian F.
                 G. Katz",
  title =        "Cueing multimedia search with audiovisual blur",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Situated in the context of multimedia browsing, this
                 study concerns perceptual processes involved in
                 searching for an audiovisual object displayed among
                 several distractors. The aim of the study is to
                 increase the perceptual saliency of the target in order
                 to enhance the search process. As blurring distractors
                 and maintaining the target sharp has proved to be a
                 great facilitator of visual search, we propose
                 combining visual blur with an audio blur analogue to
                 improve multimodal search. Three perceptual experiments
                 were performed in which participants had to retrieve an
                 audiovisual object from a set of six competing stimuli.
                 The first two experiments explored the effect of blur
                 level on unimodal search tasks. A third experiment
                 investigated the influence of an audio and visual
                 modality combination with both modalities cued on an
                 audiovisual search task. Results showed that both
                  visual and audio blur rendered distractor stimuli less
                  prominent and thus helped users focus on a sharp
                  target more easily. Performance was also faster and
                  more accurate in the bimodal condition than in either
                  unimodal search task, auditory or visual. Our work
                  suggests that audio and audiovisual interfaces
                  dedicated to multimedia search could benefit from
                  presentation strategies based on different uses of
                  blur.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
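
The audio blur analogue proposed in the Bouchara et al. abstract above
can be pictured as low-pass filtering: just as visual blur removes high
spatial frequencies, an audio blur attenuates high temporal
frequencies. The sketch below only illustrates that idea under assumed
parameters (cutoff, filter order, six random streams); it is not the
stimulus pipeline used in the article.

import numpy as np
from scipy.signal import butter, sosfilt

def audio_blur(signal, sample_rate, cutoff_hz=1000.0, order=4):
    # Low-pass the signal so that, like a visually blurred distractor,
    # it loses its sharp (high-frequency) detail.
    sos = butter(order, cutoff_hz, btype="low", fs=sample_rate, output="sos")
    return sosfilt(sos, signal)

# Six competing streams, one sharp target among blurred distractors.
fs = 44100
streams = [np.random.default_rng(i).standard_normal(fs) for i in range(6)]
target = 2
scene = [s if i == target else audio_blur(s, fs) for i, s in enumerate(streams)]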

@Article{Zhan:2013:MDF,
  author =       "Ce Zhan and Wanqing Li and Philip Ogunbona",
  title =        "Measuring the degree of face familiarity based on
                 extended {NMF}",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Getting familiar with a face is an important cognitive
                  process in human perception of faces, but little
                  research has been reported on how to objectively
                  measure the degree of familiarity. In this article, a
                  method is
                 proposed to quantitatively measure the familiarity of a
                 face with respect to a set of reference faces that have
                 been seen previously. The proposed method models the
                 context-free and context-dependent forms of familiarity
                 suggested by psychological studies and accounts for the
                 key factors, namely exposure frequency, exposure
                 intensity and similar exposure, that affect human
                 perception of face familiarity. Specifically, the
                 method divides the reference set into nonexclusive
                 groups and measures the familiarity of a given face by
                 aggregating the similarities of the face to the
                 individual groups. In addition, the nonnegative matrix
                 factorization (NMF) is extended in this paper to learn
                 a compact and localized subspace representation for
                 measuring the similarities of the face with respect to
                 the individual groups. The proposed method has been
                 evaluated through experiments that follow the protocols
                 commonly used in psychological studies and has been
                 compared with subjective evaluation. Results have shown
                 that the proposed measurement is highly consistent with
                 the subjective judgment of face familiarity. Moreover,
                 a face recognition method is devised using the concept
                 of face familiarity and the results on the standard
                 FERET evaluation protocols have further verified the
                 efficacy of the proposed familiarity measurement.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "nonnegative matrix factorization (NMF)",
}
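
To make the familiarity measurement in the Zhan et al. abstract above
concrete, the sketch below scores a probe face by its average
similarity to groups of previously seen reference faces in a learned
subspace. It uses plain scikit-learn NMF rather than the article's
extended NMF, and the grouping and aggregation scheme shown here are
assumptions for illustration only.

import numpy as np
from sklearn.decomposition import NMF
from sklearn.metrics.pairwise import cosine_similarity

def familiarity(probe, reference_groups, n_components=16):
    # Learn a nonnegative parts-based subspace over probe + references,
    # then average the probe's similarity to each reference group.
    X = np.vstack([probe[None, :]] + list(reference_groups))
    W = NMF(n_components=n_components, init="nndsvda",
            max_iter=500).fit_transform(X)
    probe_code, codes, scores, start = W[:1], W[1:], [], 0
    for group in reference_groups:
        end = start + len(group)
        scores.append(cosine_similarity(probe_code, codes[start:end]).mean())
        start = end
    return float(np.mean(scores))

rng = np.random.default_rng(0)
groups = [rng.random((20, 256)) for _ in range(3)]   # three "seen" groups
print(familiarity(rng.random(256), groups))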

@Article{Nymoen:2013:ACB,
  author =       "Kristian Nymoen and Rolf Inge God{\o}y and Alexander
                 Refsum Jensenius and Jim Torresen",
  title =        "Analyzing correspondence between sound objects and
                 body motion",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Links between music and body motion can be studied
                 through experiments called sound-tracing. One of the
                 main challenges in such research is to develop robust
                 analysis techniques that are able to deal with the
                 multidimensional data that musical sound and body
                 motion present. The article evaluates four different
                 analysis methods applied to an experiment in which
                 participants moved their hands following perceptual
                 features of short sound objects. Motion capture data
                 has been analyzed and correlated with a set of
                 quantitative sound features using four different
                 methods: (a) a pattern recognition classifier, (b)
                 $t$-tests, (c) Spearman's $ \rho $ correlation, and (d)
                 canonical correlation. This article shows how the
                 analysis methods complement each other, and that
                 applying several analysis techniques to the same data
                 set can broaden the knowledge gained from the
                 experiment.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
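
Two of the four analysis methods named in the Nymoen et al. abstract,
Spearman's $ \rho $ and canonical correlation, are available off the
shelf; a minimal sketch with placeholder sound-tracing features follows
(the feature choices and the random data are invented, not the
experiment's recordings).

import numpy as np
from scipy.stats import spearmanr
from sklearn.cross_decomposition import CCA

rng = np.random.default_rng(0)
motion = rng.standard_normal((200, 3))  # e.g., hand speed, acceleration, height
sound = rng.standard_normal((200, 2))   # e.g., loudness, spectral centroid

# (c) Spearman's rho between one motion feature and one sound feature.
rho, pval = spearmanr(motion[:, 0], sound[:, 0])

# (d) Canonical correlation between the full feature sets.
U, V = CCA(n_components=2).fit(motion, sound).transform(motion, sound)
canonical = [np.corrcoef(U[:, k], V[:, k])[0, 1] for k in range(2)]
print(rho, canonical)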

@Article{Sugano:2013:GBJ,
  author =       "Yusuke Sugano and Yasuyuki Matsushita and Yoichi
                 Sato",
  title =        "Graph-based joint clustering of fixations and visual
                 entities",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a method that extracts groups of fixations
                 and image regions for the purpose of gaze analysis and
                 image understanding. Since the attentional relationship
                 between visual entities conveys rich information,
                  automatically determining the relationship provides us
                  with a semantic representation of images. We show
                  that, by jointly clustering human gaze and visual
                  entities, it
                 is possible to build meaningful and comprehensive
                 metadata that offer an interpretation about how people
                 see images. To achieve this, we developed a clustering
                 method that uses a joint graph structure between
                 fixation points and over-segmented image regions to
                 ensure a cross-domain smoothness constraint. We show
                 that the proposed clustering method achieves better
                 performance in relating attention to visual entities in
                 comparison with standard clustering techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
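
A rough illustration of joint clustering over fixations and image
regions in the spirit of the Sugano et al. abstract: put both kinds of
entities into one affinity graph and cluster it spectrally. This
collapses the article's joint graph with its cross-domain smoothness
constraint into a single spatial affinity, so it is a simplification
under assumed scales, not the published method.

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.cluster import SpectralClustering

def joint_clusters(fixations, region_centroids, n_clusters=5, sigma=40.0):
    # One Gaussian affinity over both fixation points and region
    # centroids (image coordinates), clustered as a single graph.
    pts = np.vstack([fixations, region_centroids])
    affinity = np.exp(-cdist(pts, pts, "sqeuclidean") / (2 * sigma ** 2))
    labels = SpectralClustering(n_clusters=n_clusters,
                                affinity="precomputed").fit_predict(affinity)
    n_fix = len(fixations)
    return labels[:n_fix], labels[n_fix:]  # per-fixation, per-region labels

rng = np.random.default_rng(0)
fix_labels, reg_labels = joint_clusters(rng.uniform(0, 640, (30, 2)),
                                        rng.uniform(0, 640, (12, 2)))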

@Article{Ruddle:2013:LWV,
  author =       "Roy A. Ruddle and Ekaterina Volkova and Heinrich H.
                 B{\"u}lthoff",
  title =        "Learning to walk in virtual reality",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article provides longitudinal data for when
                 participants learned to travel with a walking metaphor
                 through virtual reality (VR) worlds, using interfaces
                 that ranged from joystick-only, to linear and
                 omnidirectional treadmills, and actual walking in VR.
                 Three metrics were used: travel time, collisions (a
                 measure of accuracy), and the speed profile. The time
                 that participants required to reach asymptotic
                 performance for traveling, and what that asymptote was,
                 varied considerably between interfaces. In particular,
                 when a world had tight turns (0.75 m corridors),
                 participants who walked were more proficient than those
                 who used a joystick to locomote and turned either
                 physically or with a joystick, even after 10 minutes of
                 training. The speed profile showed that this was caused
                 by participants spending a notable percentage of the
                 time stationary, irrespective of whether or not they
                 frequently played computer games. The study shows how
                 speed profiles can be used to help evaluate
                 participants' proficiency with travel interfaces,
                 highlights the need for training to be structured to
                  address specific weaknesses in proficiency (e.g.,
                 start-stop movement), and for studies to measure and
                 report that proficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Geigel:2013:ISI,
  author =       "Joe Geigel and Jeanine Stefanucci",
  title =        "Introduction to special issue {SAP 2013}",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2506206.2506207",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Tompkin:2013:PAA,
  author =       "James Tompkin and Min H. Kim and Kwang In Kim and Jan
                 Kautz and Christian Theobalt",
  title =        "Preference and artifact analysis for video transitions
                 of places",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2501601",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Emerging interfaces for video collections of places
                 attempt to link similar content with seamless
                 transitions. However, the automatic computer vision
                 techniques that enable these transitions have many
                 failure cases which lead to artifacts in the final
                 rendered transition. Under these conditions, which
                 transitions are preferred by participants and which
                 artifacts are most objectionable? We perform an
                 experiment with participants comparing seven transition
                 types, from movie cuts and dissolves to image-based
                 warps and virtual camera transitions, across five
                 scenes in a city. This document describes how we
                 condition this experiment on slight and considerable
                 view change cases, and how we analyze the feedback from
                 participants to find their preference for transition
                 types and artifacts. We discover that transition
                 preference varies with view change, that automatic
                 rendered transitions are significantly preferred even
                 with some artifacts, and that dissolve transitions are
                 comparable to less-sophisticated rendered transitions.
                 This leads to insights into what visual features are
                 important to maintain in a rendered transition, and to
                 an artifact ordering within our transitions.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mccrae:2013:SPP,
  author =       "James Mccrae and Niloy J. Mitra and Karan Singh",
  title =        "Surface perception of planar abstractions",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2501853",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Various algorithms have been proposed to create planar
                 abstractions of 3D models, but there has been no
                 systematic effort to evaluate the effectiveness of such
                 abstractions in terms of perception of the abstracted
                 surfaces. In this work, we perform a large
                 crowd-sourced study involving approximately 70k samples
                 to evaluate how well users can orient gauges on planar
                 abstractions of commonly occurring models. We test four
                 styles of planar abstractions against ground truth
                 surface representations, and analyze the data to
                 discover a wide variety of correlations between task
                 error and measurements relating to surface-specific
                 properties such as curvature, local thickness and
                 medial axis distance, and abstraction-specific
                 properties. We use these discovered correlations to
                 create linear models to predict error in surface
                 understanding at a given point, for both surface
                 representations and planar abstractions. Our predictive
                 models reveal the geometric causes most responsible for
                 error, and we demonstrate their potential use to build
                 upon existing planar abstraction techniques in order to
                 improve perception of the abstracted surface.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chen:2013:SPT,
  author =       "Jianhui Chen and Robert S. Allison",
  title =        "Shape perception of thin transparent objects with
                 stereoscopic viewing",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2506206.2506208",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Many materials, including water surfaces, jewels, and
                 glassware exhibit transparent refractions. The human
                 visual system can somehow recover 3D shape from
                 refracted images. While previous research has
                 elucidated various visual cues that can facilitate
                  visual perception of transparent objects, most of this
                  work focused on monocular material perception. The
                  question of shape perception of transparent objects is
                  much more complex, and few studies have been
                  undertaken, particularly in terms of binocular vision.
                  In this
                 article, we first design a system for stereoscopic
                 surface orientation estimation with photo-realistic
                 stimuli. It displays pre-rendered stereoscopic images
                 and a real-time S3D (Stereoscopic 3D) shape probe
                 simultaneously. Then we estimate people's perception of
                 the shape of thin transparent objects using a gauge
                 figure task. Our results suggest that people can
                 consistently perceive the surface orientation of thin
                 transparent objects, and stereoscopic viewing improves
                 the precision of estimates. To explain the results, we
                 present an edge-aware orientation map based on image
                 gradients and structure tensors to illustrate the
                 orientation information in images. We also decomposed
                 the normal direction of the surface into azimuth angle
                 and slant angle to explain why additional depth
                 information can improve the accuracy of perceived
                 normal direction.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
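
The edge-aware orientation map mentioned in the Chen and Allison
abstract is built from image gradients and structure tensors; the
standard structure-tensor orientation computation is sketched below
(the smoothing scale is an assumption, and the article's edge-aware
weighting is omitted).

import numpy as np
from scipy.ndimage import gaussian_filter, sobel

def orientation_map(image, sigma=2.0):
    # Structure tensor from image gradients, smoothed at scale sigma;
    # the dominant local orientation is half the tensor's angle.
    Ix = sobel(image.astype(float), axis=1)
    Iy = sobel(image.astype(float), axis=0)
    Jxx = gaussian_filter(Ix * Ix, sigma)
    Jxy = gaussian_filter(Ix * Iy, sigma)
    Jyy = gaussian_filter(Iy * Iy, sigma)
    theta = 0.5 * np.arctan2(2 * Jxy, Jxx - Jyy)      # radians per pixel
    coherence = np.sqrt((Jxx - Jyy) ** 2 + 4 * Jxy ** 2) / (Jxx + Jyy + 1e-9)
    return theta, coherence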

@Article{Easa:2013:EMD,
  author =       "Haider K. Easa and Rafal K. Mantiuk and Ik Soo Lim",
  title =        "Evaluation of monocular depth cues on a
                 high-dynamic-range display for visualization",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2504568",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The aim of this work is to identify the depth cues
                 that provide intuitive depth-ordering when used to
                 visualize abstract data. In particular we focus on the
                 depth cues that are effective on a high-dynamic-range
                 (HDR) display: contrast and brightness. In an
                 experiment participants were shown a visualization of
                 the volume layers at different depths with a single
                 isolated monocular cue as the only indication of depth.
                 The observers were asked to identify which slice of the
                 volume appears to be closer. The results show that
                 brightness, contrast and relative size are the most
                 effective monocular depth cues for providing an
                 intuitive depth ordering.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Argelaguet:2013:EIP,
  author =       "Ferran Argelaguet and David Antonio G{\'o}mez
                 J{\'a}uregui and Maud Marchal and Anatole L{\'e}cuyer",
  title =        "Elastic images: Perceiving local elasticity of images
                 through a novel pseudo-haptic deformation effect",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2501599",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We introduce the Elastic Images, a novel pseudo-haptic
                 feedback technique which enables the perception of the
                 local elasticity of images without the need of any
                  haptic device. The proposed approach focuses on
                  whether visual feedback is able to induce a sensation
                  of stiffness when the user interacts with an image
                  using a standard mouse. When clicking on an Elastic
                  Image, the user is able to deform it locally according
                  to its elastic properties. To reinforce the effect, we
                  also propose the generation of procedural shadows and
                  creases to simulate the compressibility of the image
                  and several mouse cursor replacements to enhance
                  pressure and stiffness perception. A psychophysical
                  experiment was conducted to quantify this novel
                  pseudo-haptic perception and determine its perceptual
                  threshold (or its Just Noticeable Difference). The
                  results showed that users were able to recognize up to
                  eight different stiffness values with our proposed
                  method and confirmed that it provides a perceivable
                  and exploitable sensation of elasticity. The potential
                  applications of the proposed approach range from
                  pressure sensing in product catalogs and games to
                  graphical user interfaces with more expressive
                  widgets.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
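
The pseudo-haptic effect described in the abstract above rests on a
simple relation: for the same mouse press, a "stiff" image deforms less
than a "soft" one. The toy model below illustrates only that relation;
the Gaussian footprint and the compliance mapping are assumptions, not
the article's deformation model.

import numpy as np

def indentation_field(shape, click_xy, press_depth, stiffness, radius=25.0):
    # Visual indentation (pixels) for one mouse press; a higher
    # stiffness yields a shallower deformation for the same press.
    h, w = shape
    yy, xx = np.mgrid[0:h, 0:w]
    r2 = (xx - click_xy[0]) ** 2 + (yy - click_xy[1]) ** 2
    return (press_depth / stiffness) * np.exp(-r2 / (2 * radius ** 2))

soft  = indentation_field((480, 640), (320, 240), press_depth=30, stiffness=1.0)
stiff = indentation_field((480, 640), (320, 240), press_depth=30, stiffness=4.0)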

@Article{Kelly:2013:SPV,
  author =       "Jonathan W. Kelly and Melissa Burton and Brice Pollock
                 and Eduardo Rubio and Michael Curtis and Julio {De La
                 Cruz} and Stephen Gilbert and Eliot Winer",
  title =        "Space perception in virtual environments: Displacement
                 from the center of projection causes less distortion
                 than predicted by cue-based models",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536765",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual reality systems commonly include both
                 monocular and binocular depth cues, which have the
                 potential to provide viewers with a realistic
                 impression of spatial properties of the virtual
                 environment. However, when multiple viewers share the
                 same display, only one viewer typically receives the
                 projectively correct images. All other viewers
                 experience the same images despite displacement from
                 the center of projection (CoP). Three experiments
                 evaluated perceptual distortions caused by displacement
                 from the CoP and compared those percepts to predictions
                 of models based on monocular and binocular viewing
                 geometry. Leftward and rightward displacement from the
                 CoP caused virtual angles on the ground plane to be
                 judged as larger and smaller, respectively, compared to
                 judgments from the CoP. Backward and forward
                 displacement caused rectangles on the ground plane to
                 be judged as larger and smaller in depth, respectively,
                 compared to judgments from the CoP. Judgment biases
                 were in the same direction as cue-based model
                 predictions but of smaller magnitude. Displacement from
                 the CoP had asymmetric effects on perceptual judgments,
                 unlike model predictions. Perceptual distortion
                 occurred with monocular cues alone but was exaggerated
                 when binocular cues were added. The results are
                 grounded in terms of practical implications for
                 multiuser virtual environments.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
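
The binocular cue-based model predictions discussed in the Kelly et al.
abstract can be sketched geometrically: a virtual point is rendered
correctly for the center of projection (CoP), and a displaced viewer
re-triangulates it through the same two screen points. The following is
a minimal sketch of that geometry (meters, screen plane at z = 0,
symmetric eye pairs assumed), not the authors' model code.

import numpy as np

def project_to_screen(point, eye):
    # Intersect the ray eye -> point with the screen plane z = 0.
    t = eye[2] / (eye[2] - point[2])
    return eye + t * (point - eye)

def perceived_point(point, cop_eyes, viewer_eyes):
    # Images are correct for the CoP eye pair; the displaced viewer's
    # eyes re-triangulate through the same two screen points.
    sL = project_to_screen(point, cop_eyes[0])
    sR = project_to_screen(point, cop_eyes[1])
    dL, dR = sL - viewer_eyes[0], sR - viewer_eyes[1]
    A = np.stack([dL, -dR], axis=1)
    t, s = np.linalg.lstsq(A, viewer_eyes[1] - viewer_eyes[0], rcond=None)[0]
    return (viewer_eyes[0] + t * dL + viewer_eyes[1] + s * dR) / 2

cop = [np.array([-0.032, 0.0, 1.0]), np.array([0.032, 0.0, 1.0])]
shifted = [e + np.array([0.30, 0.0, 0.0]) for e in cop]  # 30 cm rightward
print(perceived_point(np.array([0.1, 0.0, -0.5]), cop, shifted))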

@Article{Yildiz:2013:FAP,
  author =       "Zeynep Cipiloglu Yildiz and Abdullah Bulbul and Tolga
                 Capin",
  title =        "A framework for applying the principles of depth
                 perception to information visualization",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536766",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "During the visualization of 3D content, using the
                 depth cues selectively to support the design goals and
                 enabling a user to perceive the spatial relationships
                  between the objects are important concerns. In this
                  work, we automate this process by proposing a
                 framework that determines important depth cues for the
                 input scene and the rendering methods to provide these
                 cues. While determining the importance of the cues, we
                 consider the user's tasks and the scene's spatial
                 layout. The importance of each depth cue is calculated
                 using a fuzzy logic--based decision system. Then,
                 suitable rendering methods that provide the important
                 cues are selected by performing a cost-profit analysis
                 on the rendering costs of the methods and their
                 contribution to depth perception. Possible cue
                 conflicts are considered and handled in the system. We
                 also provide formal experimental studies designed for
                 several visualization tasks. A statistical analysis of
                 the experiments verifies the success of our
                 framework.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
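
The cost-profit stage described in the Yildiz et al. abstract can be
pictured as a budgeted selection problem: each rendering method has a
cost and contributes some amount of the cues the fuzzy system deemed
important. The greedy sketch below is a highly simplified stand-in with
invented numbers; the article's fuzzy-logic scoring and conflict
handling are not reproduced here.

methods = {
    # method: (cost in ms per frame, depth cues provided with strengths)
    "shadow_mapping": (3.0, {"shadow": 0.9}),
    "ssao":           (4.5, {"shadow": 0.4, "shading": 0.6}),
    "depth_of_field": (2.0, {"focus_blur": 0.8}),
    "fog":            (0.5, {"aerial_perspective": 1.0}),
}
importance = {"shadow": 0.8, "shading": 0.5, "focus_blur": 0.3,
              "aerial_perspective": 0.6}   # stand-in for fuzzy-logic output

def select_methods(budget_ms):
    # Greedy profit-to-cost selection under a frame-time budget.
    profit = lambda cues: sum(importance.get(c, 0.0) * v for c, v in cues.items())
    ranked = sorted(methods.items(),
                    key=lambda kv: profit(kv[1][1]) / kv[1][0], reverse=True)
    chosen, spent = [], 0.0
    for name, (cost, cues) in ranked:
        if spent + cost <= budget_ms:
            chosen.append(name)
            spent += cost
    return chosen

print(select_methods(budget_ms=6.0))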

@Article{Nunez-Varela:2013:MGC,
  author =       "Jose Nunez-Varela and Jeremy L. Wyatt",
  title =        "Models of gaze control for manipulation tasks",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536767",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Human studies have shown that gaze shifts are mostly
                 driven by the current task demands. In manipulation
                 tasks, gaze leads action to the next manipulation
                 target. One explanation is that fixations gather
                 information about task relevant properties, where task
                 relevance is signalled by reward. This work presents
                 new computational models of gaze shifting, where the
                 agent imagines ahead in time the informational effects
                 of possible gaze fixations. Building on our previous
                 work, the contributions of this article are: (i) the
                 presentation of two new gaze control models, (ii)
                 comparison of their performance to our previous model,
                 (iii) results showing the fit of all these models to
                 previously published human data, and (iv) integration
                 of a visual search process. The first new model selects
                 the gaze that most reduces positional uncertainty of
                 landmarks (Unc), and the second maximises expected
                 rewards by reducing positional uncertainty (RU). Our
                 previous approach maximises the expected gain in
                 cumulative reward by reducing positional uncertainty
                 (RUG). In experiment ii the models are tested on a
                 simulated humanoid robot performing a manipulation
                 task, and each model's performance is characterised by
                 varying three environmental variables. This experiment
                 provides evidence that the RUG model has the best
                 overall performance. In experiment iii, we compare the
                 hand-eye coordination timings of the models in a robot
                 simulation to those obtained from human data. This
                 provides evidence that only the models that incorporate
                 both uncertainty and reward (RU and RUG) match human
                 data.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gaffary:2013:CAC,
  author =       "Yoren Gaffary and Victoria Eyharabide and Jean-Claude
                 Martin and Mehdi Ammi",
  title =        "Clustering approach to characterize haptic expressions
                 of emotions",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "21:1--21:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536768",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Several studies have investigated the relevance of
                 haptics to physically convey various types of emotion.
                 However, they use basic analysis approaches to identify
                 the relevant features for an effective communication of
                 emotion. This article presents an advanced analysis
                 approach, based on the clustering technique, that
                 enables the extraction of the general features of
                 affective haptic expressions as well as the
                 identification of specific features in order to
                 discriminate between close emotions that are difficult
                 to differentiate. This approach was tested in the
                 context of affective communication through a virtual
                 handshake. It uses a haptic device, which enables the
                 expression of 3D movements. The results of this
                 research were compared to those of the standard
                 Analysis of Variance method in order to highlight the
                 advantages and limitations of each approach.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Marentakis:2013:PIG,
  author =       "G. Marentakis and S. Mcadams",
  title =        "Perceptual impact of gesture control of
                 spatialization",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "22:1--22:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536769",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In two experiments, visual cues from gesture control
                 of spatialization were found to affect auditory
                 movement perception depending on the identifiability of
                 auditory motion trajectories, the congruency of
                 audiovisual stimulation, the sensory focus of
                 attention, and the attentional process involved.
                 Visibility of the performer's gestures improved spatial
                 audio trajectory identification, but it shifted the
                 listeners' attention to vision, impairing auditory
                 motion encoding in the case of incongruent stimulation.
                 On the other hand, selectively directing attention to
                 audition resulted in interference from the visual cues
                 for acoustically ambiguous trajectories. Auditory
                 motion information was poorly preserved when dividing
                 attention between auditory and visual movement feedback
                 from performance gestures. An auditory focus of
                 attention is a listener strategy that maximizes
                 performance, due to the improvement caused by congruent
                 visual stimulation and its robustness to interference
                 from incongruent stimulation for acoustically
                 unambiguous trajectories. Attentional strategy and
                 auditory motion calibration are two aspects that need
                 to be considered when employing gesture control of
                 spatialization.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Turchet:2013:WPA,
  author =       "Luca Turchet and Stefania Serafin and Paola Cesari",
  title =        "Walking pace affected by interactive sounds simulating
                 stepping on different terrains",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "23:1--23:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536770",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article investigates whether auditory feedback
                 affects natural locomotion patterns. Individuals were
                 provided with footstep sounds simulating different
                 surface materials. The sounds were interactively
                 generated using shoes with pressure sensors. Results
                 showed that subjects' walking speed changed as a
                 function of the type of simulated ground material. This
                 effect may arise due to the presence of conflicting
                  information between the auditory and foot-haptic
                  modalities, or because of an adjustment of locomotion
                  to the physical properties evoked by the sounds
                  simulating
                 the ground materials. The results reported in this
                 study suggest that auditory feedback may be more
                 important in the regulation of walking in natural
                 environments than has been acknowledged. Furthermore,
                 auditory feedback could be used to develop novel
                 approaches to the design of therapeutic and
                 rehabilitation procedures for locomotion.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lylykangas:2013:IVS,
  author =       "Jani Lylykangas and Veikko Surakka and Jussi Rantala
                 and Roope Raisamo",
  title =        "Intuitiveness of vibrotactile speed regulation cues",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "24:1--24:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536771",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Interpretations of vibrotactile stimulations were
                 compared between two participant groups. In both
                 groups, the task was to evaluate specifically designed
                 tactile stimulations presented to the wrist or chest.
                 Ascending, constant, and descending vibration frequency
                 profiles of the stimuli represented information for
                 three different speed regulation instructions:
                 ``accelerate your speed,'' ``keep your speed
                 constant,'' and ``decelerate your speed,''
                 respectively. The participants were treated differently
                 so that one of the groups was first taught (i.e.,
                 primed) the meanings of the stimuli, whereas the other
                 group was not taught (i.e., unprimed). The results
                 showed that the stimuli were evaluated nearly equally
                  in the primed and the unprimed groups. The
                  best-performing stimuli communicated the three
                  intended meanings at a rate of 88\% to 100\% in the
                  primed group and at a rate of 71\% to 83\% in the
                  unprimed group. Both groups performed equally in
                  evaluating ``keep your speed constant'' and
                  ``decelerate your
                 speed'' information. As the unprimed participants
                 performed similarly to the primed participants, the
                 results suggest that vibrotactile stimulation can be
                 intuitively understood. The results suggest further
                 that carefully designed vibrotactile stimulations could
                 be functional in delivering easy-to-understand feedback
                 on how to regulate the speed of movement, such as in
                 physical exercise and rehabilitation applications.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
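
The three cue types in the Lylykangas et al. abstract map directly onto
ascending, constant, and descending vibration-frequency profiles, which
are easy to synthesize as chirps. The frequencies and duration below
are assumptions for illustration, not the study's stimulus parameters.

import numpy as np
from scipy.signal import chirp

fs, dur = 8000, 1.5                        # sample rate (Hz), duration (s)
t = np.linspace(0, dur, int(fs * dur), endpoint=False)

accelerate = chirp(t, f0=80, f1=250, t1=dur, method="linear")   # ascending
constant   = np.sin(2 * np.pi * 160 * t)                        # constant
decelerate = chirp(t, f0=250, f1=80, t1=dur, method="linear")   # descending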

@Article{Blom:2013:VTC,
  author =       "Kristopher J. Blom and Steffi Beckhaus",
  title =        "Virtual travel collisions: Response method influences
                 perceived realism of virtual environments",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "25:1--25:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536772",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Travel methods are the most basic and widespread
                 interaction method with virtual environments. They are
                 the primary and often the only way the user
                 interactively experiences the environment. We present a
                 study composed of three experiments that investigates
                  how virtual collision methods and feedback impact user
                 perception of the realism of collisions and the virtual
                 environment. A wand-based virtual travel method was
                 used to navigate maze environments in an immersive
                 projective system. The results indicated that the
                 introduction of collision handling significantly
                 improved the user's perception of the realism of the
                 environment and collisions. An effect of feedback on
                 the perceived level of realism of collisions and
                 solidity of the environment was also found. Our results
                 indicate that feedback should be context appropriate,
                  e.g., fitting a collision with the object; yet, the
                 modality and richness of feedback were only important
                 in that traditional color change feedback did not
                 perform as well as audio or haptic feedback. In
                 combination, the experiments indicated that in
                 immersive virtual environments the stop collision
                 handling method produced a more realistic impression
                 than the slide method that is popular in games. In
                 total, the study suggests that feedback fitting the
                 collision context, coupled with the stop handling
                 method, provides the best perceived realism of
                 collisions and scene.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
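
The ``stop'' and ``slide'' collision-handling methods compared in the
Blom and Beckhaus study are standard in virtual travel; minimal
versions of both responses are sketched below (n is the unit wall
normal pointing toward the viewer).

import numpy as np

def stop_response(velocity, normal):
    # "Stop": all motion is cancelled on contact.
    return np.zeros_like(velocity)

def slide_response(velocity, normal):
    # "Slide": remove the component pushing into the wall, keep the rest.
    into_wall = min(float(np.dot(velocity, normal)), 0.0)
    return velocity - into_wall * np.asarray(normal)

v = np.array([1.0, 0.0, -0.5])           # intended travel velocity
n = np.array([0.0, 0.0, 1.0])            # unit wall normal, toward viewer
print(stop_response(v, n), slide_response(v, n))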

@Article{Lin:2013:SMA,
  author =       "Kai-Hsiang Lin and Xiaodan Zhuang and Camille
                 Goudeseune and Sarah King and Mark Hasegawa-Johnson and
                 Thomas S. Huang",
  title =        "Saliency-maximized audio visualization and efficient
                 audio-visual browsing for faster-than-real-time human
                 acoustic event detection",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "26:1--26:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536773",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Browsing large audio archives is challenging because
                 of the limitations of human audition and attention.
                 However, this task becomes easier with a suitable
                 visualization of the audio signal, such as a
                 spectrogram transformed to make unusual audio events
                 salient. This transformation maximizes the mutual
                 information between an isolated event's spectrogram and
                 an estimate of how salient the event appears in its
                 surrounding context. When such spectrograms are
                 computed and displayed with fluid zooming over many
                 temporal orders of magnitude, sparse events in long
                 audio recordings can be detected more quickly and more
                 easily. In particular, in a 1/10-real-time acoustic
                 event detection task, subjects who were shown
                 saliency-maximized rather than conventional
                 spectrograms performed significantly better. Saliency
                 maximization also improves the mutual information
                 between the ground truth of nonbackground sounds and
                 visual saliency, more than other common enhancements to
                 visualization.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
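
Lin et al. learn their spectrogram transformation by maximizing mutual
information between isolated-event spectrograms and their estimated
salience; the sketch below substitutes a much simpler proxy, contrast
against a per-band running-median background, purely to illustrate how
a saliency-enhanced spectrogram might be produced.

import numpy as np
from scipy.signal import spectrogram
from scipy.ndimage import median_filter

def salient_spectrogram(x, fs):
    # Log-spectrogram minus a per-band running-median background;
    # positive deviations (rare, loud structure) stand out visually.
    f, t, S = spectrogram(x, fs=fs, nperseg=512)
    logS = np.log(S + 1e-12)
    background = median_filter(logS, size=(1, 51))
    return f, t, np.clip(logS - background, 0.0, None)

x = np.random.default_rng(0).standard_normal(64000)  # 4 s of noise at 16 kHz
f, t, display = salient_spectrogram(x, 16000)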

@Article{Komogortsev:2013:LOP,
  author =       "Oleg Komogortsev and Corey Holland and Sampath
                 Jayarathna and Alex Karpov",
  title =        "{$2$D} Linear oculomotor plant mathematical model:
                 Verification and biometric applications",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "27:1--27:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536764.2536774",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article assesses the ability of a two-dimensional
                 (2D) linear homeomorphic oculomotor plant mathematical
                 model to simulate normal human saccades on a 2D plane.
                 The proposed model is driven by a simplified pulse-step
                 neuronal control signal and makes use of linear
                 simplifications to account for the unique
                 characteristics of the eye globe and the extraocular
                 muscles responsible for horizontal and vertical eye
                 movement. The linear nature of the model sacrifices
                 some anatomical accuracy for computational speed and
                 analytic tractability, and may be implemented as two
                 one-dimensional models for parallel signal simulation.
                 Practical applications of the model might include
                 improved noise reduction and signal recovery facilities
                 for eye tracking systems, additional metrics from which
                 to determine user effort during usability testing, and
                 enhanced security in biometric identification systems.
                  The results indicate that the model is capable of
                  producing oblique saccades with properties resembling
                  those of normal human saccades and is capable of
                  deriving muscle constants that are viable as biometric
                  indicators. Therefore, we conclude that the sacrifice
                  in the model's anatomical accuracy produces negligible
                  effects on the accuracy of saccadic simulation on a 2D
                  plane and may provide a usable model
                 for applications in computer science, human-computer
                 interaction, and related fields.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
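
As a caricature of the simulation pattern in the Komogortsev et al.
abstract, the sketch below integrates a second-order linear plant
driven by a pulse-step control signal, with one independent channel per
axis. All constants are invented for illustration; the article's
oculomotor plant has a considerably richer anatomical
parameterization.

import numpy as np

def simulate_saccade(amplitude_deg, pulse_ms=30, dt=0.001, T=0.25,
                     K=1.0, B=0.05, J=0.001, pulse_gain=8.0):
    # Second-order rotational plant J*theta'' = K*(u - theta) - B*theta',
    # driven by a pulse (burst) followed by a step (hold) command.
    n = int(T / dt)
    theta, omega, trace = 0.0, 0.0, np.empty(n)
    for i in range(n):
        u = amplitude_deg * (pulse_gain if i * dt < pulse_ms / 1000 else 1.0)
        alpha = (K * (u - theta) - B * omega) / J
        omega += alpha * dt
        theta += omega * dt
        trace[i] = theta
    return trace

horizontal = simulate_saccade(10.0)   # one channel; vertical is analogous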

@Article{Caramiaux:2014:RSS,
  author =       "B. Caramiaux and F. Bevilacqua and T. Bianco and N.
                 Schnell and O. Houix and P. Susini",
  title =        "The Role of Sound Source Perception in Gestural Sound
                 Description",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "1:1--1:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536811",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigated gesture description of sound stimuli
                 performed during a listening task. Our hypothesis is
                 that the strategies in gestural responses depend on the
                 level of identification of the sound source and
                 specifically on the identification of the action
                 causing the sound. To validate our hypothesis, we
                 conducted two experiments. In the first experiment, we
                 built two corpora of sounds. The first corpus contains
                 sounds with identifiable causal actions. The second
                 contains sounds for which no causal actions could be
                  identified. The properties of these corpora were validated
                 through a listening test. In the second experiment,
                 participants performed arm and hand gestures
                 synchronously while listening to sounds taken from
                 these corpora. Afterward, we conducted interviews
                 asking participants to verbalize their experience while
                 watching their own video recordings. They were
                 questioned on their perception of the listened sounds
                 and on their gestural strategies. We showed that for
                 the sounds where causal action can be identified,
                 participants mainly mimic the action that has produced
                 the sound. In the other case, when no action can be
                 associated with the sound, participants trace contours
                  related to sound acoustic features. We also found that
                  interparticipant gesture variability is higher for
                  causal sounds than for noncausal sounds.
                 Variability demonstrates that, in the first case,
                 participants have several ways of producing the same
                 action, whereas in the second case, the sound features
                 tend to make the gesture responses consistent.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Stransky:2014:ELT,
  author =       "Debi Stransky and Laurie M. Wilcox and Robert S.
                 Allison",
  title =        "Effects of Long-Term Exposure on Sensitivity and
                 Comfort with Stereoscopic Displays",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "2:1--2:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2536810",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Stereoscopic 3D media has recently increased in
                 appreciation and availability. This popularity has led
                 to concerns over the health effects of habitual viewing
                 of stereoscopic 3D content; concerns that are largely
                 hypothetical. Here we examine the effects of repeated,
                 long-term exposure to stereoscopic 3D in the workplace
                 on several measures of stereoscopic sensitivity
                 (discrimination, depth matching, and fusion limits)
                 along with reported negative symptoms associated with
                 viewing stereoscopic 3D. We recruited a group of adult
                 stereoscopic 3D industry experts and compared their
                 performance with observers who were (i) inexperienced
                 with stereoscopic 3D, (ii) researchers who study
                 stereopsis, and (iii) vision researchers with little or
                 no experimental stereoscopic experience. Unexpectedly,
                 we found very little difference between the four groups
                 on all but the depth discrimination task, and the
                 differences that did occur appear to reflect
                 task-specific training or experience. Thus, we found no
                 positive or negative consequences of repeated and
                 extended exposure to stereoscopic 3D in these
                 populations.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wang:2014:OGL,
  author =       "Rui I. Wang and Brandon Pelfrey and Andrew T.
                 Duchowski and Donald H. House",
  title =        "Online {$3$D} Gaze Localization on Stereoscopic
                 Displays",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "3:1--3:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2593689",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article summarizes our previous work on
                 developing an online system to allow the estimation of
                 3D gaze depth using eye tracking in a stereoscopic
                 environment. We report on recent extensions allowing us
                 to report the full 3D gaze position. Our system employs
                 a 3D calibration process that determines the parameters
                 of a mapping from a naive depth estimate, based simply
                 on triangulation, to a refined 3D gaze point estimate
                 tuned to a particular user. We show that our system is
                 an improvement on the geometry-based 3D gaze estimation
                 returned by a proprietary algorithm provided with our
                 tracker. We also compare our approach with that of the
                 Parameterized Self-Organizing Map (PSOM) method, due to
                 Essig and colleagues, which also individually
                 calibrates to each user. We argue that our method is
                 superior in speed and ease of calibration, is easier to
                 implement, and does not require an iterative solver to
                 produce a gaze position, thus guaranteeing computation
                 at the rate of tracker acquisition. In addition, we
                 report on a user study that indicates that, compared
                 with PSOM, our method more accurately estimates gaze
                 depth, and is nearly as accurate in estimating
                 horizontal and vertical position. Results are verified
                  on two different 3D eye tracking systems, a high
                 accuracy Wheatstone haploscope and a medium accuracy
                 active stereo display. Thus, it is the recommended
                 method for applications that primarily require gaze
                 depth information, while its ease of use makes it
                 suitable for many applications requiring full 3D gaze
                 position.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

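The naive depth estimate that Wang et al. start from is plain
triangulation of the two gaze rays. As a hedged illustration (not the
authors' code; the function and its inputs are hypothetical), the
closest-point-of-approach computation in Python is:

import numpy as np

def naive_gaze_depth(left_eye, right_eye, left_dir, right_dir):
    # Naive triangulation: midpoint of the shortest segment between
    # the gaze rays L(s) = left_eye + s*dL and R(t) = right_eye + t*dR.
    left_eye = np.asarray(left_eye, float)
    right_eye = np.asarray(right_eye, float)
    dL = np.asarray(left_dir, float)
    dL /= np.linalg.norm(dL)
    dR = np.asarray(right_dir, float)
    dR /= np.linalg.norm(dR)
    w = left_eye - right_eye
    a, b, c = dL.dot(dL), dL.dot(dR), dR.dot(dR)
    d, e = dL.dot(w), dR.dot(w)
    denom = a * c - b * b          # ~0 only for near-parallel rays
    s = (b * e - c * d) / denom
    t = (a * e - b * d) / denom
    return 0.5 * ((left_eye + s * dL) + (right_eye + t * dR))

The paper's contribution is the per-user calibrated mapping applied on
top of such a naive estimate; a regression fitted during the 3D
calibration step would play that role here.
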
@Article{Pacchierotti:2014:ITT,
  author =       "Claudio Pacchierotti and Asad Tirmizi and Domenico
                 Prattichizzo",
  title =        "Improving Transparency in Teleoperation by Means of
                 Cutaneous Tactile Force Feedback",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "4:1--4:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2604969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A study on the role of cutaneous and kinesthetic force
                 feedback in teleoperation is presented. Cutaneous cues
                 provide less transparency than kinesthetic force, but
                 they do not affect the stability of the teleoperation
                 system. On the other hand, kinesthesia provides a
                 compelling illusion of telepresence but affects the
                 stability of the haptic loop. However, when employing
                 common grounded haptic interfaces, it is not possible
                 to independently control the cutaneous and kinesthetic
                 components of the interaction. For this reason, many
                 control techniques ensure a stable interaction by
                 scaling down both kinesthetic and cutaneous force
                 feedback, even though acting on the cutaneous channel
                 is not necessary. We discuss here the feasibility of a
                 novel approach. It aims at improving the realism of the
                 haptic rendering, while preserving its stability, by
                 modulating cutaneous force to compensate for a lack of
                 kinesthesia. We carried out two teleoperation
                 experiments, evaluating (1) the role of cutaneous
                 stimuli when reducing kinesthesia and (2) the extent to
                 which an overactuation of the cutaneous channel can
                 fully compensate for a lack of kinesthetic force
                 feedback. Results showed that, to some extent, it is
                 possible to compensate for a lack of kinesthesia with
                 the aforementioned technique, without significant
                 performance degradation. Moreover, users showed a high
                 comfort level in using the proposed system.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hecher:2014:CPS,
  author =       "Michael Hecher and Matthias Bernhard and Oliver
                 Mattausch and Daniel Scherzer and Michael Wimmer",
  title =        "A Comparative Perceptual Study of Soft-Shadow
                 Algorithms",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2620029",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We performed a perceptual user study of algorithms
                 that approximate soft shadows in real time. Although a
                  huge body of soft-shadow algorithms has been proposed,
                 to our knowledge this is the first methodical study for
                 comparing different real-time shadow algorithms with
                 respect to their plausibility and visual appearance. We
                 evaluated soft-shadow properties like penumbra overlap
                 with respect to their relevance to shadow perception in
                 a systematic way, and we believe that our results can
                 be useful to guide future shadow approaches in their
                 methods of evaluation. In this study, we also capture
                 the predominant case of an inexperienced user observing
                 shadows without comparing to a reference solution, such
                 as when watching a movie or playing a game. One
                 important result of this experiment is to
                 scientifically verify that real-time soft-shadow
                 algorithms, despite having become physically based and
                 very realistic, can nevertheless be intuitively
                 distinguished from a correct solution by untrained
                 users.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ramic-Brkic:2014:OAV,
  author =       "Belma Ramic-Brkic and Alan Chalmers",
  title =        "Olfactory Adaptation in Virtual Environments",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2617917",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual perception is becoming increasingly important
                 in computer graphics. Research on human visual
                 perception has led to the development of
                 perception-driven computer graphics techniques, where
                 knowledge of the human visual system (HVS) and, in
                 particular, its weaknesses are exploited when rendering
                 and displaying 3D graphics. Findings on limitations of
                 the HVS have been used to maintain high perceived
                 quality but reduce the computed quality of some of the
                 image without this quality difference being perceived.
                 This article investigates the amount of time for which
                 (if at all) such limitations could be exploited in the
                 presence of smell. The results show that for our
                 experiment, adaptation to smell does indeed affect
                 participants' ability to determine quality difference
                 in the animations. Having been exposed to a smell
                 before undertaking the experiment, participants were
                 able to determine the quality in a similar fashion to
                 the ``no smell'' condition, whereas without adaptation,
                 participants were not able to distinguish the quality
                 difference.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fan:2014:HPV,
  author =       "Shaojing Fan and Rangding Wang and Tian-Tsong Ng and
                 Cheston Y.-C. Tan and Jonathan S. Herberg and Bryan L.
                 Koenig",
  title =        "Human Perception of Visual Realism for Photo and
                 Computer-Generated Face Images",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2620030",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Computer-generated (CG) face images are common in
                 video games, advertisements, and other media. CG faces
                 vary in their degree of realism, a factor that impacts
                 viewer reactions. Therefore, efficient control of
                 visual realism of face images is important. Efficient
                 control is enabled by a deep understanding of visual
                 realism perception: the extent to which viewers judge
                 an image as a real photograph rather than a CG image.
                 Across two experiments, we explored the processes
                 involved in visual realism perception of face images.
                 In Experiment 1, participants made visual realism
                 judgments on original face images, inverted face
                 images, and images of faces that had the top and bottom
                 halves misaligned. In Experiment 2, participants made
                 visual realism judgments on original face images,
                 scrambled faces, and images that showed different parts
                 of faces. Our findings indicate that both holistic and
                 piecemeal processing are involved in visual realism
                 perception of faces, with holistic processing becoming
                 more dominant when resolution is lower. Our results
                 also suggest that shading information is more important
                 than color for holistic processing, and that inversion
                 makes visual realism judgments harder for realistic
                 images but not for unrealistic images. Furthermore, we
                 found that eyes are the most influential face part for
                 visual realism, and face context is critical for
                 evaluating realism of face parts. To the best of our
                 knowledge, this work is a first realism-centric study
                 attempting to bridge the human perception of visual
                 realism on face images with general face perception
                 tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chen:2014:NHF,
  author =       "Fangmei Chen and Yong Xu and David Zhang",
  title =        "A New Hypothesis on Facial Beauty Perception",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2622655",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, a new hypothesis on facial beauty
                 perception is proposed: the weighted average of two
                 facial geometric features is more attractive than the
                  inferior one between them. Extensive evidence supports
                 the new hypothesis. We collected 390 well-known
                 beautiful face images (e.g., Miss Universe, movie
                 stars, and super models) as well as 409 common face
                 images from multiple sources. Dozens of volunteers
                 rated the face images according to their
                 attractiveness. Statistical regression models are
                 trained on this database. Under the empirical risk
                 principle, the hypothesis is tested on 318,801 pairs of
                 images and receives consistently supportive results. A
                  corollary of the hypothesis is that attractive facial
                  geometric features form a convex set. This corollary
                  yields a convex-hull-based face beautification
                  method, which guarantees attractiveness
                 and minimizes the before--after difference.
                 Experimental results show its superiority to
                  state-of-the-art geometry-based face beautification
                 methods. Moreover, the mainstream hypotheses on facial
                 beauty perception (e.g., the averageness, symmetry, and
                 golden ratio hypotheses) are proved to be compatible
                 with the proposed hypothesis.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

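The hypothesis above concerns convex combinations of facial geometric
feature vectors (stacked landmark coordinates). A minimal sketch of
the operation it ranks, with hypothetical inputs:

import numpy as np

def blend_features(f1, f2, w=0.5):
    # Convex combination of two facial geometric feature vectors;
    # the hypothesis predicts the blend is rated at least as
    # attractive as the less attractive of the two inputs.
    assert 0.0 <= w <= 1.0
    return w * np.asarray(f1, float) + (1.0 - w) * np.asarray(f2, float)

The convex-set corollary then motivates beautification as the minimal
move of a feature vector onto the convex hull of attractive exemplars,
i.e., a small quadratic program.
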
@Article{Kaaresoja:2014:TTP,
  author =       "Topi Kaaresoja and Stephen Brewster and Vuokko Lantz",
  title =        "Towards the Temporally Perfect Virtual Button:
                 Touch-Feedback Simultaneity and Perceived Quality in
                 Mobile Touchscreen Press Interactions",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2611387",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Pressing a virtual button is still the major
                  interaction method on touchscreen mobile phones.
                 Although phones are becoming more and more powerful,
                 operating system software is getting more and more
                 complex, causing latency in interaction. We were
                 interested in gaining insight into touch-feedback
                 simultaneity and the effects of latency on the
                 perceived quality of touchscreen buttons. In an
                 experiment, we varied the latency between touch and
                 feedback between 0 and 300 ms for tactile, audio, and
                 visual feedback modalities. We modelled the proportion
                 of simultaneity perception as a function of latency for
                 each modality condition. We used a Gaussian model
                 fitted with the maximum likelihood estimation method to
                 the observations. These models showed that the point of
                  subjective simultaneity (PSS) was 5 ms for tactile,
                  19 ms for audio, and 32 ms for visual feedback. Our study
                 included the scoring of perceived quality for all of
                 the different latency conditions. The perceived quality
                 dropped significantly between latency conditions 70 and
                 100 ms when the feedback modality was tactile or audio,
                 and between 100 and 150 ms when the feedback modality
                  was visual. When the latency was 300 ms for all feedback
                 modalities, the quality of the buttons was rated
                 significantly lower than in all of the other latency
                 conditions, suggesting that a long latency between a
                 touch on the screen and feedback is problematic for
                 users. Together with PSS and these quality ratings, a
                 75\% threshold was established to define a guideline
                 for the recommended latency range between touch and
                 feedback. Our guideline suggests that tactile feedback
                 latency should be between 5 and 50 ms, audio feedback
                 latency between 20 and 70 ms, and visual feedback
                 latency between 30 and 85 ms. Using these values will
                 ensure that users will perceive the feedback as
                 simultaneous with the finger's touch. These values also
                 ensure that the users do not perceive reduced quality.
                 These results will guide engineers and designers of
                 touchscreen interactions by showing the trade-offs
                 between latency and user preference and the effects
                 that their choices might have on the quality of the
                 interactions and feedback they design.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

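The abstract describes fitting a Gaussian model to binary simultaneity
judgments by maximum likelihood and reading off the PSS. The exact
parameterization is not given there; the sketch below assumes a
Gaussian-shaped proportion-of-"simultaneous" curve whose peak location
serves as the PSS (all names are hypothetical):

import numpy as np
from scipy.optimize import minimize

def fit_pss(latencies_ms, judged_simultaneous):
    # p(t) = exp(-0.5*((t - mu)/sigma)**2), fitted to 0/1 judgments
    # by maximizing the Bernoulli log-likelihood; mu estimates the PSS.
    t = np.asarray(latencies_ms, float)
    r = np.asarray(judged_simultaneous, float)

    def nll(theta):
        mu, log_sigma = theta
        p = np.exp(-0.5 * ((t - mu) / np.exp(log_sigma)) ** 2)
        p = np.clip(p, 1e-9, 1.0 - 1e-9)
        return -np.sum(r * np.log(p) + (1.0 - r) * np.log(1.0 - p))

    fit = minimize(nll, x0=np.array([20.0, np.log(50.0)]),
                   method="Nelder-Mead")
    mu, log_sigma = fit.x
    return mu, np.exp(log_sigma)
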
@Article{Vicovaro:2014:PEM,
  author =       "Michele Vicovaro and Ludovic Hoyet and Luigi Burigana
                  and Carol O'Sullivan",
  title =        "Perceptual Evaluation of Motion Editing for Realistic
                 Throwing Animations",
  journal =      j-TAP,
  volume =       "11",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2617916",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:24 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Animation budget constraints during the development of
                 a game often call for the use of a limited set of
                 generic motions. Editing operations are thus generally
                 required to animate virtual characters with a
                 sufficient level of variety. Evaluating the perceptual
                 plausibility of edited animations can therefore
                 contribute greatly towards producing visually plausible
                 animations. In this article, we study observers'
                 sensitivity to manipulations of overarm and underarm
                 biological throwing animations. In the first
                 experiment, we modified the release velocity of the
                 ball while leaving the motion of the virtual thrower
                 and the angle of release of the ball unchanged. In the
                 second experiment, we evaluated the possibility of
                 further modifying throwing animations by simultaneously
                 editing the motion of the thrower and the release
                 velocity of the ball, using dynamic time warping. In
                 both experiments, we found that participants perceived
                 shortened underarm throws to be particularly unnatural.
                 We also found that modifying the thrower's motion in
                 addition to modifying the release velocity of the ball
                 does not significantly improve the perceptual
                 plausibility of edited throwing animations. In the
                 third experiment, we modified the angle of release of
                 the ball while leaving the magnitude of release
                 velocity and the motion of the thrower unchanged, and
                  found that this editing operation is effective for
                 improving the perceptual plausibility of shortened
                 underarm throws. Finally, in Experiment 4, we replaced
                 the virtual human thrower with a mechanical throwing
                 device (a ramp) and found the opposite pattern of
                 sensitivity to modifications of the release velocity,
                 indicating that biological and physical throws are
                 subject to different perceptual rules. Our results
                 provide valuable guidelines for developers of games and
                 virtual reality applications by specifying thresholds
                 for the perceptual plausibility of throwing
                 manipulations while also providing several interesting
                 insights for researchers in visual perception of
                 biological motion.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

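Experiment 2 above retimes the thrower's motion with dynamic time
warping. The abstract does not detail the implementation; for
orientation, a generic textbook DTW cost between two sequences:

import numpy as np

def dtw_cost(a, b, dist=lambda x, y: abs(x - y)):
    # Classic O(len(a)*len(b)) dynamic-programming alignment cost.
    n, m = len(a), len(b)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            D[i, j] = dist(a[i - 1], b[j - 1]) + min(
                D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]
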
@Article{Trutoiu:2014:STL,
  author =       "Laura C. Trutoiu and Elizabeth J. Carter and Nancy
                 Pollard and Jeffrey F. Cohn and Jessica K. Hodgins",
  title =        "Spatial and Temporal Linearities in Posed and
                 Spontaneous Smiles",
  journal =      j-TAP,
  volume =       "11",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2641569",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Creating facial animations that convey an animator's
                 intent is a difficult task because animation techniques
                 are necessarily an approximation of the subtle motion
                 of the face. Some animation techniques may result in
                 linearization of the motion of vertices in space
                 (blendshapes, for example), and other, simpler
                 techniques may result in linearization of the motion in
                 time. In this article, we consider the problem of
                 animating smiles and explore how these simplifications
                 in space and time affect the perceived genuineness of
                 smiles. We create realistic animations of spontaneous
                 and posed smiles from high-resolution motion capture
                 data for two computer-generated characters. The motion
                 capture data is processed to linearize the spatial or
                 temporal properties of the original animation. Through
                 perceptual experiments, we evaluate the genuineness of
                 the resulting smiles. Both space and time impact the
                 perceived genuineness. We also investigate the effect
                 of head motion in the perception of smiles and show
                 similar results for the impact of linearization on
                 animations with and without head motion. Our results
                 indicate that spontaneous smiles are more heavily
                 affected by linearizing the spatial and temporal
                 properties than posed smiles. Moreover, the spontaneous
                 smiles were more affected by temporal linearization
                 than spatial linearization. Our results are in
                 accordance with previous research on linearities in
                 facial animation and allow us to conclude that a model
                 of smiles must include a nonlinear model of
                 velocities.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bernhard:2014:GOM,
  author =       "Matthias Bernhard and Efstathios Stavrakis and Michael
                 Hecher and Michael Wimmer",
  title =        "Gaze-to-Object Mapping during Visual Search in {$3$D}
                 Virtual Environments",
  journal =      j-TAP,
  volume =       "11",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2644812",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Stimuli obtained from highly dynamic 3D virtual
                 environments and synchronous eye-tracking data are
                 commonly used by algorithms that strive to correlate
                 gaze to scene objects, a process referred to as
                 gaze-to-object mapping (GTOM). We propose to address
                 this problem with a probabilistic approach using
                 Bayesian inference. The desired result of the inference
                 is a predicted probability density function (PDF)
                 specifying for each object in the scene a probability
                 to be attended by the user. To evaluate the quality of
                 a predicted attention PDF, we present a methodology to
                 assess the information value (i.e., likelihood) in the
                 predictions of different approaches that can be used to
                 infer object attention. To this end, we propose an
                 experiment based on a visual search task, which allows
                 us to determine the object of attention at a certain
                 point in time under controlled conditions. We perform
                 this experiment with a wide range of static and dynamic
                 visual scenes to obtain a ground-truth evaluation
                 dataset, allowing us to assess GTOM techniques in a set
                 of 30 particularly challenging cases.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

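The Bayesian formulation above yields, per gaze sample, a posterior
over scene objects. A deliberately simplified sketch, assuming a
single isotropic Gaussian gaze likelihood around each object's
projected position (the paper's likelihood model is richer; all names
are hypothetical):

import numpy as np

def attention_posterior(gaze_xy, object_xy, prior, sigma=1.0):
    # Bayes rule: posterior(object) ~ prior(object) * p(gaze | object).
    g = np.asarray(gaze_xy, float)
    objs = np.asarray(object_xy, float)   # shape (n_objects, 2)
    d2 = np.sum((objs - g) ** 2, axis=1)
    like = np.exp(-0.5 * d2 / sigma ** 2)
    post = np.asarray(prior, float) * like
    return post / post.sum()
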
@Article{Kellnhofer:2014:SDN,
  author =       "Petr Kellnhofer and Tobias Ritschel and Peter Vangorp
                 and Karol Myszkowski and Hans-Peter Seidel",
  title =        "Stereo Day-for-Night: Retargeting Disparity for
                 Scotopic Vision",
  journal =      j-TAP,
  volume =       "11",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2644813",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Sep 13 13:10:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Several approaches attempt to reproduce the appearance
                 of a scotopic low-light night scene on a photopic
                 display (``day-for-night'') by introducing color
                 desaturation, loss of acuity, and the Purkinje shift
                 toward blue colors. We argue that faithful stereo
                 reproduction of night scenes on photopic stereo
                 displays requires manipulation of not only color but
                 also binocular disparity. To this end, we performed a
                 psychophysics experiment to devise a model of disparity
                 at scotopic luminance levels. Using this model, we can
                  match the binocular disparity of scotopic stereo
                  content displayed on a photopic monitor to the
                  disparity that would be perceived if the scene were
                  actually scotopic.
                 The model allows for real-time computation of common
                 stereo content as found in interactive applications
                 such as simulators or computer games.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bhardwaj:2015:DAP,
  author =       "Amit Bhardwaj and Subhasis Chaudhuri and Onkar
                 Dabeer",
  title =        "Design and Analysis of Predictive Sampling of Haptic
                 Signals",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670533",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we identify adaptive sampling
                 strategies for haptic signals. Our approach relies on
                 experiments wherein we record the response of several
                 users to haptic stimuli. We then learn different
                 classifiers to predict the user response based on a
                 variety of causal signal features. The classifiers that
                 have good prediction accuracy serve as candidates to be
                 used in adaptive sampling. We compare the resultant
                 adaptive samplers based on their rate-distortion
                 tradeoff using synthetic as well as natural data. For
                  our experiments, we use a haptic device with a
                  maximum force level of 3 N, and we recruit 10 users.
                  Each user is subjected
                 to several piecewise constant haptic signals and is
                 required to click a button whenever he perceives a
                 change in the signal. For classification, we not only
                 use classifiers based on level crossings and Weber's
                 law but also random forests using a variety of causal
                 signal features. The random forest typically yields the
                 best prediction accuracy and a study of the importance
                 of variables suggests that the level crossings and
                 Weber's classifier features are most dominant. The
                 classifiers based on level crossings and Weber's law
                 have good accuracy (more than 90\%) and are only
                 marginally inferior to random forests. The level
                 crossings classifier consistently outperforms the one
                 based on Weber's law even though the gap is small.
                 Given their simple parametric form, the level crossings
                 and Weber's law--based classifiers are good candidates
                 to be used for adaptive sampling. We study their
                 rate-distortion performance and find that the level
                 crossing sampler is superior. For example, for haptic
                 signals obtained while exploring various rendered
                 objects, for an average sampling rate of 10 samples per
                 second, the level crossings adaptive sampler has a mean
                  square error about 3 dB less than the Weber sampler.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

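The two simple parametric rules the abstract singles out, level
crossings and Weber's law, can be stated compactly. A hedged sketch
with hypothetical thresholds delta (absolute) and c (Weber fraction):

def level_crossing_sampler(signal, delta):
    # Transmit a sample whenever the signal moves at least delta
    # away from the last transmitted value.
    samples, last = [(0, signal[0])], signal[0]
    for i, x in enumerate(signal[1:], start=1):
        if abs(x - last) >= delta:
            samples.append((i, x))
            last = x
    return samples

def weber_sampler(signal, c):
    # Transmit a sample whenever the relative change since the last
    # transmitted value exceeds the Weber fraction c.
    samples, last = [(0, signal[0])], signal[0]
    for i, x in enumerate(signal[1:], start=1):
        if abs(x - last) >= c * abs(last):
            samples.append((i, x))
            last = x
    return samples

In the abstract's rate-distortion comparison, the level-crossing rule
wins by about 3 dB at an average of 10 samples per second.
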
@Article{Koulieris:2015:AHL,
  author =       "George Alex Koulieris and George Drettakis and Douglas
                 Cunningham and Katerina Mania",
  title =        "An Automated High-Level Saliency Predictor for Smart
                 Game Balancing",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2637479",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Successfully predicting visual attention can
                 significantly improve many aspects of computer
                 graphics: scene design, interactivity and rendering.
                 Most previous attention models are mainly based on
                 low-level image features, and fail to take into account
                 high-level factors such as scene context, topology, or
                 task. Low-level saliency has previously been combined
                 with task maps, but only for predetermined tasks. Thus,
                 the application of these methods to graphics (e.g., for
                 selective rendering) has not achieved its full
                 potential. In this article, we present the first
                 automated high-level saliency predictor incorporating
                 two hypotheses from perception and cognitive science
                 that can be adapted to different tasks. The first
                  states that a scene comprises objects expected to be
                  found in a specific context as well as objects out of
                  context, which are salient (scene schemata), while
                  the other claims that the viewer's attention is
                  captured by
                 isolated objects (singletons). We propose a new model
                 of attention by extending Eckstein's Differential
                 Weighting Model. We conducted a formal eye-tracking
                 experiment which confirmed that object saliency guides
                 attention to specific objects in a game scene and
                 determined appropriate parameters for a model. We
                 present a GPU-based system architecture that estimates
                  the probabilities of objects to be attended in real
                  time. We embedded this tool in a game level editor to
                 automatically adjust game level difficulty based on
                 object saliency, offering a novel way to facilitate
                 game design. We perform a study confirming that game
                 level completion time depends on object topology as
                 predicted by our system.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhang:2015:DFA,
  author =       "Tingting Zhang and Louise O'Hare and Paul B. Hibbard
                 and Harold T. Nefs and Ingrid Heynderickx",
  title =        "Depth of Field Affects Perceived Depth in
                 Stereographs",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2667227",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Although it has been reported that depth of field
                 influences depth perception in nonstereo photographs,
                 it remains unclear how depth of field affects depth
                 perception under stereo viewing conditions. We showed
                 participants stereo photographs with different depths
                 of field using a Wheatstone stereoscope and a
                 commercially available 3D TV. The depicted scene
                 contained a floor, a background, and a measuring probe
                 at different locations. Participants drew a floor plan
                 of the depicted scene to scale. We found that perceived
                 depth decreased with decreasing depth of field for
                 shallow depths of field in scenes containing a
                 height-in-the-field cue. For larger depths of field,
                 different effects were found depending on the display
                 system and the viewing distance. There was no effect on
                 perceived depth using the 3D TV, but perceived depth
                 decreased with increasing depth of field using the
                 Wheatstone stereoscope. However, in the 3D TV case, we
                 found that the perceived depth decreased with
                 increasing depth of field in scenes in which the
                 height-in-the-field cue was removed. This indicates
                 that the effect of depth of field on perceived depth
                 may be influenced by other depth cues in the scene,
                 such as height-in-the-field cues.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Stenholt:2015:BUC,
  author =       "Rasmus Stenholt",
  title =        "On the Benefits of Using Constant Visual Angle Glyphs
                 in Interactive Exploration of {$3$D} Scatterplots",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2677971",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual exploration of clouds of data points is an
                 important application of virtual environments. The
                 common goal of this activity is to use the strengths of
                 human perception to identify interesting structures in
                 data, which are often not detected using traditional,
                 computational analysis methods. In this article, we
                 seek to identify some of the parameters that affect how
                 well structures in visualized data clouds can be
                 identified by a human observer. Two of the primary
                 factors tested are the volumetric densities of the
                 visualized structures and the presence/absence of
                 clutter around the displayed structures. Furthermore,
                  we introduce a new approach to glyph visualization,
                  constant visual angle (CVA) glyphs, which
                 has the potential to mitigate the effect of clutter at
                 the cost of dispensing with the common real-world depth
                 cue of relative size. In a controlled experiment where
                 test subjects had to locate and select visualized
                 structures in an immersive virtual environment, we
                 identified several significant results. One result is
                 that CVA glyphs ease perception of structures in
                 cluttered environments while not deteriorating it when
                 clutter is absent. Another is the existence of
                 threshold densities, above which perception of
                 structures becomes easier and more precise.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

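A constant visual angle glyph keeps its angular size fixed by
rescaling its world-space size with viewing distance, which is exactly
why relative size is forfeited as a depth cue. The geometry reduces to
one line (a sketch, not the paper's code):

import math

def cva_glyph_diameter(distance, visual_angle_deg):
    # World-space diameter subtending a fixed visual angle at the
    # given camera distance.
    return 2.0 * distance * math.tan(math.radians(visual_angle_deg) / 2.0)

Rescaling every glyph per frame by its current distance to the camera
implements the idea.
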
@Article{Komogortsev:2015:BOP,
  author =       "Oleg Komogortsev and Corey Holland and Alex Karpov and
                 Larry R. Price",
  title =        "Biometrics via Oculomotor Plant Characteristics:
                 Impact of Parameters in Oculomotor Plant Model",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668891",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article proposes and evaluates a novel biometric
                 approach utilizing the internal, nonvisible, anatomical
                 structure of the human eye. The proposed method
                 estimates the anatomical properties of the human
                 oculomotor plant from the measurable properties of
                 human eye movements, utilizing a two-dimensional linear
                 homeomorphic model of the oculomotor plant. The derived
                 properties are evaluated within a biometric framework
                 to determine their efficacy in both verification and
                 identification scenarios. The results suggest that the
                 physical properties derived from the oculomotor plant
                 model are capable of achieving 20.3\% equal error rate
                 and 65.7\% rank-1 identification rate on
                 high-resolution equipment involving 32 subjects, with
                 biometric samples taken over four recording sessions;
                 or 22.2\% equal error rate and 12.6\% rank-1
                 identification rate on low-resolution equipment
                 involving 172 subjects, with biometric samples taken
                 over two recording sessions.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

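The equal error rates quoted above (20.3% and 22.2%) refer to the
standard verification operating point where the false accept and false
reject rates coincide. A generic way to compute it from match scores
(illustrative only, not tied to the oculomotor-plant features):

import numpy as np

def equal_error_rate(genuine_scores, impostor_scores):
    # Sweep thresholds; report the point where FAR and FRR meet.
    gen = np.asarray(genuine_scores, float)
    imp = np.asarray(impostor_scores, float)
    best_gap, eer = np.inf, 1.0
    for th in np.sort(np.concatenate([gen, imp])):
        far = np.mean(imp >= th)   # impostors accepted
        frr = np.mean(gen < th)    # genuine users rejected
        if abs(far - frr) < best_gap:
            best_gap, eer = abs(far - frr), 0.5 * (far + frr)
    return eer
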
@Article{Moscoso:2015:ASI,
  author =       "Claudia Moscoso and Barbara Matusiak and U. Peter
                 Svensson and Krzysztof Orleanski",
  title =        "Analysis of Stereoscopic Images as a New Method for
                 Daylighting Studies",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2665078",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article presents the comparison analysis and
                 results of an experiment designed with two presentation
                 modes: real environments and stereoscopic images. The
                 aim of this article is of a methodological nature, with
                 a main objective of analyzing the usability of
                 stereoscopic image presentation as a research tool to
                 evaluate the daylight impact on the perceived
                 architectural quality of small rooms. Twenty-six
                  participants evaluated 12 different stimuli, divided
                  equally between real rooms and stereoscopic images.
                 The stimuli were two similar rooms of different
                 achromatic-colored surfaces (white and black) with
                 three different daylight openings in each room. The
                 participants assessed nine architectural quality
                 attributes on a semantic differential scale. A
                 pragmatic statistical approach (Bland--Altman Approach)
                 for assessing agreement between two methods was used.
                 Results suggest that stereoscopic image presentation is
                 an accurate method to be used when evaluating all nine
                 attributes in the white room and nearly all attributes
                 in the black room.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

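The Bland--Altman approach named above assesses agreement between two
measurement methods through the bias and 95% limits of agreement of
their paired differences. A minimal sketch:

import numpy as np

def bland_altman_limits(method_a, method_b):
    # Bias and 95% limits of agreement (bias +/- 1.96 sd of the
    # paired differences) between two rating methods.
    diff = np.asarray(method_a, float) - np.asarray(method_b, float)
    bias = diff.mean()
    sd = diff.std(ddof=1)
    return bias, (bias - 1.96 * sd, bias + 1.96 * sd)
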
@Article{DePoli:2015:RID,
  author =       "Giovanni {De Poli} and Sergio Canazza and Antonio
                 Rod{\`a} and Emery Schubert",
  title =        "The Role of Individual Difference in Judging
                 Expressiveness of Computer-Assisted Music Performances
                 by Experts",
  journal =      j-TAP,
  volume =       "11",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668124",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 12 11:49:53 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Computational systems for generating expressive
                 musical performances have been studied for several
                 decades now. These models are generally evaluated by
                 comparing their predictions with actual performances,
                  both in terms of performance parameters and from a
                  subjective point of view, often focusing on very
                  specific aspects
                 of the model. However, little is known about how
                 listeners evaluate the generated performances and what
                 factors influence their judgement and appreciation. In
                 this article, we present two studies, conducted during
                 two dedicated workshops, to start understanding how the
                 audience judges entire performances employing different
                 approaches to generating musical expression. In the
                 preliminary study, 40 participants completed a
                 questionnaire in response to five different
                 computer-generated and computer-assisted performances,
                 rating preference and describing the expressiveness of
                 the performances. In the second, ``GATM'' (Gruppo di
                 Analisi e Teoria Musicale) study, 23 participants also
                 completed the Music Cognitive Style questionnaire.
                 Results indicated that music systemizers tend to
                 describe musical expression in terms of the formal
                 aspects of the music, and music empathizers tend to
                 report expressiveness in terms of emotions and
                 characters. However, high systemizers did not differ
                 from high empathizers in their mean preference score
                 across the five pieces. We also concluded that
                 listeners tend not to focus on the basic technical
                 aspects of playing when judging computer-assisted and
                 computer-generated performances. Implications for the
                 significance of individual differences in judging
                 musical expression are discussed.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Leyrer:2015:EHM,
  author =       "Markus Leyrer and Sally A. Linkenauger and Heinrich H.
                 B{\"u}lthoff and Betty J. Mohler",
  title =        "Eye Height Manipulations: a Possible Solution to
                 Reduce Underestimation of Egocentric Distances in
                 Head-Mounted Displays",
  journal =      j-TAP,
  volume =       "12",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699254",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 17 19:00:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual reality technology can be considered a
                 multipurpose tool for diverse applications in various
                 domains, for example, training, prototyping, design,
                 entertainment, and research investigating human
                 perception. However, for many of these applications, it
                 is necessary that the designed and computer-generated
                 virtual environments are perceived as a replica of the
                 real world. Many research studies have shown that this
                 is not necessarily the case. Specifically, egocentric
                 distances are underestimated compared to real-world
                 estimates regardless of whether the virtual environment
                 is displayed in a head-mounted display or on an
                 immersive large-screen display. While the main reason
                 for this observed distance underestimation is still
                 unknown, we investigate a potential approach to reduce
                 or even eliminate this distance underestimation.
                  Building on the relationship between the angle of
                  declination below the horizon and perceived
                  egocentric distance, we describe how eye height
                  manipulations in
                 virtual reality should affect perceived distances. In
                 addition, we describe how this relationship could be
                 exploited to reduce distance underestimation for
                 individual users. In a first experiment, we investigate
                 the influence of a manipulated eye height on an
                 action-based measure of egocentric distance perception.
                  We found that eye height manipulations have
                  predictable effects on an action-based measure of
                  egocentric distance similar to those we previously
                  observed for a cognitive measure. This might make
                  this approach more
                 useful than other proposed solutions across different
                 scenarios in various domains, for example, for
                 collaborative tasks. In three additional experiments,
                 we investigate the influence of an individualized
                 manipulation of eye height to reduce distance
                 underestimation in a sparse-cue and a rich-cue
                 environment. In these experiments, we demonstrate that
                 a simple eye height manipulation can be used to
                 selectively alter perceived distances on an individual
                 basis, which could be helpful to enable every user to
                 have an experience close to what was intended by the
                 content designer.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

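The angle-of-declination relationship the authors build on can be made
concrete: a ground target seen at declination theta by an observer who
assumes eye height e is perceived at distance e / tan(theta). Under
this reading (a back-of-the-envelope sketch, not the authors' model
code), rendering with a lowered eye height inflates perceived distance
by the ratio of assumed to rendered eye height:

import math

def perceived_distance(assumed_eye_height, rendered_eye_height,
                       true_distance):
    # The display shows a ground target at theta = atan(e_r / d); an
    # observer assuming eye height e_a recovers d' = e_a / tan(theta),
    # i.e. d' = d * e_a / e_r.
    theta = math.atan2(rendered_eye_height, true_distance)
    return assumed_eye_height / math.tan(theta)
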
@Article{Niewiadomski:2015:EWP,
  author =       "Radoslaw Niewiadomski and Catherine Pelachaud",
  title =        "The Effect of Wrinkles, Presentation Mode, and
                 Intensity on the Perception of Facial Actions and
                 Full-Face Expressions of Laughter",
  journal =      j-TAP,
  volume =       "12",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699255",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 17 19:00:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article focuses on the identification and
                 perception of facial action units displayed alone as
                 well as the meaning decoding and perception of
                 full-face synthesized expressions of laughter. We argue
                 that the adequate representation of single action units
                 is important in the decoding and perception of
                 full-face expressions. In particular, we focus on three
                 factors that may influence the identification and
                 perception of single actions and full-face expressions:
                 their presentation mode (static vs. dynamic), their
                 intensity, and the presence of wrinkles. For the
                 purpose of this study, we used a hybrid approach for
                 animation synthesis that combines data-driven and
                 procedural animations with synthesized wrinkles
                  generated using a bump mapping method. Using this
                  animation technique, we created animations of single
                 action units and full-face movements of two virtual
                 characters. Next, we conducted two studies to evaluate
                 the role of presentation mode, intensity, and wrinkles
                 in single actions and full-face context-free
                 expressions. Our evaluation results show that intensity
                 and presentation mode influence (1) the identification
                 of single action units and (2) the perceived quality of
                 the animation. At the same time, wrinkles (3) are
                 useful in the identification of a single action unit
                 and (4) influence the perceived meaning attached to the
                 animation of full-face expressions. Thus, all factors
                 are important for successful communication of
                 expressions displayed by virtual characters.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hakala:2015:DAC,
  author =       "Jussi H. Hakala and Pirkko Oittinen and Jukka P.
                 H{\"a}kkinen",
  title =        "Depth Artifacts Caused by Spatial Interlacing in
                 Stereoscopic {$3$D} Displays",
  journal =      j-TAP,
  volume =       "12",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699266",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 17 19:00:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most spatially interlacing stereoscopic 3D displays
                 display odd and even rows of an image to either the
                 left or the right eye of the viewer. The visual system
                 then fuses the interlaced image into a single percept.
                 This row-based interlacing creates a small vertical
                 disparity between the images; however, interlacing may
                 also induce horizontal disparities, thus generating
                 depth artifacts. Whether people perceive the depth
                 artifacts and, if so, what is the magnitude of the
                 artifacts are unknown. In this study, we hypothesized
                  and tested whether people perceive interlaced edges
                  at different depth levels. We tested oblique edge
                 orientations ranging from 2 degrees to 32 degrees and
                 pixel sizes ranging from 16 to 79 arcsec of visual
                 angle in a depth probe experiment. Five participants
                 viewed the visual stimuli through a stereoscope under
                 three viewing conditions: noninterlaced, interlaced,
                 and row averaged (i.e., where even and odd rows are
                 averaged). Our results indicated that people perceive
                 depth artifacts when viewing interlaced stereoscopic
                 images and that these depth artifacts increase with
                 pixel size and decrease with edge orientation angle. A
                 pixel size of 32 arcsec of visual angle still evoked
                 depth percepts, whereas 16 arcsec did not.
                  Row averaging effectively eliminated these depth
                 artifacts. These findings have implications for display
                 design, content production, image quality studies, and
                 stereoscopic games and software.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

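The row-averaged condition above blends even and odd rows so that the
two eyes' interlaced views carry the same edge positions, removing the
spurious horizontal disparity at oblique edges. One plausible reading
in numpy (the exact resampling used in the paper is an assumption
here):

import numpy as np

def row_average(img):
    # Replace each row by the mean of itself and the row below, so
    # adjacent even/odd (left-/right-eye) rows agree.
    img = np.asarray(img, float)
    out = img.copy()
    out[:-1] = 0.5 * (img[:-1] + img[1:])
    return out
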
@Article{Abhari:2015:VEM,
  author =       "Kamyar Abhari and John S. H. Baxter and Ali R. Khan
                 and Terry M. Peters and Sandrine {De Ribaupierre} and
                 Roy Eagleson",
  title =        "Visual Enhancement of {MR} Angiography Images to
                 Facilitate Planning of Arteriovenous Malformation
                 Interventions",
  journal =      j-TAP,
  volume =       "12",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2701425",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 17 19:00:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The primary purpose of medical image visualization is
                 to improve patient outcomes by facilitating the
                 inspection, analysis, and interpretation of patient
                 data. This is only possible if the users' perceptual
                 and cognitive limitations are taken into account during
                 every step of design, implementation, and evaluation of
                 interactive displays. Visualization of medical images,
                 if executed effectively and efficiently, can empower
                 physicians to explore patient data rapidly and
                 accurately with minimal cognitive effort. This article
                 describes a specific case study in biomedical
                 visualization system design and evaluation, which is
                 the visualization of MR angiography images for planning
                 arteriovenous malformation (AVM) interventions. The
                 success of an AVM intervention greatly depends on the
                 surgeon gaining a full understanding of the anatomy of
                 the malformation and its surrounding structures.
                 Accordingly, the purpose of this study was to
                 investigate the usability of visualization modalities
                 involving contour enhancement and stereopsis in the
                 identification and localization of vascular structures
                 using objective user studies. Our preliminary results
                 indicate that contour enhancement, particularly when
                 combined with stereopsis, improves the perception of
                 connectivity and relative depth between different
                 structures.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Albrecht:2015:ADP,
  author =       "Robert Albrecht and Tapio Lokki",
  title =        "Auditory Distance Presentation in an Urban Augmented
                 Reality Environment",
  journal =      j-TAP,
  volume =       "12",
  number =       "2",
  pages =        "5:1--5:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2723568",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 11 08:23:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Presenting points of interest in the environment by
                 means of audio augmented reality offers benefits
                 compared with traditional visual augmented reality and
                 map-based approaches. However, presentation of distant
                 virtual sound sources is problematic. This study looks
                 at combining well-known auditory distance cues to
                 convey the distance of points of interest. The results
                 indicate that although the provided cues are
                 intuitively mapped to relatively short distances, users
                 can, with only a little training, learn to map these
                 cues to larger distances.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
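
%%% For context on Albrecht:2015:ADP: the best-known auditory distance
%%% cues a renderer can combine are intensity loss (about -6 dB per
%%% doubling of distance for a point source in the free field),
%%% high-frequency air absorption, and the direct-to-reverberant
%%% energy ratio. A minimal sketch of the intensity cue alone (an
%%% illustration, not the authors' system):
%%%
%%%     import math
%%%
%%%     def distance_gain_db(distance_m, reference_m=1.0):
%%%         """Free-field point-source attenuation: -6.02 dB per
%%%         doubling of distance relative to reference_m."""
%%%         return -20.0 * math.log10(distance_m / reference_m)
%%%
%%%     print(distance_gain_db(2.0))    # ~ -6.0 dB
%%%     print(distance_gain_db(100.0))  # ~ -40.0 dB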

@Article{Lin:2015:AJH,
  author =       "Qiufeng Lin and John Rieser and Bobby Bodenheimer",
  title =        "Affordance Judgments in {HMD}-Based Virtual
                 Environments: Stepping over a Pole and Stepping off a
                 Ledge",
  journal =      j-TAP,
  volume =       "12",
  number =       "2",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2720020",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 11 08:23:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "People judge what they can and cannot do all the time
                 when acting in the physical world. Can I step over that
                 fence or do I need to duck under it? Can I step off of
                 that ledge or do I need to climb off of it? These
                 qualities of the environment that people perceive that
                 allow them to act are called affordances. This article
                 compares people's judgments of affordances on two tasks
                 in both the real world and in virtual environments
                 presented with head-mounted displays. The two tasks
                 were stepping over or ducking under a pole, and
                 stepping straight off of a ledge. Comparisons between
                 the real world and virtual environments are important
                 because they allow us to evaluate the fidelity of
                 virtual environments. Another reason is that virtual
                 environment technologies enable precise control of the
                 myriad perceptual cues at work in the physical world
                 and deepen our understanding of how people use vision
                 to decide how to act. In the experiments presented
                 here, the presence or absence of a self-avatar (an
                 animated graphical representation of a person embedded
                 in the virtual environment) was a central factor.
                 Another important factor was the presence or absence of
                 action, that is, whether people performed the task or
                 reported that they could or could not perform the task.
                 The results show that animated self-avatars provide
                 critical information for people deciding what they can
                 and cannot do in virtual environments, and that action
                 is significant in people's affordance judgments.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Renner:2015:ISB,
  author =       "Rebekka S. Renner and Erik Steindecker and Mathias
                 M{\"u}Ller and Boris M. Velichkovsky and Ralph Stelzer
                 and Sebastian Pannasch and Jens R. Helmert",
  title =        "The Influence of the Stereo Base on Blind and Sighted
                 Reaches in a Virtual Environment",
  journal =      j-TAP,
  volume =       "12",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2724716",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 11 08:23:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In virtual environments, perceived distances are
                 frequently reported to be shorter than intended. One
                 important parameter for spatial perception in a
                 stereoscopic virtual environment is the stereo base,
                 that is, the distance between the two viewing cameras.
                 We systematically varied the stereo base
                 relative to the interpupillary distance (IPD) and
                 examined influences on distance and size perception.
                 Furthermore, we tested whether an individual adjustment
                 of the stereo base through an alignment task would
                 reduce the errors in distance estimation. Participants
                 performed reaching movements toward a virtual tennis
                 ball either with closed eyes (blind reaches) or open
                 eyes (sighted reaches). Using the participants'
                 individual IPD, the stereo base was set to (a) the IPD,
                 (b) proportionally smaller, (c) proportionally larger,
                 or (d) adjusted according to the individual performance
                 in an alignment task that was conducted beforehand.
                 Overall, consistent with previous research, distances
                 were underestimated. As expected, with a smaller stereo
                 base, the virtual object was perceived as being farther
                 away and bigger, in contrast to a larger stereo base,
                 where the virtual object was perceived to be nearer and
                 smaller. However, the manipulation of the stereo base
                 influenced blind reaching estimates to a smaller extent
                 than expected, which might be due to a combination of
                 binocular disparity and pictorial depth cues. In
                 sighted reaching, when visual feedback was available,
                 presumably the use of disparity matching led to a
                 larger effect of the stereo base. The use of an
                 individually adjusted stereo base diminished the
                 average underestimation but did not reduce
                 interindividual variance. Interindividual differences
                 were task specific and could not be explained through
                 differences in stereo acuity or fixation disparity.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
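
%%% The direction of the Renner:2015:ISB effects follows from an
%%% idealized disparity-scaling argument (an illustration; the paper
%%% shows pictorial cues damp the effect): rendering with stereo base b
%%% scales all screen disparities by b/IPD, so if the visual system
%%% interprets them with the true IPD, a point at true distance d is
%%% seen near d' = d * IPD / b, that is, farther (and, by size-distance
%%% invariance, bigger) when b < IPD.
%%%
%%%     def apparent_distance(true_dist_m, ipd_m, stereo_base_m):
%%%         """Idealized disparity-scaling prediction (assumption:
%%%         disparity is the only depth cue)."""
%%%         return true_dist_m * ipd_m / stereo_base_m
%%%
%%%     print(apparent_distance(0.5, 0.064, 0.051))  # ~0.63 m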

@Article{Interrante:2015:WMN,
  author =       "Victoria Interrante and Diego Gutierrez",
  title =        "Welcome Message from the New {Editors-in-Chief}",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "8e:1--8e:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2798732",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "8e",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Williams:2015:IPE,
  author =       "Duncan Williams and Alexis Kirke and Eduardo Miranda
                 and Ian Daly and James Hallowell and James Weaver and
                 Asad Malik and Etienne Roesch and Faustina Hwang and
                 Slawomir Nasuto",
  title =        "Investigating Perceived Emotional Correlates of
                 Rhythmic Density in Algorithmic Music Composition",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "8:1--8:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2749466",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Affective algorithmic composition is a growing field
                 that combines perceptually motivated affective
                 computing strategies with novel music generation. This
                 article presents work toward the development of one
                 application. The long-term goal is to develop a
                 responsive and adaptive system for inducing affect that
                 is both controlled and validated by biophysical
                 measures. Literature documenting perceptual responses
                 to music identifies a variety of musical features and
                 possible affective correlations, but perceptual
                 evaluations of these musical features for the purposes
                 of inclusion in a music generation system are not
                 readily available. A discrete feature, rhythmic density
                 (a function of note duration in each musical bar,
                 regardless of tempo), was selected because it was shown
                 to be well-correlated with affective responses in
                 existing literature. A prototype system was then
                 designed to produce controlled degrees of variation in
                 rhythmic density via a transformative algorithm. A
                 two-stage perceptual evaluation of a stimulus set
                 created by this prototype was then undertaken. First,
                 listener responses from a pairwise scaling experiment
                 were analyzed via Multidimensional Scaling Analysis
                 (MDS). The statistical best-fit solution was rotated
                 such that stimuli with the largest range of variation
                 were placed across the horizontal plane in two
                 dimensions. In this orientation, stimuli with
                 deliberate variation in rhythmic density appeared
                 farther from the source material used to generate them
                 than from stimuli generated by random permutation.
                 Second, the same stimulus set was then evaluated
                 according to the order suggested in the rotated
                 two-dimensional solution in a verbal elicitation
                 experiment. A Verbal Protocol Analysis (VPA) found that
                 listener perception of the stimulus set varied in at
                 least two commonly understood emotional descriptors,
                 which might be considered affective correlates of
                 rhythmic density. Thus, these results further
                 corroborate previous studies wherein musical parameters
                 are monitored for changes in emotional expression,
                 suggest that similarly parameterized control of
                 perceived emotional content in an affective algorithmic
                 composition system can be achieved, and provide a
                 methodology for evaluating and including further
                 possible musical features in such a system. Some
                 suggestions regarding the test procedure and analysis
                 techniques are also documented here.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
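
%%% Williams:2015:IPE defines rhythmic density only loosely (a function
%%% of note duration per bar, independent of tempo). One plausible,
%%% purely hypothetical reading scores a bar by onsets per beat, which
%%% is tempo-free because durations are expressed in beats:
%%%
%%%     def rhythmic_density(note_durations_beats, beats_per_bar=4):
%%%         """Onsets per beat in one bar (hypothetical definition,
%%%         for illustration only)."""
%%%         assert abs(sum(note_durations_beats) - beats_per_bar) < 1e-9
%%%         return len(note_durations_beats) / beats_per_bar
%%%
%%%     print(rhythmic_density([1, 1, 1, 1]))  # quarter notes -> 1.0
%%%     print(rhythmic_density([0.5] * 8))     # eighth notes  -> 2.0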

@Article{Pfluger:2015:SFW,
  author =       "Hermann Pfl{\"u}ger and Benjamin H{\"o}ferlin and
                 Michael Raschke and Thomas Ertl",
  title =        "Simulating Fixations When Looking at Visual Arts",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2736286",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When people look at pictures, they fixate on specific
                 areas. The sequences of such fixations are so
                 characteristic of certain pictures that metrics can be
                 derived that allow successful grouping of similar
                 pieces of visual art. However, determining enough
                 fixation sequences by eye tracking is not practically
                 feasible for large groups of people and pictures. In
                 order to get around this limitation, we present a novel
                 algorithm that simulates eye movements by calculating
                 scan paths for images and time frames in real time. The
                 basis of our algorithm is an attention model that
                 combines and optimizes rectangle features with
                 Adaboost. The model is adapted to the characteristics
                 of the retina, and its input is dependent on a few
                 earlier fixations. This method results in significant
                 improvements compared to previous approaches. Our
                 simulation process delivers the same data structures as
                 an eye tracker and thus can be analyzed with standard
                 eye-tracking software. A comparison with recorded data
                 from eye tracking experiments shows that our algorithm
                 for simulating fixations has a very good prediction
                 quality for the stimulus areas on which many subjects
                 focus. We also compare the results with those from
                 earlier works. Finally, we demonstrate how the
                 presented algorithm can be used to calculate the
                 similarity of pictures in terms of human perception.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Allison:2015:PTS,
  author =       "Robert S. Allison and Laurie M. Wilcox",
  title =        "Perceptual Tolerance to Stereoscopic {$3$D} Image
                 Distortion",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2770875",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "An intriguing aspect of picture perception is the
                 viewer's tolerance to variation in viewing position,
                 perspective, and display size. These factors are also
                 present in stereoscopic media, where there are
                 additional parameters associated with the camera
                 arrangement (e.g., separation, orientation). The
                 predicted amount of depth from disparity can be
                 obtained trigonometrically; however, perceived depth in
                 complex scenes often differs from geometric predictions
                 based on binocular disparity alone. To evaluate the
                 extent and the cause of deviations from geometric
                 predictions of depth from disparity in naturalistic
                 scenes, we recorded stereoscopic footage of an indoor
                 scene with a range of camera separations (camera
                 interaxial (IA) ranged from 3 to 95 mm) and displayed
                 them on a range of screen sizes. In a series of
                 experiments participants estimated 3D distances in the
                 scene relative to a reference scene, compared depth
                 between shots with different parameters, or reproduced
                 the depth between pairs of objects in the scene using
                 reaching or blind walking. The effects of IA and screen
                 size were consistently and markedly smaller than
                 predicted from the binocular viewing geometry,
                 suggesting that observers are able to compensate for
                 the predicted distortions. We conclude that the
                 presence of multiple realistic monocular depth cues
                 drives normalization of perceived depth from binocular
                 disparity. It is not clear to what extent these
                 differences are due to cognitive as opposed to
                 perceptual factors. However, it is notable that these
                 normalization processes are not task specific; they are
                 evident in both perception- and action-oriented
                 tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
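
%%% The trigonometric prediction mentioned in Allison:2015:PTS is
%%% standard stereoscopic-display geometry: for viewing distance D,
%%% interocular separation I, and on-screen parallax p (positive =
%%% uncrossed), similar triangles place the fused point a depth
%%% p * D / (I - p) behind the screen. A direct transcription, assuming
%%% consistent units:
%%%
%%%     def depth_behind_screen(D, I, p):
%%%         """Geometric depth from uncrossed screen parallax p."""
%%%         if p >= I:
%%%             raise ValueError("parallax >= interocular separation")
%%%         return p * D / (I - p)
%%%
%%%     # 2 m viewing distance, 64 mm IPD, 10 mm parallax:
%%%     print(depth_behind_screen(2.0, 0.064, 0.010))  # ~0.37 m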

@Article{Zibrek:2015:EEM,
  author =       "Katja Zibrek and Ludovic Hoyet and Kerstin Ruhland and
                 Rachel McDonnell",
  title =        "Exploring the Effect of Motion Type and Emotions on
                 the Perception of Gender in Virtual Humans",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2767130",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we investigate the perception of
                 gender from the motion of virtual humans under
                 different emotional conditions and explore the effect
                 of emotional bias on gender perception (e.g., anger
                 being attributed to males more than females). As motion
                 types can present different levels of physiological
                 cues, we also explore how two types of motion (walking
                 and conversations) are affected by emotional bias.
                 Walking typically displays more physiological cues
                 about gender (e.g., hip sway) and therefore is expected
                 to be less affected by emotional bias. To investigate
                 these effects, we used a corpus of captured facial and
                 body motions from four male and four female actors,
                 performing basic emotions through conversation and
                 walk. We expected that the appearance of the model
                 would also influence gender perception; therefore, we
                 displayed both male and female motions on two virtual
                 models of different sex. Two experiments were then
                 conducted to assess gender judgments from these
                 motions. In both experiments, participants were asked
                 to rate how male or female they considered the motions
                 to be under different emotional states, then classified
                 the emotions to determine how accurately they were
                 portrayed by actors. Overall, both experiments showed
                 that gender ratings were affected by the displayed
                 emotion. However, we found that conversations were
                 influenced by gender stereotypes to a greater extent
                 than walking motions. This was particularly true for
                 anger, which was perceived as male on both male and
                 female motions, and sadness, which was perceived as
                 less male when portrayed by male actors. We also found
                 a slight effect of the virtual model's appearance on
                 gender judgments. These results have
                 implications for the design and animation of virtual
                 humans.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mihtsentu:2015:DVS,
  author =       "Mezgeb Tesfayesus Mihtsentu and Colin Ware",
  title =        "Discrete Versus Solid: Representing Quantity Using
                 Linear, Area, and Volume Glyphs",
  journal =      j-TAP,
  volume =       "12",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2767129",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 28 17:30:54 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It is common in infographics for quantities to be
                 represented by stacks of discrete blocks. For example,
                 a magazine illustration showing automobile production
                 in different countries might use stacks of blocks with
                 each block representing a thousand cars. This is unlike
                 what is done to represent quantity in the charts used
                 by statisticians, or for quantitative glyphs used in
                 maps. In these cases, solid bars or solid area glyphs
                 such as circles are commonly used to represent
                 quantity. This raises the question of whether breaking
                 bars, area, or volume glyphs into discrete blocks can
                 improve the rapid estimation of quantity. We report on
                 a study where participants compared quantities
                 represented using bar, area, and volume glyphs in both
                 solid and discrete variants. The discrete variants used
                 up to $4$, $ 4 \times 4$, and $ 4 \times 4 \times 4$
                 blocks or $ 10$, $ 10 \times 10$, and $ 10 \times 10
                 \times 10$ blocks for bar, area, and volume,
                 respectively. The results show that people are
                 significantly more accurate in estimating quantities
                 using the discrete versions, but they take somewhat
                 longer. For both areas and volumes, the accuracy gains
                 were considerable.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
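
%%% A toy version of the discrete encoding compared in
%%% Mihtsentu:2015:DVS (an illustration, not the study's stimulus
%%% code): a value is quantized into filled blocks of a 4- or 10-unit
%%% stack, sheet, or cube, so readers count blocks instead of judging
%%% a continuous extent.
%%%
%%%     import math
%%%
%%%     def blocks_needed(value, max_value, side, dims):
%%%         """Filled blocks when max_value fills a side**dims
%%%         arrangement (side=4, dims=3 -> a 4x4x4 cube)."""
%%%         return math.ceil(value / max_value * side ** dims)
%%%
%%%     print(blocks_needed(37, 64, 4, 3))  # 37 of 64 cube blocks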

@Article{Kuhl:2015:ISI,
  author =       "Scott Kuhl and Rafal Mantiuk and Betsy Sanders",
  title =        "Introduction to Special Issue {SAP 2015}",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "13:1--13:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2815623",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Abebe:2015:ECF,
  author =       "Mekides Assefa Abebe and Tania Pouli and Jonathan
                 Kervec",
  title =        "Evaluating the Color Fidelity of {ITMOs} and {HDR}
                 Color Appearance Models",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2808232",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "With the increasing availability of high-dynamic-range
                 (HDR) displays comes the need to remaster existing
                 content in a way that takes advantage of the extended
                 range of luminance and contrast that such displays
                 offer. At the same time, it is crucial that the
                 creative intent of the director is preserved through
                 such changes as much as possible. In this article, we
                 compare several approaches for dynamic range extension
                 to assess their ability to correctly reproduce the
                 color appearance of standard dynamic range (SDR) images
                 on HDR displays. A number of state-of-the-art inverse
                 tone mapping operators (ITMOs) combined with a standard
                 chromatic adaptation transform (CAT) as well as some
                 HDR color appearance models have been evaluated through
                 a psychophysical study, making use of an HDR display as
                 well as HDR ground-truth data. We found that global
                 ITMOs lead to the most reliable performance when
                 combined with a standard CAT, while more complex
                 methods were found to be more scene dependent, and
                 often less preferred than the unprocessed SDR image.
                 HDR color appearance models, albeit being the most
                 complete solutions for accurate color reproduction,
                 were found to not be well suited to the problem of
                 dynamic range expansion, suggesting that further
                 research may be necessary to provide accurate color
                 management in the context of inverse tone mapping.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
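
%%% For readers new to the terminology in Abebe:2015:ECF: a global
%%% ITMO applies one fixed luminance expansion to every pixel. A
%%% generic example (not one of the operators evaluated in the paper)
%%% is an inverse-gamma expansion to the display's peak luminance:
%%%
%%%     def expand_luminance(l_sdr, peak_nits=1000.0, gamma=2.2):
%%%         """Generic global inverse tone mapping: l_sdr in [0, 1]
%%%         maps to absolute luminance on an HDR display."""
%%%         return peak_nits * l_sdr ** gamma
%%%
%%%     print(expand_luminance(0.5))  # mid-gray -> ~218 nits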

@Article{Wilcox:2015:EVP,
  author =       "Laurie M. Wilcox and Robert S. Allison and John
                 Helliker and Bert Dunk and Roy C. Anthony",
  title =        "Evidence that Viewers Prefer Higher Frame-Rate Film",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2810039",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High frame-rate (HFR) movie-making refers to the
                 capture and projection of movies at frame rates several
                 times higher than the traditional 24 frames per second.
                 This higher frame rate theoretically improves the
                 quality of motion portrayed in movies, and helps avoid
                 motion blur, judder, and other undesirable artifacts.
                 However, there is considerable debate in the cinema
                 industry regarding the acceptance of HFR content given
                 anecdotal reports of hyper-realistic imagery that
                 reveals too much set and costume detail. Despite the
                 potential theoretical advantages, there has been little
                 empirical investigation of the impact of high
                 frame-rate techniques on the viewer experience. In this
                 study, we use stereoscopic 3D content, filmed and
                 projected at multiple frame rates (24, 48, and 60 fps),
                 with shutter angles ranging from $ 180^\circ $ to $
                 358^\circ $, to evaluate viewer preferences. In a
                 paired-comparison paradigm, we assessed preferences
                 along a set of five attributes (realism, motion
                 smoothness, blur/clarity, quality of depth, and overall
                 preference). The resulting data show a clear preference
                 for higher frame rates, particularly when contrasting
                 24 fps with 48 or 60 fps. We found little impact of
                 shutter angle on viewers' choices, with the exception
                 of one measure (motion smoothness) for one clip type.
                 These data are the first empirical evidence of the
                 advantages afforded by high frame-rate capture and
                 presentation in a cinema context.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
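
%%% Shutter angle, varied in Wilcox:2015:EVP, converts to per-frame
%%% exposure time as t = (angle / 360) / fps: 24 fps at 180 degrees
%%% exposes each frame for 1/48 s, while 60 fps at 358 degrees exposes
%%% for nearly the full 1/60 s frame period.
%%%
%%%     def exposure_seconds(shutter_angle_deg, fps):
%%%         """Per-frame exposure time for a rotary shutter."""
%%%         return (shutter_angle_deg / 360.0) / fps
%%%
%%%     print(exposure_seconds(180, 24))  # ~0.0208 s = 1/48 s
%%%     print(exposure_seconds(358, 60))  # ~0.0166 s, near 1/60 s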

@Article{Jun:2015:BFU,
  author =       "Eunice Jun and Jeanine K. Stefanucci and Sarah H.
                 Creem-Regehr and Michael N. Geuss and William B.
                 Thompson",
  title =        "Big Foot: Using the Size of a Virtual Foot to Scale
                 Gap Width",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2811266",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Spatial perception research in the real world and in
                 virtual environments suggests that the body (e.g.,
                 hands) plays a role in the perception of the scale of
                 the world. However, little research has closely
                 examined how varying the size of virtual body parts may
                 influence judgments of action capabilities and spatial
                 layout. Here, we questioned whether changing the size
                 of virtual feet would affect judgments of stepping over
                 and estimates of the width of a gap. Participants
                 viewed their disembodied virtual feet as small or large
                 and judged both their ability to step over a gap and
                 the size of gaps shown in the virtual world. Foot size
                 affected both affordance judgments and size estimates
                 such that those with enlarged virtual feet estimated
                 they could step over larger gaps and that the extent of
                 the gap was smaller. Shrunken feet led to the
                 perception of a reduced ability to step over a gap and
                 smaller estimates of width. The results suggest that
                 people use their visually perceived foot size to scale
                 virtual spaces. Regardless of foot size, participants
                 felt that they owned the feet rendered in the virtual
                 world. Seeing disembodied, but motion-tracked, virtual
                 feet affected spatial judgments, suggesting that the
                 presentation of a single tracked body part is
                 sufficient to produce similar effects on perception, as
                 has been observed with the presence of fully co-located
                 virtual self-avatars or other body parts in the past.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Legde:2015:MAP,
  author =       "Katharina Legde and Susana Castillo and Douglas W.
                 Cunningham",
  title =        "Multimodal Affect: Perceptually Evaluating an
                 Affective Talking Head",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2811265",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Many tasks such as driving or rapidly sorting items
                 can be best achieved by direct actions. Other tasks
                 such as giving directions, being guided through a
                 museum, or organizing a meeting are more easily solved
                 verbally. Since computers are increasingly being used
                 in all aspects of daily life, it would be of great
                 advantage if we could communicate verbally with them.
                 Although advanced interactions with computers are
                 possible, the vast majority of interactions are still
                 based on the WIMP (Window, Icon, Menu, Point) metaphor
                 [Hevner and Chatterjee 2010] and therefore rely on
                 simple text and gesture commands. The field of
                 affective interfaces is working toward making computers
                 more accessible by giving them (rudimentary)
                 natural-language abilities, including using synthesized
                 speech, facial expressions, and virtual body motions.
                 Once the computer is granted a virtual body, however,
                 it must be given the ability to use it to nonverbally
                 convey socio-emotional information (such as emotions,
                 intentions, mental state, and expectations) or it will
                 likely be misunderstood. Here, we present a simple
                 affective talking head along with the results of an
                 experiment on the multimodal expression of emotion. The
                 results show that although people can sometimes
                 recognize the intended emotion from the semantic
                 content of the text even when the face does not convey
                 affect, they are considerably better at it when the
                 face also shows emotion. Moreover, when both face and
                 text convey emotion, people can detect different levels
                 of emotional intensity.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Tan:2015:PLI,
  author =       "Minghui Tan and Jean-Fran{\c{c}}ois Lalonde and
                 Lavanya Sharan and Holly Rushmeier and Carol
                 O'Sullivan",
  title =        "The Perception of Lighting Inconsistencies in
                 Composite Outdoor Scenes",
  journal =      j-TAP,
  volume =       "12",
  number =       "4",
  pages =        "18:1--18:??",
  month =        sep,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2810038",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Sep 10 07:42:21 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It is known that humans can be insensitive to large
                 changes in illumination. For example, if an object of
                 interest is extracted from one digital photograph and
                 inserted into another, we do not always notice the
                 differences in illumination between the object and its
                 new background. This inability to spot illumination
                 inconsistencies is often the key to success in digital
                 ``doctoring'' operations. We present a set of
                 experiments in which we explore the perception of
                 illumination in outdoor scenes. Our results can be used
                 to predict when and why inconsistencies go unnoticed.
                 Applications of the knowledge gained from our studies
                 include smarter digital ``cut-and-paste'' and digital
                 ``fake'' detection tools, and image-based composite
                 scene backgrounds for layout and previsualization.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kiiski:2015:SHS,
  author =       "Hanni Kiiski and Ludovic Hoyet and Andy T. Woods and
                 Carol O'Sullivan and Fiona N. Newell",
  title =        "Strutting Hero, Sneaking Villain: Utilizing Body
                 Motion Cues to Predict the Intentions of Others",
  journal =      j-TAP,
  volume =       "13",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2791293",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Dec 21 17:38:35 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A better understanding of how intentions and traits
                 are perceived from body movements is required for the
                 design of more effective virtual characters that behave
                 in a socially realistic manner. For this purpose,
                 realistic body motion, captured from human movements,
                 is being used more frequently for creating characters
                 with natural animations in games and entertainment.
                 However, it is not always clear for programmers and
                 designers which specific motion parameters best convey
                 specific information such as certain emotions,
                 intentions, or traits. We conducted two experiments to
                 investigate whether the perceived traits of actors
                 could be determined from their body motion, and whether
                 these traits were associated with their perceived
                 intentions. We first recorded body motions from 26
                 professional actors, who were instructed to move in a
                 ``hero''-like or a ``villain''-like manner. In the
                 first experiment, 190 participants viewed individual
                 video recordings of these actors and were required to
                 rate the body motion stimuli along a series of
                 different cognitive dimensions (intentions,
                 attractiveness, dominance, trustworthiness, and
                 distinctiveness). The intersubject ratings across
                 observers were highly consistent, suggesting that
                 social traits are readily determined from body motion.
                 Moreover, correlational analyses between these ratings
                 revealed consistent associations across traits, for
                 example, that perceived ``good'' intentions were
                 associated with higher ratings of attractiveness and
                 dominance. Experiment 2 was designed to elucidate the
                 qualitative body motion cues that were critical for
                 determining specific intentions and traits from the
                 hero- and villain-like body movements. The results
                 revealed distinct body motions that were readily
                 associated with the perception of either ``good'' or
                 ``bad'' intentions. Moreover, regression analyses
                 revealed that these ratings accurately predicted the
                 perception of the portrayed character type. These
                 findings indicate that intentions and social traits are
                 communicated effectively via specific sets of body
                 motion features. Furthermore, these results have
                 important implications for the design of the motion of
                 virtual characters to convey desired social
                 information.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Giraud:2015:PEP,
  author =       "Tom Giraud and Florian Focone and Virginie Demulier
                 and Jean Claude Martin and Brice Isableu",
  title =        "Perception of Emotion and Personality through
                 Full-Body Movement Qualities: a Sport Coach Case
                 Study",
  journal =      j-TAP,
  volume =       "13",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2791294",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Dec 21 17:38:35 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual sport coaches guide users through their
                 physical activity and provide motivational support.
                 Users' motivation can rapidly decay if the movements of
                 the virtual coach are too stereotyped. Kinematic
                 patterns generated while performing a predefined
                 fitness movement can elicit and help to prolong users'
                 interaction and interest in training. Human body
                 kinematics has been shown to convey various social
                 attributes such as gender, identity, and acted
                 emotions. To date, no study provides information
                 regarding how spontaneous emotions and personality
                 traits together are perceived from full-body movements.
                 In this article, we study how people make reliable
                 inferences regarding spontaneous emotional dimensions
                 and personality traits of human coaches from kinematic
                 patterns they produced when performing a fitness
                 sequence. Movements were presented to participants via
                 a virtual mannequin to isolate the influence of
                 kinematics on perception. Kinematic patterns of
                 biological movement were analyzed in terms of movement
                 qualities according to the effort-shape [Dell 1977]
                 notation proposed by Laban [1950]. Three studies were
                 performed to provide an analysis of the process leading
                 to perception: from coaches' states and traits through
                 bodily movements to observers' social perception.
                 Thirty-two participants (i.e., observers) were asked to
                 rate the movements of the virtual mannequin in terms of
                 conveyed emotion dimensions, personality traits
                 (five-factor model of personality), and perceived
                 movement qualities (effort-shape) from 56 fitness
                 movement sequences. The results showed high reliability
                 for most of the evaluated dimensions, confirming
                 interobserver agreement from kinematics at zero
                 acquaintance. A large expressive halo merging emotional
                 (e.g., perceived intensity) and personality aspects
                 (e.g., extraversion) was found, driven by perceived
                 kinematic impulsivity and energy. Observers'
                 perceptions were partially accurate for emotion
                 dimensions and were not accurate for personality
                 traits. Together, these results contribute to both the
                 understanding of dimensions of social perception
                 through movement and the design of expressive virtual
                 sport coaches.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kokkinara:2015:EVC,
  author =       "Elena Kokkinara and Mel Slater and Joan
                 L{\'o}pez-Moliner",
  title =        "The Effects of Visuomotor Calibration to the Perceived
                 Space and Body, through Embodiment in Immersive Virtual
                 Reality",
  journal =      j-TAP,
  volume =       "13",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2818998",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Dec 21 17:38:35 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We easily adapt to changes in the environment that
                 involve cross-sensory discrepancies (e.g., between
                 vision and proprioception). Adaptation can lead to
                 changes in motor commands so that the experienced
                 sensory consequences are appropriate for the new
                 environment (e.g., we program a movement differently
                 while wearing prisms that shift our visual space). In
                 addition to these motor changes, perceptual judgments
                 of space can also be altered (e.g., how far can I reach
                 with my arm?). However, in previous studies that
                 assessed perceptual judgments of space after visuomotor
                 adaptation, the manipulation was always a planar
                 spatial shift, whereas changes in body perception could
                 not directly be assessed. In this study, we
                 investigated the effects of velocity-dependent
                 (spatiotemporal) and spatial scaling distortions of arm
                 movements on space and body perception, taking
                 advantage of immersive virtual reality. Exploiting the
                 perceptual illusion of embodiment in an entire virtual
                 body, we endowed subjects with new spatiotemporal or
                 spatial 3D mappings between motor commands and their
                 sensory consequences. The results imply that
                 spatiotemporal manipulations that speed movements up by
                 factors of 2 and 4 can significantly change
                 participants' proprioceptive judgments of a virtual
                 object's size without affecting perceived body
                 ownership, although they did affect the sense of agency
                 over the movements. Equivalent spatial
                 manipulations of 11 and 22 degrees of angular offset
                 also had a significant effect on the perceived virtual
                 object's size; however, the mismatched information did
                 not affect either the sense of body ownership or
                 agency. We conclude that adaptation to spatial and
                 spatiotemporal distortion can similarly change our
                 perception of space, although spatiotemporal
                 distortions can more easily be detected.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Krejtz:2015:GTE,
  author =       "Krzysztof Krejtz and Andrew Duchowski and Tomasz
                 Szmidt and Izabela Krejtz and Fernando Gonz{\'a}lez
                 Perilli and Ana Pires and Anna Vilaro and Natalia
                 Villalobos",
  title =        "Gaze Transition Entropy",
  journal =      j-TAP,
  volume =       "13",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2834121",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Dec 21 17:38:35 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article details a two-step method of quantifying
                 eye movement transitions between areas of interest
                 (AOIs). First, individuals' gaze switching patterns,
                 represented by fixated AOI sequences, are modeled as
                 Markov chains. Second, Shannon's entropy coefficient of
                 the fit Markov model is computed to quantify the
                 complexity of individual switching patterns. To
                 determine the overall distribution of attention over
                 AOIs, the entropy coefficient of individuals'
                 stationary distribution of fixations is calculated. The
                 novelty of the method is that it captures the
                 variability of individual differences in eye movement
                 characteristics, which are then summarized
                 statistically. The method is demonstrated on gaze data
                 collected from two studies, during free viewing of
                 classical art paintings. Normalized Shannon's entropy,
                 derived from individual transition matrices, is related
                 to participants' individual differences as well as to
                 either their aesthetic impression or recognition of
                 artwork. Low transition and high stationary entropies
                 suggest greater curiosity mixed with a higher
                 subjective aesthetic affinity toward artwork, possibly
                 indicative of visual scanning of the artwork in a more
                 deliberate way. Meanwhile, both high transition and
                 stationary entropies may be indicative of recognition
                 of familiar artwork.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
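
%%% The two-step method of Krejtz:2015:GTE has a compact form: with
%%% transition probabilities p_ij estimated from the fixated AOI
%%% sequence and visit distribution pi_i, the transition entropy is
%%% H_t = -sum_i pi_i sum_j p_ij log2 p_ij and the stationary entropy
%%% is H_s = -sum_i pi_i log2 pi_i. A sketch under those definitions
%%% (empirical visit frequencies stand in for the stationary
%%% distribution; not necessarily the authors' exact normalization):
%%%
%%%     import numpy as np
%%%
%%%     def gaze_entropies(aoi_seq, n_aois):
%%%         """Transition and stationary entropies (bits) of an AOI
%%%         sequence modeled as a first-order Markov chain; assumes
%%%         every visited AOI has an outgoing transition."""
%%%         counts = np.zeros((n_aois, n_aois))
%%%         for a, b in zip(aoi_seq, aoi_seq[1:]):
%%%             counts[a, b] += 1
%%%         p = counts / counts.sum(axis=1, keepdims=True)
%%%         pi = counts.sum(axis=1) / counts.sum()
%%%         logp = np.log2(np.where(p > 0, p, 1.0))  # zeros drop out
%%%         h_t = -(pi * (p * logp).sum(axis=1)).sum()
%%%         h_s = -(pi[pi > 0] * np.log2(pi[pi > 0])).sum()
%%%         return h_t, h_s
%%%
%%%     print(gaze_entropies([0, 1, 0, 2, 1, 0, 1, 2, 0], 3))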

@Article{Schuwerk:2015:CEC,
  author =       "Clemens Schuwerk and Xiao Xu and Rahul Chaudhari and
                 Eckehard Steinbach",
  title =        "Compensating the Effect of Communication Delay in
                 Client-Server--Based Shared Haptic Virtual
                 Environments",
  journal =      j-TAP,
  volume =       "13",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2835176",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Dec 21 17:38:35 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Shared haptic virtual environments can be realized
                 using a client-server architecture. In this
                 architecture, each client maintains a local copy of the
                 virtual environment (VE). A centralized physics
                 simulation running on a server calculates the object
                 states based on haptic device position information
                 received from the clients. The object states are sent
                 back to the clients to update the local copies of the
                 VE, which are used to render interaction forces
                 displayed to the user through a haptic device.
                 Communication delay leads to delayed object state
                 updates and increased force feedback rendered at the
                 clients. In this article, we analyze the effect of
                 communication delay on the magnitude of the rendered
                 forces at the clients for cooperative multi-user
                 interactions with rigid objects. The analysis reveals
                 guidelines on the tolerable communication delay. If
                 this delay is exceeded, the increased force magnitude
                 becomes haptically perceivable. We propose an adaptive
                 force rendering scheme to compensate for this effect,
                 which dynamically changes the stiffness used in the
                 force rendering at the clients. Our experimental
                 results, including a subjective user study, verify the
                 applicability of the analysis and the proposed scheme
                 to compensate the effect of time-varying communication
                 delay in a multi-user SHVE.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
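
%%% The entry above analyzes how communication delay inflates the
%%% force magnitudes rendered at the clients and proposes adapting
%%% the rendering stiffness. A minimal Python sketch of the general
%%% idea only; the proportional attenuation law and the 25 ms
%%% threshold are illustrative assumptions, not the authors' scheme
%%% or their measured tolerable delay:
%%%
%%%     def rendered_force(stiffness, penetration_depth):
%%%         # Penalty (spring) force, common in haptic rendering loops.
%%%         return stiffness * penetration_depth
%%%
%%%     def adapted_stiffness(k_nominal, delay_ms, tolerable_ms=25.0):
%%%         # Keep the nominal stiffness while the delay is tolerable;
%%%         # attenuate beyond that so the rendered force magnitude
%%%         # stays roughly constant instead of growing perceivably.
%%%         if delay_ms <= tolerable_ms:
%%%             return k_nominal
%%%         return k_nominal * tolerable_ms / delay_ms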

@Article{Rigas:2016:BRE,
  author =       "Ioannis Rigas and Oleg Komogortsev and Reza Shadmehr",
  title =        "Biometric Recognition via Eye Movements: Saccadic
                 Vigor and Acceleration Cues",
  journal =      j-TAP,
  volume =       "13",
  number =       "2",
  pages =        "6:1--6:??",
  month =        mar,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2842614",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 3 17:40:03 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Previous research shows that human eye movements can
                 serve as a valuable source of information about the
                 structural elements of the oculomotor system and they
                 also can open a window to the neural functions and
                 cognitive mechanisms related to visual attention and
                 perception. The research field of eye movement-driven
                 biometrics explores the extraction of
                 individual-specific characteristics from eye movements
                 and their employment for recognition purposes. In this
                 work, we present a study for the incorporation of
                 dynamic saccadic features into a model of eye
                 movement-driven biometrics. We show that when these
                 features are added to our previous biometric framework
                 and tested on a large database of 322 subjects, the
                 biometric accuracy presents a relative improvement in
                 the range of 31.6--33.5\% for the verification
                 scenario, and in the range of 22.3--53.1\% for the
                 identification scenario. More importantly, this
                 improvement is demonstrated for different types of
                 visual stimulus (random dot, text, video), indicating
                 the enhanced robustness offered by the incorporation of
                 saccadic vigor and acceleration cues.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Holmes:2016:AII,
  author =       "Olivia Holmes and Martin S. Banks and Hany Farid",
  title =        "Assessing and Improving the Identification of
                 Computer-Generated Portraits",
  journal =      j-TAP,
  volume =       "13",
  number =       "2",
  pages =        "7:1--7:??",
  month =        mar,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2871714",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 3 17:40:03 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Modern computer graphics are capable of generating
                 highly photorealistic images. Although this can be
                 considered a success for the computer graphics
                 community, it has given rise to complex forensic and
                 legal issues. A compelling example comes from the need
                 to distinguish between computer-generated and
                 photographic images as it pertains to the legality and
                 prosecution of child pornography in the United States.
                 We performed psychophysical experiments to determine
                 the accuracy with which observers are capable of
                 distinguishing computer-generated from photographic
                 images. We find that observers have considerable
                 difficulty performing this task, more difficulty than we
                 observed 5 years ago when computer-generated imagery
                 was not as photorealistic. We also find that observers
                 are more likely to report that an image is photographic
                 rather than computer generated, and that resolution has
                 surprisingly little effect on performance. Finally, we
                 find that a small amount of training greatly improves
                 accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hyde:2016:EAC,
  author =       "Jennifer Hyde and Elizabeth J. Carter and Sara Kiesler
                 and Jessica K. Hodgins",
  title =        "Evaluating Animated Characters: Facial Motion
                 Magnitude Influences Personality Perceptions",
  journal =      j-TAP,
  volume =       "13",
  number =       "2",
  pages =        "8:1--8:??",
  month =        mar,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851499",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 3 17:40:03 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Animated characters are expected to fulfill a variety
                 of social roles across different domains. To be
                 successful and effective, these characters must display
                 a wide range of personalities. Designers and animators
                 create characters with appropriate personalities by
                 using their intuition and artistic expertise. Our goal
                 is to provide evidence-based principles for creating
                 social characters. In this article, we describe the
                 results of two experiments that show how exaggerated
                 and damped facial motion magnitude influence
                 impressions of cartoon and more realistic animated
                 characters. In our first experiment, participants
                 watched animated characters that varied in rendering
                 style and facial motion magnitude. The participants
                 then rated the different animated characters on
                 extroversion, warmth, and competence, which are social
                 traits that are relevant for characters used in
                 entertainment, therapy, and education. We found that
                 facial motion magnitude affected these social traits in
                 cartoon and realistic characters differently. Facial
                 motion magnitude affected ratings of cartoon
                 characters' extroversion and competence more than their
                 warmth. In contrast, facial motion magnitude affected
                 ratings of realistic characters' extroversion but not
                 their competence or warmth. We ran a second experiment
                 to extend the results of the first. In the second
                 experiment, we added emotional valence as a variable.
                 We also asked participants to rate the characters on
                 more specific aspects of warmth, such as
                 respectfulness, calmness, and attentiveness. Although
                 the characters' emotional valence did not affect
                 ratings, we found that facial motion magnitude
                 influenced ratings of the characters' respectfulness
                 and calmness but not attentiveness. These findings
                 provide a basis for how animators can fine-tune facial
                 motion to control perceptions of animated characters'
                 personalities.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wang:2016:AIH,
  author =       "Yingying Wang and Jean E. Fox Tree and Marilyn Walker
                 and Michael Neff",
  title =        "Assessing the Impact of Hand Motion on Virtual
                 Character Personality",
  journal =      j-TAP,
  volume =       "13",
  number =       "2",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2874357",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 3 17:40:03 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Designing virtual characters that are capable of
                 conveying a sense of personality is important for
                 generating realistic experiences, and thus a key goal
                 in computer animation research. Though the influence of
                 gesture and body motion on personality perception has
                 been studied, little is known about which attributes of
                 hand pose and motion convey particular personality
                 traits. Using the ``Big Five'' model as a framework for
                 evaluating personality traits, this work examines how
                 variations in hand pose and motion impact the
                 perception of a character's personality. As has been
                 done with facial motion, we first study hand motion in
                 isolation as a requirement for running controlled
                 experiments that avoid the combinatorial explosion of
                 multimodal communication (all combinations of facial
                 expressions, arm movements, body movements, and hands)
                 and allow us to understand the communicative content of
                 hands. We determined a set of features likely to
                 reflect personality, based on research in psychology
                 and previous human motion perception work: shape,
                 direction, amplitude, speed, and manipulation. Then we
                 captured realistic hand motion varying these attributes
                 and conducted three perceptual experiments to determine
                 the contribution of these attributes to the character's
                 personalities. Both hand poses and the amplitude of
                 hand motion affected the perception of all five
                 personality traits. Speed impacted all traits except
                 openness. Direction impacted extraversion and openness.
                 Manipulation was perceived as an indicator of
                 introversion, disagreeableness, neuroticism, and less
                 openness to experience. From these results, we
                 generalize guidelines for designing detailed hand
                 motion that can add to the expressiveness and
                 personality of characters. We performed an evaluation
                 study that combined hand motion with gesture and body
                 motion. Even in the presence of body motion, hand
                 motion still significantly impacted the perception of a
                 character's personality and could even be the dominant
                 factor in certain situations.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kawabe:2016:DLP,
  author =       "Takahiro Kawabe and Taiki Fukiage and Masataka
                 Sawayama and Shin'ya Nishida",
  title =        "Deformation Lamps: a Projection Technique to Make
                 Static Objects Perceptually Dynamic",
  journal =      j-TAP,
  volume =       "13",
  number =       "2",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2874358",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 3 17:40:03 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Light projection is a powerful technique that can be
                 used to edit the appearance of objects in the real
                 world. Based on pixel-wise modification of light
                 transport, previous techniques have successfully
                 modified static surface properties such as surface
                 color, dynamic range, gloss, and shading. Here, we
                 propose an alternative light projection technique that
                 adds a variety of illusory yet realistic distortions to
                 a wide range of static 2D and 3D projection targets.
                 The key idea of our technique, referred to as
                 ``Deformation Lamps'', is to project only dynamic
                 luminance information, which effectively activates the
                 motion (and shape) processing in the visual system
                 while preserving the color and texture of the original
                 object. Although the projected dynamic luminance
                 information is spatially inconsistent with the color
                 and texture of the target object, the observer's brain
                 automatically combines these sensory signals in such a
                 way as to correct the inconsistency across visual
                 attributes. We conducted a psychophysical experiment to
                 investigate the characteristics of the inconsistency
                 correction and found that the correction was critically
                 dependent on the retinal magnitude of the
                 inconsistency. Another experiment showed that the
                 perceived magnitude of image deformation produced by
                 our techniques was underestimated. The results ruled
                 out the possibility that the effect obtained by our
                 technique stemmed simply from the physical change in an
                 object's appearance by light projection. Finally, we
                 discuss how our techniques can make the observers
                 perceive a vivid and natural movement, deformation, or
                 oscillation of a variety of static objects, including
                 drawn pictures, printed photographs, sculptures with 3D
                 shading, and objects with natural textures including
                 human bodies.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
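
%%% A schematic reading of the Deformation Lamps idea in the entry
%%% above, as a Python sketch: only the luminance change implied by
%%% each deformed frame is projected, leaving the target's color and
%%% texture physically untouched. The Rec. 709 luma weights and the
%%% constant ``lift'' (projectors cannot emit negative light) are
%%% assumptions of this sketch, not the published pipeline:
%%%
%%%     import numpy as np
%%%
%%%     LUMA = np.array([0.2126, 0.7152, 0.0722])  # Rec. 709 weights
%%%
%%%     def projector_frames(original_rgb, deformed_rgb_seq, lift=0.5):
%%%         # Luminance of the undeformed target.
%%%         base = original_rgb @ LUMA
%%%         # Per frame: project the luminance difference, lifted by a
%%%         # constant gray so the output stays non-negative.
%%%         return [np.clip(frame @ LUMA - base + lift, 0.0, 1.0)
%%%                 for frame in deformed_rgb_seq]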

@Article{Krejtz:2016:DAF,
  author =       "Krzysztof Krejtz and Andrew Duchowski and Izabela
                 Krejtz and Agnieszka Szarkowska and Agata Kopacz",
  title =        "Discerning Ambient\slash Focal Attention with
                 Coefficient {$K$}",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2896452",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We introduce coefficient $K$, defined on a novel
                 parametric scale, derived from processing a
                 traditionally eye-tracked time course of eye movements.
                 Positive and negative ordinates of $K$ indicate focal or
                 ambient viewing, respectively, while the abscissa
                 serves to indicate time, so that $K$ acts as a dynamic
                 indicator of fluctuation between ambient/focal visual
                 behavior. The coefficient indicates the difference
                 between fixation duration and its subsequent saccade
                 amplitude expressed in standard deviation units,
                 facilitating parametric statistical testing. To
                 validate $K$ empirically, we test its utility by
                 capturing ambient and focal attention during serial and
                 parallel visual search tasks (Study 1). We then show
                 how $K$ quantitatively depicts the difference in scanning
                 behaviors when attention is guided by audio description
                 during perception of art (Study 2).",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
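
%%% Per the definition quoted in the abstract above, coefficient $K$
%%% contrasts each standardized fixation duration with the
%%% standardized amplitude of the saccade that follows it. A minimal
%%% Python sketch; the array pairing convention is an assumption of
%%% this sketch:
%%%
%%%     import numpy as np
%%%
%%%     def coefficient_K(fix_durations, next_saccade_amplitudes):
%%%         # fix_durations[i] is the duration of fixation i;
%%%         # next_saccade_amplitudes[i] is the amplitude of the
%%%         # saccade immediately following it.
%%%         d = np.asarray(fix_durations, dtype=float)
%%%         a = np.asarray(next_saccade_amplitudes, dtype=float)
%%%         z_d = (d - d.mean()) / d.std()
%%%         z_a = (a - a.mean()) / a.std()
%%%         return z_d - z_a   # > 0: focal viewing; < 0: ambient
%%%
%%% Averaging the per-event values over a time window yields the
%%% dynamic ambient/focal indicator the abstract describes.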

@Article{Subedar:2016:BD,
  author =       "Mahesh M. Subedar and Lina J. Karam",
  title =        "{$3$D} Blur Discrimination",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2896453",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Blur is an important attribute in the study and
                 modeling of the human visual system. In the blur
                 discrimination experiments, just-noticeable additional
                 blur required to differentiate from the reference blur
                 level is measured. The past studies on blur
                 discrimination have measured the sensitivity of the
                 human visual system to blur using two-dimensional (2D)
                 test patterns. In this study, subjective tests are
                 performed to measure blur discrimination thresholds
                 using stereoscopic 3D test patterns. Specifically, how
                 the binocular disparity affects the blur sensitivity is
                 measured on a passive stereoscopic display. A passive
                 stereoscopic display renders the left and right eye
                 images in a row interleaved format. The subjects have
                 to wear circularly polarized glasses to filter the
                 appropriate images to the left and right eyes.
                 Positive, negative, and zero disparity values are
                 considered in these experiments. A positive disparity
                 value projects the objects behind the display screen, a
                 negative disparity value projects the objects in front
                 of the display screen, and a zero disparity value
                 projects the objects at the display plane. The blur
                 discrimination thresholds are measured for both
                 symmetric and asymmetric stereo viewing cases. In the
                 symmetric viewing case, the same level of additional
                 blur is applied to the left and right eye stimulus. In
                 the asymmetric viewing case, different levels of
                 additional blur are applied to the left and right eye
                 stimuli. The results of this study indicate that, in
                 the symmetric stereo viewing case, binocular disparity
                 does not affect the blur discrimination thresholds for
                 the selected 3D test patterns. As a consequence of
                 these findings, we conclude that the models developed
                 for 2D blur discrimination can be used for 3D blur
                 discrimination. We also show that the Weber model
                 provides a good fit to the blur discrimination
                 threshold measurements for the symmetric stereo viewing
                 case. In the asymmetric viewing case, the blur
                 discrimination thresholds decreased, and the decrease
                 in threshold values is found to be dominated by the eye
                 observing the higher blur.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
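
%%% The Weber model invoked in the abstract above, in its simplest
%%% common form, predicts a just-noticeable blur increment that grows
%%% linearly with the reference blur level,
%%%
%%%     \Delta B = w \, (B + B_0),
%%%
%%% where $w$ is the Weber fraction and $B_0$ is a small offset often
%%% added to account for intrinsic optical and neural blur; both are
%%% free parameters of the fit. The exact parameterization used in
%%% the article may differ.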

@Article{Koenderink:2016:CPI,
  author =       "Jan Koenderink and Andrea {Van Doorn} and Vebj{\o}rn
                 Ekroll",
  title =        "Color Picking: The Initial 20s",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "13:1--13:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2883613",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Color pickers are widely used in all kinds of display
                 applications. They vary greatly in their utility,
                 depending on user expertise. We focus on
                 nonprofessional, occasional users. Such users may spend
                 from a few seconds up to a few minutes to select a
                 color. Yet, typically they reach final accuracy within
                 the initial 20s. Additional effort leads to random
                 walks in the neighborhood of the target. We explore the
                 efficaciousness of five generic color pickers,
                 analyzing the results in terms of generic user
                 interface properties. There is a major dichotomy
                 between three-slider interfaces, and those that offer
                 some form of 2D selectivity. The accuracy in rgb
                 coordinates is about one-tenth to one-twentieth of the
                 full scale (often 0--255 in r, g, and b), whereas a
                 little over 100 hues are resolved. The most efficient
                 color picker, which is presently rarely used in popular
                 applications, is much more efficient than the worst
                 one. We speculate that this derives from a closer match
                 to the user's internal representation of color space.
                 The study results in explicit recommendations for the
                 implementation of user-friendly and efficient color
                 tools.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Radun:2016:IDI,
  author =       "Jenni Radun and Mikko Nuutinen and Tuomas Leisti and
                 Jukka H{\"a}kkinen",
  title =        "Individual Differences in Image-Quality Estimations:
                 Estimation Rules and Viewing Strategies",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "14:1--14:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2890504",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Subjective image-quality estimation with high-quality
                 images is often a preference-estimation task.
                 Preferences are subjective, and individual differences
                 exist. Individual differences are also seen in the eye
                 movements of people. A task's subjectivity can result
                 from people using different rules as a basis for their
                 estimation. Using two studies, we investigated whether
                 different preference-estimation rules are related to
                 individual differences in viewing behaviour by
                 examining the process of preference estimation of
                 high-quality images. The estimation rules were measured
                 from free subjective reports on important
                 quality-related attributes (Study 1) and from
                 estimations of the attributes' importance in preference
                 estimation (Study 2). The free reports showed that the
                 observers used both feature-based image-quality
                 attributes (e.g., sharpness, illumination) and abstract
                 attributes, which include an interpretation of the
                 image features (e.g., atmosphere and naturalness). In
                 addition, the observers were classified into three
                 viewing-strategy groups differing in fixation durations
                 in both studies. These groups also used different
                 estimation rules. In both studies, the group with
                 medium-length fixations differed in their estimation
                 rules from the other groups. In Study 1, the observers
                 in this group used more abstract attributes than those
                 in the other groups; in Study 2, they considered
                 atmosphere to be a more important image feature. The
                 study shows that individual differences in a
                 quality-estimation task are related to both estimation
                 rules and viewing strategies, and that the difference
                 is related to the level of abstraction of the
                 estimations.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Boi:2016:RUA,
  author =       "Paolo Boi and Gianni Fenu and Lucio Davide Spano and
                 Valentino Vargiu",
  title =        "Reconstructing User's Attention on the {Web} through
                 Mouse Movements and Perception-Based Content
                 Identification",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "15:1--15:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2912124",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Eye tracking is one of the most exploited techniques
                 in literature for finding usability problems in
                 web-based user interfaces (UIs). However, it is usually
                 employed in a laboratory setting, considering that an
                 eye-tracker is not commonly used in web browsing. In
                 contrast, web application providers usually exploit
                 remote techniques for large-scale user studies (e.g.,
                 A/B testing), tracking low-level interactions such as
                 mouse clicks and movements. In this article, we discuss
                 a method for predicting whether the user is looking at
                 the content pointed by the cursor, exploiting the mouse
                 movement data and a segmentation of the contents in a
                 web page. We propose an automatic method for segmenting
                 content groups inside a web page that, applying both
                 image and code analysis techniques, identifies the
                 user-perceived group of contents with a mean
                 pixel-based error of around 20\%. In addition, we show
                 through a user study that such segmentation information
                 enhances the precision and the accuracy in predicting
                 the correlation between the user's gaze and the
                 mouse position at the content level, without relying
                 on user-specific features.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Morrison-Smith:2016:UAC,
  author =       "Sarah Morrison-Smith and Megan Hofmann and Yang Li and
                 Jaime Ruiz",
  title =        "Using Audio Cues to Support Motion Gesture Interaction
                 on Mobile Devices",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "16:1--16:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2897516",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Motion gestures are an underutilized input modality
                 for mobile interaction despite numerous potential
                 advantages. Negulescu et al. found that the lack of
                 feedback on attempted motion gestures made it difficult
                 for participants to diagnose and correct errors,
                 resulting in poor recognition performance and user
                 frustration. In this article, we describe and evaluate
                 a training and feedback technique, Glissando, which
                 uses audio characteristics to provide feedback on the
                 system's interpretation of user input. This technique
                 enables feedback by verbally confirming correct
                 gestures and notifying users of errors in addition to
                 providing continuous feedback by manipulating the pitch
                 of distinct musical notes mapped to each of the three
                 dimensional axes in order to provide both spatial and
                 temporal information.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
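
%%% The continuous feedback described in the entry above maps each of
%%% the three dimensional axes to the pitch of a distinct musical
%%% note. A minimal Python sketch of one such mapping; the base notes
%%% (C4/E4/G4) and the one-octave range are illustrative assumptions,
%%% not values from the article:
%%%
%%%     BASE_HZ = {"x": 261.63, "y": 329.63, "z": 392.00}
%%%
%%%     def axis_pitch_hz(axis, value, semitone_range=12.0):
%%%         # Map a normalized axis value in [-1, 1] to a pitch around
%%%         # that axis's base note (equal temperament).
%%%         return BASE_HZ[axis] * 2.0 ** (value * semitone_range / 12.0)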

@Article{Shamir:2016:DBA,
  author =       "Lior Shamir and Jenny Nissel and Ellen Winner",
  title =        "Distinguishing between Abstract Art by Artists vs.
                 Children and Animals: Comparison between Human and
                 Machine Perception",
  journal =      j-TAP,
  volume =       "13",
  number =       "3",
  pages =        "17:1--17:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2912125",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 28 17:45:30 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Abstract expressionism is a school of art
                 characterized by nonrepresentational paintings where
                 color, composition, and brush strokes are used to
                 express emotion. These works are often misunderstood by
                 the public who see them as requiring no skill and as
                 images that even a child could have created. However, a
                 recent series of studies has shown that ordinary adults
                 untrained in art or art history, as well as young
                 children, can differentiate paintings by abstract
                 expressionists and superficially similar works by
                 preschool children and even animals (monkeys, apes,
                 elephants). Adults perform this distinction with an
                 accuracy rate of $\approx$64\%, significantly higher than
                 chance. Here we ask whether machine perception can do
                 as well. Using the same paintings, we show that in
                 $\approx$68\% of the cases the computer algorithm can
                 discriminate between abstract paintings and the work of
                 children and animals. We also applied a method that
                 computes the correlation between the degree of
                 artisticity deduced from human perception of the
                 paintings and the visual content of the images, and we
                 show significant correlation between perceived
                 artisticity and visual content. The image content
                 descriptor that was the strongest predictor of correct
                 identification was the fractality of the painting. We
                 also show that the computer algorithm predicts the
                 perceived intentionality of the paintings by humans.
                 These results confirm perceptible differences between
                 works by abstract expressionists and superficially
                 similar ones by the untrained and show that people see
                 more than they think they see when looking at abstract
                 expressionism.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
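
%%% The abstract above reports the fractality of a painting as the
%%% strongest predictor of correct identification. One common way to
%%% quantify fractality (an assumption of this sketch; the article's
%%% feature set may compute it differently) is the box-counting
%%% dimension of a binarized version of the image:
%%%
%%%     import numpy as np
%%%
%%%     def box_counting_dimension(binary_img):
%%%         # Count occupied boxes at dyadic box sizes and fit a line
%%%         # in log-log space; -slope estimates the fractal dimension.
%%%         img = np.asarray(binary_img, dtype=bool)
%%%         n = 2 ** int(np.floor(np.log2(min(img.shape))))
%%%         img = img[:n, :n]        # crop to a power-of-two square
%%%         sizes, counts = [], []
%%%         size = n
%%%         while size >= 2:
%%%             blocks = img.reshape(n // size, size, n // size, size)
%%%             occupied = blocks.any(axis=(1, 3)).sum()
%%%             sizes.append(size)
%%%             counts.append(max(int(occupied), 1))
%%%             size //= 2
%%%         slope, _ = np.polyfit(np.log(sizes), np.log(counts), 1)
%%%         return -slope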

@Article{Bailey:2016:ISI,
  author =       "Reynold Bailey and Laura Trutoiu",
  title =        "Introduction to Special Issue {SAP 2016}",
  journal =      j-TAP,
  volume =       "13",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2954927",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:41 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ebrahimi:2016:EEV,
  author =       "Elham Ebrahimi and Sabarish V. Babu and Christopher C.
                 Pagano and Sophie J{\"o}rg",
  title =        "An Empirical Evaluation of Visuo-Haptic Feedback on
                 Physical Reaching Behaviors During {$3$D} Interaction
                 in Real and Immersive Virtual Environments",
  journal =      j-TAP,
  volume =       "13",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2947617",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:41 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In an initial study, we characterized the properties
                 of human reach motion in the presence or absence of
                 visuo-haptic feedback in real and Immersive Virtual
                 Environments (IVEs) or virtual reality within a
                 participant's maximum arm reach. Our goal is to
                 understand how physical reaching actions to the
                 perceived location of targets in the presence or
                 absence of visuo-haptic feedback are different between
                 real and virtual viewing conditions. Typically,
                 participants reach to the perceived location of objects
                 in the three-dimensional (3D) environment to perform
                 selection and manipulation actions during 3D
                 interaction in applications such as virtual assembly or
                 rehabilitation. In these tasks, participants typically
                 have distorted perceptual information in the IVE as
                 compared to the real world, in part due to
                 technological limitations such as minimal visual field
                 of view, resolution, latency, and jitter. In an
                 empirical evaluation, we asked the following questions:
                 (i) how do the perceptual differences between virtual
                 and real world affect our ability to accurately reach
                 to the locations of 3D objects, and (ii) how do the
                 motor responses of participants differ between the
                 presence or absence of visual and haptic feedback? We
                 examined factors such as velocity and distance of
                 physical reaching behavior between the real world and
                 IVE, both in the presence or absence of visuo-haptic
                 information. The results suggest that physical reach
                 responses vary systematically between real and virtual
                 environments, especially in situations involving the
                 presence or absence of visuo-haptic feedback. The
                 implications of our study provide a methodological
                 framework for the analysis of reaching motions for
                 selection and manipulation with novel 3D interaction
                 metaphors and to successfully characterize visuo-haptic
                 versus non-visuo-haptic physical reaches in virtual and
                 real-world situations.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ondrej:2016:FDA,
  author =       "Jan Ondrej and Cathy Ennis and Niamh A. Merriman and
                 Carol O'Sullivan",
  title =        "{FrankenFolk}: Distinctiveness and Attractiveness of
                 Voice and Motion",
  journal =      j-TAP,
  volume =       "13",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2948066",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:41 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It is common practice in movies and games to use
                 different actors for the voice and body/face motion of
                 a virtual character. What effect does the combination
                 of these different modalities have on the perception of
                 the viewer? In this article, we conduct a series of
                 experiments to evaluate the distinctiveness and
                 attractiveness of human motions (face and body) and
                 voices. We also create combination characters called
                 FrankenFolks, where we mix and match the voice, body
                 motion, face motion, and avatar of different actors and
                 ask which modality is most dominant when determining
                 distinctiveness and attractiveness or whether the
                 effects are cumulative.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rungta:2016:PCP,
  author =       "Atul Rungta and Sarah Rust and Nicolas Morales and
                 Roberta Klatzky and Ming Lin and Dinesh Manocha",
  title =        "Psychoacoustic Characterization of Propagation Effects
                 in Virtual Environments",
  journal =      j-TAP,
  volume =       "13",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2947508",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:41 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "As sound propagation algorithms become faster and more
                 accurate, the question arises as to whether the
                 additional efforts to improve fidelity actually offer
                 perceptual benefits over existing techniques. Could
                 environmental sound effects go the way of music, where
                 lower-fidelity compressed versions are actually favored
                 by listeners? Here we address this issue with two
                 acoustic phenomena that are known to have perceptual
                 effects on humans and that, accordingly, might be
                 expected to heighten their experience with simulated
                 environments. We present two studies comparing
                 listeners' perceptual response to both accurate and
                 approximate algorithms simulating two key acoustic
                 effects: diffraction and reverberation. For each
                 effect, we evaluate whether increased numerical
                 accuracy of a propagation algorithm translates into
                 increased perceptual differentiation in interactive
                 virtual environments. Our results suggest that auditory
                 perception does benefit from the increased accuracy,
                 with subjects showing better perceptual differentiation
                 when experiencing the more accurate rendering method:
                 the diffraction experiment shows a more linearly
                 decaying sound field (with respect to the diffraction
                 angle) for the accurate diffraction method, whereas the
                 reverberation experiment shows that more accurate
                 reverberation, after modest user experience, results in
                 near-logarithmic response to increasing room volume.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Jain:2016:MCP,
  author =       "Eakta Jain and Lisa Anthony and Aishat Aloba and
                 Amanda Castonguay and Isabella Cuba and Alex Shaw and
                 Julia Woodward",
  title =        "Is the Motion of a Child Perceivably Different from
                 the Motion of an Adult?",
  journal =      j-TAP,
  volume =       "13",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2947616",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:41 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Artists and animators have observed that children's
                 movements are quite different from adults performing
                 the same action. Previous computer graphics research on
                 human motion has primarily focused on adult motion.
                 There are open questions as to how different child
                 motion actually is, and whether the differences will
                 actually impact animation and interaction. We report
                 the first explicit study of the perception of child
                 motion (ages 5 to 9 years old), compared to analogous
                 adult motion. We used markerless motion capture to
                 collect an exploratory corpus of child and adult
                 motion, and conducted a perceptual study with point
                 light displays to discover whether naive viewers could
                 identify a motion as belonging to a child or an adult.
                 We find that people are generally successful at this
                 task. This work has implications for creating more
                 engaging and realistic avatars for games, online social
                 media, and animated videos and movies.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rummukainen:2016:RRM,
  author =       "Olli Rummukainen and Catarina Mendon{\c{c}}a",
  title =        "Reproducing Reality: Multimodal Contributions in
                 Natural Scene Discrimination",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2915917",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most research on multisensory processing focuses on
                 impoverished stimuli and simple tasks. In consequence,
                 very little is known about the sensory contributions in
                 the perception of real environments. Here, we presented
                 23 participants with paired comparison tasks, where
                 natural scenes were discriminated in three perceptually
                 meaningful attributes: movement, openness, and
                 noisiness. The goal was to assess the auditory and
                 visual modality contributions in scene discrimination
                 with short ({$\leq$}500ms) natural scene exposures. The
                 scenes were reproduced in an immersive audiovisual
                 environment with 3D sound and surrounding visuals.
                 Movement and openness were found to be mainly visual
                 attributes with some input from auditory information.
                 In some scenes, the auditory system was able to derive
                 information about movement and openness that was
                 comparable with the audiovisual condition after only
                 500ms of stimulation. Noisiness was mainly auditory, but
                 visual information was found to have a facilitatory
                 role in a few scenes. The sensory weights were highly
                 imbalanced in favor of the stronger modality, but the
                 weaker modality was able to affect the bimodal estimate
                 in some scenes.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Akyuz:2016:PME,
  author =       "Ahmet Oguz Aky{\"u}z and Osman Kaya",
  title =        "A Proposed Methodology for Evaluating {HDR} False
                 Color Maps",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2911986",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Color mapping, which involves assigning colors to the
                 individual elements of an underlying data distribution,
                 is a commonly used method for data visualization.
                 Although color maps are used in many disciplines and
                 for a variety of tasks, in this study we focus on its
                 usage for visualizing luminance maps. Specifically, we
                 ask ourselves the question of how to best visualize a
                 luminance distribution encoded in a high-dynamic-range
                 (HDR) image using false colors such that the resulting
                 visualization is the most descriptive. To this end, we
                 first propose a definition for descriptiveness. We then
                 propose a methodology to evaluate it subjectively.
                 Then, we propose an objective metric that correlates
                 well with the subjective evaluation results. Using this
                 metric, we evaluate several false coloring strategies
                 using a large number of HDR images. Finally, we conduct
                 a second psychophysical experiment using images
                 representing a diverse set of scenes. Our results
                 indicate that the luminance compression method has a
                 significant effect and the commonly used logarithmic
                 compression is inferior to histogram equalization.
                 Furthermore, we find that the default color scale of
                 the Radiance global illumination software consistently
                 performs well when combined with histogram
                 equalization. On the other hand, the commonly used
                 rainbow color scale was found to be inferior. We
                 believe that the proposed methodology is suitable for
                 evaluating future color mapping strategies as well.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
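
%%% The abstract above finds histogram equalization superior to
%%% logarithmic compression for mapping HDR luminance onto a false
%%% color scale. A minimal Python sketch of the equalization step;
%%% the bin count and interpolation scheme are assumptions of this
%%% sketch:
%%%
%%%     import numpy as np
%%%
%%%     def equalized_index(luminance, n_bins=1024):
%%%         # Rank-preserving map of HDR luminance to [0, 1], made
%%%         # approximately uniform via the empirical CDF; the result
%%%         # can then index any color scale.
%%%         L = np.asarray(luminance, dtype=float).ravel()
%%%         hist, edges = np.histogram(L, bins=n_bins)
%%%         cdf = np.cumsum(hist).astype(float)
%%%         cdf /= cdf[-1]
%%%         t = np.interp(L, edges[1:], cdf)
%%%         return t.reshape(np.shape(luminance))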

@Article{Marentakis:2016:TID,
  author =       "Georgios Marentakis and Cathryn Griffiths and Stephen
                 Mcadams",
  title =        "Top-Down Influences in the Detection of Spatial
                 Displacement in a Musical Scene",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2911985",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigated the detection of sound displacement in
                 a four-voice musical piece under conditions that
                 manipulated the attentional setting (selective or
                 divided attention), the sound source numerosity, the
                 spatial dispersion of the voices, and the tonal
                 complexity of the piece. Detection was easiest when
                 each voice was played in isolation and performance
                 deteriorated when source numerosity increased and
                 uncertainty with respect to the voice in which
                 displacement would occur was introduced. Restricting
                 the area occupied by the voices improved performance in
                 agreement with the auditory spotlight hypothesis as did
                 reducing the tonal complexity of the piece. Performance
                 under increased numerosity conditions depended on the
                 voice in which displacement occurred. The results
                 highlight the importance of top-down processes in the
                 context of the detection of spatial displacement in a
                 musical scene.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Meyer:2016:SVC,
  author =       "Benjamin Meyer and Steve Grogorick and Mark Vollrath
                 and Marcus Magnor",
  title =        "Simulating Visual Contrast Reduction during Nighttime
                 Glare Situations on Conventional Displays",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2934684",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Bright glare in nighttime situations strongly
                 decreases human contrast perception. Nighttime
                 simulations therefore require a way to realistically
                 depict contrast perception of the user. Due to the
                 limited luminance of popular as well as specialized
                 high-dynamic range displays, physical adaptation of the
                 human eye cannot yet be replicated in a physically
                 correct manner in a simulation environment. To overcome
                 this limitation, we propose a method to emulate the
                 adaptation in nighttime glare situations using a
                 perception-based model. We implemented a postprocessing
                 tone mapping algorithm that simulates the corresponding
                 contrast reduction effect for a night-driving
                 simulation with glares from oncoming vehicles'
                 headlights. During glare, tone mapping reduces image
                 contrast in accordance with the incident veiling
                 luminance. As the glare expires, the contrast starts to
                 normalize smoothly over time. The conversion of glare
                 parameters and elapsed time into image contrast during
                 the readaptation phase is based on extensive user
                 studies carried out first in a controlled laboratory
                 setup. Additional user studies have then been conducted
                 in field tests to ensure validity of the derived
                 time-dependent tone-mapping function and to verify
                 transferability onto real-world traffic scenarios.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
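
%%% For context on the contrast reduction simulated in the entry
%%% above: a classical description of disability glare is the
%%% Stiles--Holladay approximation, in which a glare source producing
%%% illuminance $E_{gl}$ at the eye, seen at angle $\theta$ (in
%%% degrees) from the line of sight, adds a veiling luminance
%%%
%%%     L_v \approx 10 \, E_{gl} / \theta^2,
%%%
%%% which attenuates effective (Weber) contrast by the factor
%%% $L_b / (L_b + L_v)$ for background luminance $L_b$. The article's
%%% time-dependent tone-mapping function is derived from user
%%% studies, not from this formula.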

@Article{Blissing:2016:EVL,
  author =       "Bj{\"o}rn Blissing and Fredrik Bruzelius and Olle
                 Eriksson",
  title =        "Effects of Visual Latency on Vehicle Driving
                 Behavior",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2971320",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Using mixed reality in vehicles provides a potential
                 alternative to using driving simulators when studying
                 driver-vehicle interaction. However, virtual reality
                 systems introduce latency in the visual system that may
                 alter driving behavior, which, in turn, results in
                 questionable validity. Previous studies have mainly
                 focused on visual latency as a separate phenomenon. In
                 this work, latency is studied from a task-dependent
                 viewpoint to investigate how participants' driving
                 behavior changed with increased latency. In this study,
                 the investigation was performed through experiments in
                 which regular drivers were subjected to different
                 levels of visual latency while performing a simple
                 slalom driving task. The drivers' performances were
                 recorded and evaluated in both lateral and longitudinal
                 directions along with self-assessment questionnaires
                 regarding task performance and difficulty. All
                 participants managed to complete the driving tasks
                 successfully, even under high latency conditions, but
                 were clearly affected by the increased visual latency.
                 The results suggest that drivers compensate for longer
                 latencies by steering more and increasing the safety
                 margins but without reducing their speed.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bernardo:2016:IQU,
  author =       "Marco V. Bernardo and Ant{\'o}nio M. G. Pinheiro and
                 Paulo T. Fiadeiro and Manuela Pereira",
  title =        "Image Quality under Chromatic Impairments",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2964908",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Aug 25 07:23:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The influence of chromatic impairments on the
                 perceived image quality is studied in this article.
                 Under the D65 standard illuminant, a set of
                 hyperspectral images were represented into the CIELAB
                 color space, and the corresponding chromatic
                  coordinates were subdivided into clusters with the
                  $k$-means algorithm. Each color cluster was shifted by a
                 predefined chromatic impairment $ \Delta E^*_{ab} $
                 with random direction in $ a^* b^* $ chromatic
                 coordinates only. Applying impairments of 3, 6, 9, 12,
                 and 15 in $ a^* b^* $ coordinates to five hyperspectral
                  images, a set of modified images was generated. Those
                  images were shown to subjects who were asked to rank
                 their quality based on their naturalness. The Mean
                 Opinion Score of the subjective evaluations was
                 computed to quantify the sensitivity to the chromatic
                 variations. This article is also complemented with an
                 objective evaluation of the quality using several
                  state-of-the-art metrics, including the CIEDE2000 color
                  difference. Analyzing the correlations
                 between subjective and objective quality evaluation
                 helps us to conclude that the proposed quality
                 estimators based on the CIEDE2000 provide the best
                 representation. Moreover, it was concluded that the
                 established quality metrics only become reliable by
                 averaging their results on each color component.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
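
%%% Editorial sketch: the impairment procedure above (cluster a*b*
%%% coordinates with k-means, then shift each cluster by a fixed
%%% Delta E in a random a*b* direction), written as a short Python
%%% sketch under an assumed data layout; this is not the authors' code:
%%%
%%%     import numpy as np
%%%     from sklearn.cluster import KMeans
%%%
%%%     def shift_ab_clusters(lab, n_clusters=8, delta_e=6.0, seed=0):
%%%         # lab: (N, 3) CIELAB samples; only a* and b* are shifted,
%%%         # so the shift magnitude equals Delta E*_ab exactly
%%%         rng = np.random.default_rng(seed)
%%%         ab = lab[:, 1:3]
%%%         labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(ab)
%%%         out = lab.copy()
%%%         for k in range(n_clusters):
%%%             angle = rng.uniform(0.0, 2.0 * np.pi)
%%%             shift = delta_e * np.array([np.cos(angle), np.sin(angle)])
%%%             out[labels == k, 1:3] += shift
%%%         return out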

@Article{VargasMartin:2016:DSF,
  author =       "Miguel {Vargas Martin} and Victor Cho and Gabriel
                 Aversano",
  title =        "Detection of Subconscious Face Recognition Using
                 Consumer-Grade Brain-Computer Interfaces",
  journal =      j-TAP,
  volume =       "14",
  number =       "1",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2955097",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 29 06:47:37 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We test the possibility of tapping the subconscious
                 mind for face recognition using consumer-grade BCIs. To
                 this end, we performed an experiment whereby subjects
                 were presented with photographs of famous persons with
                 the expectation that about 20\% of them would be
                 (consciously) recognized; and since the photos are of
                 famous persons, we expected that subjects would have
                  previously seen some of the 80\% they didn't (consciously)
                 recognize. Further, we expected that their subconscious
                 would have recognized some of those in the 80\% pool
                 that they had seen before. An exit questionnaire and a
                  set of criteria allowed us to label responses as
                  conscious recognitions, false recognitions, no
                  recognitions, or subconscious recognitions. We
                  analyzed a number of event-related potentials,
                  training and testing a support vector machine. We
                  found that our method is capable of
                 differentiating between no recognitions and
                 subconscious recognitions with promising accuracy
                 levels, suggesting that tapping the subconscious mind
                 for face recognition is feasible.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
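
%%% Editorial sketch: the classification step above (a support vector
%%% machine over event-related potential features) in scikit-learn,
%%% with random placeholder data standing in for real EEG epochs; the
%%% feature extraction and labels are assumptions, not the authors'
%%% pipeline:
%%%
%%%     import numpy as np
%%%     from sklearn.svm import SVC
%%%     from sklearn.model_selection import cross_val_score
%%%
%%%     rng = np.random.default_rng(0)
%%%     X = rng.normal(size=(60, 32))    # placeholder ERP feature vectors
%%%     y = rng.integers(0, 2, size=60)  # 0 = no recognition,
%%%                                      # 1 = subconscious recognition
%%%     scores = cross_val_score(SVC(kernel="linear"), X, y, cv=5)
%%%     print(scores.mean())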

@Article{Balas:2017:SSM,
  author =       "Benjamin Balas and Catherine Conlin and Dylan
                 Shipman",
  title =        "Summary Statistics and Material Categorization in the
                 Visual Periphery",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2967498",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Material categorization from natural texture images
                 proceeds quickly and accurately, supporting a number of
                 visual and motor behaviors. In real-world settings,
                 mechanisms for material categorization must function
                 effectively based on the input from foveal vision,
                 where image representation is high fidelity, and the
                 input from peripheral vision, which is comparatively
                 impoverished. What features support successful material
                 categorization in the visual periphery, given the known
                  reductions in acuity and contrast sensitivity, and the
                  other lossy transforms that reduce the fidelity of image
                 representations? In general, the visual features that
                 support material categorization remain largely unknown,
                 but recent work suggests that observers' abilities in a
                 number of tasks that depend on peripheral vision can be
                 accounted for by assuming that the visual system has
                 access to only summary statistics (texture-like
                 descriptors) of image structure. We therefore
                 hypothesized that a model of peripheral vision based on
                 the Portilla-Simoncelli texture synthesis algorithm
                 might account for material categorization abilities in
                 the visual periphery. Using natural texture images and
                 synthetic images made from these stimuli, we compared
                 performance across material categories to determine
                 whether observer performance with natural inputs could
                 be predicted by their performance with synthetic images
                 that reflect the constraints of a texture code.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Jones:2017:VFV,
  author =       "J. Adam Jones and David M. Krum and Mark T. Bolas",
  title =        "Vertical Field-of-View Extension and Walking
                 Characteristics in Head-Worn Virtual Environments",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2983631",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we detail a series of experiments
                 that examines the effect of vertical field-of-view
                 extension and the addition of non-specific peripheral
                 visual stimulation on gait characteristics and distance
                 judgments in a head-worn virtual environment.
                 Specifically, we examined four field-of-view
                 configurations: a common 60${}^\circ $ diagonal field
                 of view (48${}^\circ $ $ \times $ 40${}^\circ $), a
                 60${}^\circ $ diagonal field of view with the addition
                 of a luminous white frame in the far periphery, a field
                 of view with an extended upper edge, and a field of
                 view with an extended lower edge. We found that
                 extension of the field of view, either with spatially
                 congruent or spatially non-informative visuals,
                 resulted in improved distance judgments and changes in
                 observed posture. However, these effects were not equal
                 across all field-of-view configurations, suggesting
                 that some configurations may be more appropriate than
                 others when balancing performance, cost, and
                 ergonomics.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Perrotin:2017:SLD,
  author =       "Olivier Perrotin and Christophe D'Alessandro",
  title =        "Seeing, Listening, Drawing: Interferences between
                 Sensorimotor Modalities in the Use of a Tablet Musical
                 Interface",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2990501",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Audio, visual, and proprioceptive actions are involved
                 when manipulating a graphic tablet musical interface.
                 Previous works suggested a possible dominance of the
                 visual over the auditory modality in this situation.
                 The main goal of the present study is to examine the
                 interferences between these modalities in visual,
                 audio, and audio-visual target acquisition tasks.
                 Experiments are based on a movement replication
                 paradigm, where a subject controls a cursor on a screen
                 or the pitch of a synthesized sound by changing the
                 stylus position on a covered graphic tablet. The
                 experiments consisted of the following tasks: (1) a
                 target acquisition task that was aimed at a visual
                 target (reaching a cue with the cursor displayed on a
                 screen), an audio target (reaching a reference note by
                 changing the pitch of the sound played in headsets), or
                 an audio-visual target, and (2) the replication of the
                 target acquisition movement in the opposite direction.
                 In the return phase, visual and audio feedback were
                 suppressed. Different gain factors perturbed the
                 relationships among the stylus movements, visual cursor
                 movements, and audio pitch movements. The deviations
                 between acquisition and return movements were analyzed.
                 The results showed that hand amplitudes varied in
                 accordance with visual, audio, and audio-visual
                 perturbed gains, showing a larger effect for the visual
                 modality. This indicates that visual, audio, and
                 audio-visual actions interfered with the motor modality
                 and confirms the spatial representation of pitch
                 reported in previous studies. In the audio-visual
                 situation, vision dominated over audition, as the
                 latter had no significant influence on motor movement.
                 Consequently, visual feedback is helpful for musical
                 targeting of pitch on a graphic tablet, at least during
                 the learning phase of the instrument. This result is
                 linked to the underlying spatial organization of pitch
                 perception. Finally, this work brings a complementary
                 approach to previous studies showing that audition may
                 dominate over vision for other aspects of musical sound
                 (e.g., timing, rhythm, and timbre).",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Guo:2017:SOV,
  author =       "Jinjiang Guo and Vincent Vidal and Irene Cheng and
                  Anup Basu and Atilla Baskurt and Guillaume Lavou{\'e}",
  title =        "Subjective and Objective Visual Quality Assessment of
                 Textured {$3$D} Meshes",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2996296",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Objective visual quality assessment of 3D models is a
                 fundamental issue in computer graphics. Quality
                 assessment metrics may allow a wide range of processes
                  to be guided and evaluated, such as level-of-detail
                 creation, compression, filtering, and so on. Most
                 computer graphics assets are composed of geometric
                 surfaces on which several texture images can be mapped
                 to make the rendering more realistic. While some
                 quality assessment metrics exist for geometric
                 surfaces, almost no research has been conducted on the
                 evaluation of texture-mapped 3D models. In this
                 context, we present a new subjective study to evaluate
                 the perceptual quality of textured meshes, based on a
                 paired comparison protocol. We introduce both texture
                 and geometry distortions on a set of 5 reference models
                 to produce a database of 136 distorted models,
                 evaluated using two rendering protocols. Based on
                 analysis of the results, we propose two new metrics for
                  visual quality assessment of textured meshes, as
                 optimized linear combinations of accurate geometry and
                 texture quality measurements. These proposed perceptual
                 metrics outperform their counterparts in terms of
                 correlation with human opinion. The database, along
                 with the associated subjective scores, will be made
                 publicly available online.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Janeh:2017:WVR,
  author =       "Omar Janeh and Eike Langbehn and Frank Steinicke and
                 Gerd Bruder and Alessandro Gulberti and Monika
                 Poetter-Nerger",
  title =        "Walking in Virtual Reality: Effects of Manipulated
                 Visual Self-Motion on Walking Biomechanics",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3022731",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Walking constitutes the predominant form of
                 self-propelled movement from one geographic location to
                 another in our real world. Likewise, walking in virtual
                  environments (VEs) is an essential part of a user's
                 experience in many application domains requiring a high
                 degree of interactivity. However, researchers and
                 practitioners often observe that basic implementations
                 of virtual walking, in which head-tracked movements are
                 mapped isometrically to a VE are not estimated as
                 entirely natural. Instead, users estimate a virtual
                 walking velocity as more natural when it is slightly
                  increased compared to the user's physical body movement.
                 In this article, we investigate the effects of such
                 nonisometric mappings between physical movements and
                 virtual motions in the VE on walking velocity and
                 biomechanics of the gait cycle. Therefore, we performed
                 an experiment in which we measured and analyzed
                 parameters of the biomechanics of walking under
                 conditions with isometric as well as nonisometric
                 mappings. Our results show significant differences in
                 most gait parameters when walking in the VE in the
                 isometric mapping condition compared to the
                 corresponding parameters in the real world. For
                 nonisometric mappings we found an increased divergence
                 of gait parameters depending on the velocity of visual
                 self-motion feedback. The results revealed a
                 symmetrical effect of gait detriments for up- or
                  down-scaled virtual velocities, which we discuss in
                  light of previous findings.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Faul:2017:TPU,
  author =       "Franz Faul",
  title =        "Toward a Perceptually Uniform Parameter Space for
                 Filter Transparency",
  journal =      j-TAP,
  volume =       "14",
  number =       "2",
  pages =        "13:1--13:??",
  month =        feb,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3022732",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 8 10:32:27 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Filter models of perceptual transparency relate to
                  regularities in the retinal projections caused by
                  light-transmitting objects like clear liquids or glass and
                 have been found to predict the color conditions for
                 perceptual transparency more accurately than
                 alternative models. An important but unsolved problem
                 is how exactly the model parameters are related to the
                 properties of the perceived transparent layer. We
                 previously proposed a parametrization in terms of hue,
                  saturation, overall transmittance, and clarity of the
                 filter that seems to capture important dimensions of
                 the phenomenal impressions. However, these parameters
                 are not independent and the corresponding scales are
                 not perceptually uniform. Here, an invertible
                 transformation of this parameter space is proposed that
                 strongly mitigates these problems. This results in a
                 more intuitively interpretable parameter set that seems
                 well suited for the analysis of existing stimuli and
                 the generation of transparent overlays with predefined
                 perceptual properties. The latter property makes it
                 suitable for graphics and visualization applications.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Piorkowski:2017:ADG,
  author =       "Rafa{\l} Pi{\'o}rkowski and Rados{\l}aw Mantiuk and Adam
                 Siekawa",
  title =        "Automatic Detection of Game Engine Artifacts Using
                 Full Reference Image Quality Metrics",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3047407",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Contemporary game engines offer outstanding graphics
                  quality, but they are not free from visual artifacts.
                  A typical example is aliasing, which, despite advanced
                  antialiasing techniques, is still visible to game
                  players. Notable deteriorations are shadow acne and
                  peter panning, which stem from deficiencies of the
                  shadow mapping technique. Z-fighting, caused by the
                  incorrect ordering of drawn polygons, also
                  significantly degrades the quality of the graphics
                  and makes gameplay more difficult. In this work, we
                  propose a technique in which the visibility of
                  deteriorations is uncovered by objective image
                  quality metrics (IQMs). We test the efficiency of a
                 simple mathematically based metric and advanced IQMs: a
                 Spatial extension of CIELAB (S-CIELAB), the Structural
                 SIMilarity Index (SSIM), the Multiscale Structural
                 SIMilarity Index (MS-SSIM), and the High Dynamic Range
                 Visual Difference Predictor-2 (HDR-VDP-2).
                 Additionally, we evaluate the Color Image Difference
                 (CID) metric, which is recommended to detect the
                 differences in colors. To find out which metric is the
                 most effective for the detection of the game engine
                 artifacts, we build a database of manually marked
                 images with representative set of artifacts. We conduct
                 subjective experiments in which people manually mark
                 the visible local artifacts in the screenshots from the
                 games. Then the detection maps averaged over a number
                 of observers are compared with results generated by
                  IQMs. The obtained results show that the SSIM and
                  MS-SSIM metrics outperform the other techniques.
                  However, the results are not indisputable: for small
                  and scattered aliasing artifacts, the HDR-VDP-2 metric
                  reports results most consistent with the average human
                 observer. As a proof of concept, we propose an
                 application in which resolution of the shadow maps is
                 controlled by the SSIM metric to avoid perceptually
                 visible aliasing artifacts on the shadow edges.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
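
%%% Editorial sketch: flagging local artifacts with a full-reference
%%% IQM, here the per-pixel SSIM map from scikit-image; the threshold
%%% and grayscale inputs are illustrative, not the paper's protocol:
%%%
%%%     from skimage.metrics import structural_similarity
%%%
%%%     def artifact_map(reference, rendered, threshold=0.9):
%%%         # reference: artifact-free render; rendered: engine output;
%%%         # both grayscale float arrays in [0, 1]
%%%         score, ssim_map = structural_similarity(
%%%             reference, rendered, data_range=1.0, full=True)
%%%         return score, ssim_map < threshold   # True where artifacts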

@Article{Homayouni:2017:RIS,
  author =       "Maryam Homayouni and Payman Aflaki and Miska M.
                 Hannuksela and Moncef Gabbouj",
  title =        "Row-Interleaved Sampling for Depth-Enhanced {$3$D}
                 Video Coding for Polarized Displays",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3047409",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Passive stereoscopic displays create the illusion of
                 three dimensions by employing orthogonal polarizing
                 filters and projecting two images onto the same screen.
                 In this article, a coding scheme targeting
                 depth-enhanced stereoscopic video coding for polarized
                 displays is introduced. We propose to use asymmetric
                 row-interleaved sampling for texture and depth views
                 prior to encoding. The performance of the proposed
                 scheme is compared with several other schemes, and the
                 objective results confirm the superior performance of
                 the proposed method. Furthermore, subjective evaluation
                  confirms that no quality degradation is introduced by the
                 proposed coding scheme compared to the reference
                 method.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
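
%%% Editorial sketch: one plausible reading of row interleaving, which
%%% packs alternate rows of a texture view and its depth map into a
%%% single frame before encoding; the paper's asymmetric sampling of
%%% texture versus depth may differ from this symmetric toy version:
%%%
%%%     import numpy as np
%%%
%%%     def row_interleave(texture, depth):
%%%         # texture, depth: (H, W) arrays of equal shape
%%%         frame = np.empty_like(texture)
%%%         frame[0::2] = texture[0::2]   # even rows carry texture
%%%         frame[1::2] = depth[1::2]     # odd rows carry depth
%%%         return frame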

@Article{Katsunuma:2017:FAC,
  author =       "Takafumi Katsunuma and Keita Hirai and Takahiko
                 Horiuchi",
  title =        "Fabric Appearance Control System for Example-Based
                 Interactive Texture and Color Design",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3054953",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Texture and color are important factors of fabric
                 appearance. A system that could intuitively manipulate
                 and design fabric texture and color would be a very
                 powerful tool. This article presents an interactive
                 fabric appearance design system that modulates the
                 texture patterns of input fabric example images and
                 transfers the color patterns from other input images
                 onto them. For this purpose, we propose a method to
                 synthesize a natural texture image based on our
                 findings from subjective experiments: (1) intensity and
                 its deviation of two input images are significantly
                 related to the realistic appearance of synthesized
                 textures and (2) the spatial-frequency and edge
                 intensity of two different input images significantly
                 influence the natural appearance of synthesized texture
                 perception. In our procedure, first, the texture
                 pattern of an input fabric image is modulated in terms
                 of undulation, thickness, and roughness. Next, we
                 transfer the color pattern of an original color image
                 onto the modulated texture pattern in the YIQ color
                 space. To perform this color transfer, we use the IQ
                 component of the color image. To reduce the unnatural
                 appearance of the output color-transfer image, we
                 remove the high-frequency components of the original
                 color image. In addition, the Y component of the
                 color-transfer image is obtained by adding the
                 deviation of the texture pattern Y component to the
                 texture pattern of the color image. These algorithms
                 for reducing unnaturalness and synthesizing images were
                 developed based on our findings from several subjective
                 experiments on natural appearance. Finally, we
                 implemented our algorithm on a smart device. Our system
                 allows us to interactively design the texture and color
                 of fabric by using images.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
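
%%% Editorial sketch: the YIQ color transfer at the core of the system
%%% above, keeping luma (Y) from the fabric texture and taking chroma
%%% (IQ) from the color image; a reduction of the paper's pipeline that
%%% omits its frequency filtering and deviation terms:
%%%
%%%     import numpy as np
%%%
%%%     RGB2YIQ = np.array([[0.299,  0.587,  0.114],
%%%                         [0.596, -0.274, -0.322],
%%%                         [0.211, -0.523,  0.312]])
%%%
%%%     def transfer_color(texture_rgb, color_rgb):
%%%         # both inputs: (H, W, 3) float arrays in [0, 1]
%%%         tex = texture_rgb @ RGB2YIQ.T    # to YIQ
%%%         col = color_rgb @ RGB2YIQ.T
%%%         out = np.dstack([tex[..., 0], col[..., 1], col[..., 2]])
%%%         return out @ np.linalg.inv(RGB2YIQ).T   # back to RGB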

@Article{Williams:2017:ACM,
  author =       "Duncan Williams and Alexis Kirke and Eduardo Miranda
                 and Ian Daly and Faustina Hwang and James Weaver and
                 Slawomir Nasuto",
  title =        "Affective Calibration of Musical Feature Sets in an
                 Emotionally Intelligent Music Composition System",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3059005",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Affectively driven algorithmic composition (AAC) is a
                 rapidly growing field that exploits computer-aided
                 composition in order to generate new music with
                 particular emotional qualities or affective intentions.
                 An AAC system was devised in order to generate a
                 stimulus set covering nine discrete sectors of a
                 two-dimensional emotion space by means of a 16-channel
                 feed-forward artificial neural network. This system was
                 used to generate a stimulus set of short pieces of
                 music, which were rendered using a sampled piano timbre
                 and evaluated by a group of experienced listeners who
                 ascribed a two-dimensional valence-arousal coordinate
                 to each stimulus. The underlying musical feature set,
                 initially drawn from the literature, was subsequently
                 adjusted by amplifying or attenuating the quantity of
                 each feature in order to maximize the spread of stimuli
                 in the valence-arousal space before a second listener
                 evaluation was conducted. This process was repeated a
                 third time in order to maximize the spread of
                 valence-arousal coordinates ascribed to the generated
                 stimulus set in comparison to a spread taken from an
                 existing prerated database of stimuli, demonstrating
                 that this prototype AAC system is capable of creating
                  short sequences of music with a slight improvement over
                  the range of emotion found in a stimulus set
                  comprising real-world, traditionally composed musical
                 excerpts.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2017:HDS,
  author =       "Yi-Na Li and Kang Zhang and Dong-Jin Li",
  title =        "How Dimensional and Semantic Attributes of Visual Sign
                 Influence Relative Value Estimation",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3059006",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High-quality decision making requires accurate
                 estimation of relative values. The perceptual bias when
                 estimating relative values displayed by a visual sign
                 may weaken the accuracy and cause misjudgment. This
                 research explores the heuristic estimation of relative
                 values using visual cues, namely linear, areal, and
                 volumetric information. We conduct experiments to
                 empirically test the influences of dimensional
                 information on perceptual biases. First, we investigate
                 the conspicuity of areal information. Our experiments
                 indicate that the responses of participants instructed
                 to estimate rates defined by either linear or
                 volumetric information are biased by the corresponding
                 rates determined by areal information. Second, visual
                 cues implying three-dimensional information (e.g.,
                 depth) can lead to overestimation. Third, we probe the
                 influence of vividness as the boundary condition on
                 relative value estimation. Empirical evidence on
                 perceptual bias sheds light on the pragmatics of visual
                 signs, helps suggest guidelines for visual persuasions,
                 and improves decision-making quality.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rafian:2017:RSA,
  author =       "Paymon Rafian and Gordon E. Legge",
  title =        "Remote Sighted Assistants for Indoor Location Sensing
                 of Visually Impaired Pedestrians",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3047408",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Because indoor navigation is difficult for people with
                 visual impairment, there is a need for the development
                 of assistive technology. Indoor location sensing, the
                 ability to identify a pedestrian's location and
                 orientation, is a key component of such technology. We
                 tested the accuracy of a potential crowdsourcing-based
                 indoor location sensing method. Normally sighted
                 subjects were asked to identify the location and facing
                 direction of photos taken by a pedestrian in a
                  building. The subjects were given a floor plan and a
                 small number of representative photos from key
                 locations within the floor plan. Subjects were able to
                 provide accurate location estimates (median location
                  accuracy 3.87 ft). This finding indicates that normally
                 sighted subjects, with minimal training, using a simple
                 graphical representation of a floor plan, can provide
                 accurate location estimates based on a single, suitable
                 photo taken by a pedestrian. We conclude that indoor
                 localization is possible using remote, crowdsourced,
                 human assistance. This method has the potential to be
                 used for the location-sensing component of an indoor
                 navigation aid for people with visual impairment.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhang:2017:CAE,
  author =       "Jiajing Zhang and Jinhui Yu and Kang Zhang and Xianjun
                 Sam Zheng and Junsong Zhang",
  title =        "Computational Aesthetic Evaluation of Logos",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "20:1--20:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3058982",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Computational aesthetics has become an active research
                 field in recent years, but there have been few attempts
                  at computational aesthetic evaluation of logos. In this
                  article, we restrict our study to black-and-white
                 logos, which are professionally designed for name-brand
                 companies with similar properties, and apply perceptual
                 models of standard design principles in computational
                 aesthetic evaluation of logos. We define a group of
                 metrics to evaluate some aspects in design principles
                 such as balance, contrast, and harmony of logos. We
                 also collect human ratings of balance, contrast,
                 harmony, and aesthetics of 60 logos from 60 volunteers.
                 Statistical linear regression models are trained on
                 this database using a supervised machine-learning
                 method. Experimental results show that our
                 model-evaluated balance, contrast, and harmony have
                  a highly significant correlation of over 0.87 with human
                 evaluations on the same dimensions. Finally, we regress
                 human-evaluated aesthetics scores on model-evaluated
                  balance, contrast, and harmony. The resulting regression
                 model of aesthetics can predict human judgments on
                 perceived aesthetics with a high correlation of 0.85.
                 Our work provides a machine-learning-based reference
                 framework for quantitative aesthetic evaluation of
                  graphic design patterns, and it also supports research
                  exploring the relationship between human aesthetic
                  perception and the computational evaluation of design
                  principles extracted from graphic designs.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
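
%%% Editorial sketch: the final regression step described above,
%%% predicting human aesthetics ratings from model-evaluated balance,
%%% contrast, and harmony; the numbers below are placeholders, not the
%%% paper's data:
%%%
%%%     import numpy as np
%%%     from sklearn.linear_model import LinearRegression
%%%
%%%     X = np.array([[0.71, 0.42, 0.63],   # balance, contrast, harmony
%%%                   [0.55, 0.81, 0.37],   # (one row per logo)
%%%                   [0.62, 0.50, 0.74]])
%%%     y = np.array([3.9, 3.1, 4.2])       # mean human aesthetics scores
%%%     model = LinearRegression().fit(X, y)
%%%     print(model.coef_, model.predict(X))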

@Article{Noceti:2017:EBM,
  author =       "Nicoletta Noceti and Francesca Odone and Alessandra
                 Sciutti and Giulio Sandini",
  title =        "Exploring Biological Motion Regularities of Human
                 Actions: a New Perspective on Video Analysis",
  journal =      j-TAP,
  volume =       "14",
  number =       "3",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3086591",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The ability to detect potentially interacting agents
                 in the surrounding environment is acknowledged to be
                 one of the first perceptual tasks developed by humans,
                 supported by the ability to recognise biological
                 motion. The precocity of this ability suggests that it
                 might be based on rather simple motion properties, and
                 it can be interpreted as an atomic building block of
                 more complex perception tasks typical of interacting
                  scenarios, such as the understanding of non-verbal
                 communication cues based on motion or the anticipation
                 of others' action goals. In this article, we propose a
                 novel perspective for video analysis, bridging
                 cognitive science and machine vision, which leverages
                 the use of computational models of the perceptual
                 primitives that are at the basis of biological motion
                 perception in humans. Our work offers different
                  contributions. In the first part, we propose an empirical
                 formulation for the Two-Thirds Power Law, a well-known
                 invariant law of human movement, and thoroughly discuss
                 its readability in experimental settings of increasing
                 complexity. In particular, we consider unconstrained
                 video analysis scenarios, where, to the best of our
                 knowledge, the invariant law has not found application
                 so far. The achievements of this analysis pave the way
                 for the second part of the work, in which we propose
                 and evaluate a general representation scheme for
                 biological motion characterisation to discriminate
                  biological movements from non-biological
                 dynamic events in video sequences. The method is
                 proposed as the first layer of a more complex
                 architecture for behaviour analysis and human-machine
                 interaction, providing in particular a new way to
                 approach the problem of human action understanding.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
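
%%% Editorial note: the Two-Thirds Power Law referenced above states
%%% that angular velocity A and curvature C of human movement satisfy
%%% A = K * C^(2/3), equivalently tangential velocity v = K * kappa^(-1/3).
%%% A sketch that estimates the exponent from a sampled 2D trajectory,
%%% under an assumed data layout (not the paper's empirical formulation):
%%%
%%%     import numpy as np
%%%
%%%     def power_law_exponent(x, y, dt):
%%%         dx, dy = np.gradient(x, dt), np.gradient(y, dt)
%%%         ddx, ddy = np.gradient(dx, dt), np.gradient(dy, dt)
%%%         v = np.hypot(dx, dy)                       # tangential speed
%%%         kappa = np.abs(dx * ddy - dy * ddx) / np.maximum(v**3, 1e-12)
%%%         m = (kappa > 1e-8) & (v > 1e-8)
%%%         slope, _ = np.polyfit(np.log(kappa[m]), np.log(v[m]), 1)
%%%         return -slope   # the law predicts a value near 1/3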

@Article{Tauscher:2017:CAT,
  author =       "Jan-Philipp Tauscher and Maryam Mustafa and Marcus
                 Magnor",
  title =        "Comparative Analysis of Three Different Modalities for
                 Perception of Artifacts in Videos",
  journal =      j-TAP,
  volume =       "14",
  number =       "4",
  pages =        "22:1--22:??",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129289",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This study compares three popular modalities for
                  analyzing perceived video quality: user ratings, eye
                 tracking, and EEG. We contrast these three modalities
                 for a given video sequence to determine if there is a
                 gap between what humans consciously see and what we
                 implicitly perceive. Participants are shown a video
                 sequence with different artifacts appearing at specific
                  distances in their field of vision: near foveal, middle
                 peripheral, and far peripheral. Our results show
                 distinct differences between what we saccade to (eye
                 tracking), how we consciously rate video quality, and
                 our neural responses (EEG data). Our findings indicate
                 that the measurement of perceived quality depends on
                 the specific modality used.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Breeden:2017:GDA,
  author =       "Katherine Breeden and Pat Hanrahan",
  title =        "Gaze Data for the Analysis of Attention in Feature
                 Films",
  journal =      j-TAP,
  volume =       "14",
  number =       "4",
  pages =        "23:1--23:??",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3127588",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Film directors are masters at controlling what we look
                 at when we watch a film. However, there have been few
                 quantitative studies of how gaze responds to
                 cinematographic conventions thought to influence
                 attention. We have collected and are releasing a
                 dataset designed to help investigate eye movements in
                 response to higher level features such as faces,
                 dialogue, camera movements, image composition, and
                  edits. The dataset includes gaze information for 21
                  viewers watching 15 clips from live-action 2D films,
                  which have been hand-annotated for high-level
                  features. This work has implications for media
                  studies, display technology, immersive reality, and
                  human cognition.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Alexanderson:2017:MIE,
  author =       "Simon Alexanderson and Carol O'Sullivan and Michael
                 Neff and Jonas Beskow",
  title =        "{Mimebot} --- Investigating the Expressibility of
                 Non-Verbal Communication Across Agent Embodiments",
  journal =      j-TAP,
  volume =       "14",
  number =       "4",
  pages =        "24:1--24:??",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3127590",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Unlike their human counterparts, artificial agents
                 such as robots and game characters may be deployed with
                 a large variety of face and body configurations. Some
                 have articulated bodies but lack facial features, and
                 others may be talking heads ending at the neck.
                 Generally, they have many fewer degrees of freedom than
                 humans through which they must express themselves, and
                 there will inevitably be a filtering effect when
                 mapping human motion onto the agent. In this article,
                 we investigate filtering effects on three types of
                 embodiments: (a) an agent with a body but no facial
                 features, (b) an agent with a head only, and (c) an
                 agent with a body and a face. We performed a full
                 performance capture of a mime actor enacting short
                 interactions varying the non-verbal expression along
                 five dimensions (e.g., level of frustration and level
                 of certainty) for each of the three embodiments. We
                 performed a crowd-sourced evaluation experiment
                 comparing the video of the actor to the video of an
                 animated robot for the different embodiments and
                 dimensions. Our findings suggest that the face is
                 especially important to pinpoint emotional reactions
                  but is also the most sensitive to filtering effects. The
                 body motion, on the other hand, had more diverse
                 interpretations but tended to preserve the
                 interpretation after mapping and thus proved to be more
                 resilient to filtering.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Albert:2017:LRF,
  author =       "Rachel Albert and Anjul Patney and David Luebke and
                 Joohwan Kim",
  title =        "Latency Requirements for Foveated Rendering in Virtual
                 Reality",
  journal =      j-TAP,
  volume =       "14",
  number =       "4",
  pages =        "25:1--25:??",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3127589",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Foveated rendering is a performance optimization based
                 on the well-known degradation of peripheral visual
                 acuity. It reduces computational costs by showing a
                 high-quality image in the user's central (foveal)
                 vision and a lower quality image in the periphery.
                 Foveated rendering is a promising optimization for
                 Virtual Reality (VR) graphics, and generally requires
                 accurate and low-latency eye tracking to ensure
                 correctness even when a user makes large, fast eye
                 movements such as saccades. However, due to the
                 phenomenon of saccadic omission, it is possible that
                 these requirements may be relaxed. In this article, we
                 explore the effect of latency for foveated rendering in
                 VR applications. We evaluated the detectability of
                 visual artifacts for three techniques capable of
                 generating foveated images and for three different
                 radii of the high-quality foveal region. Our results
                 show that larger foveal regions allow for more
                 aggressive foveation, but this effect is more
                 pronounced for temporally stable foveation techniques.
                 Added eye tracking latency of 80--150ms causes a
                  significant reduction in the acceptable amount of
                 foveation, but a similar decrease in acceptable
                 foveation was not found for shorter eye-tracking
                 latencies of 20--40ms, suggesting that a total system
                 latency of 50--70ms could be tolerated.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gao:2017:FBQ,
  author =       "Xihe Gao and Stephen Brooks and Dirk V. Arnold",
  title =        "A Feature-Based Quality Metric for Tone Mapped
                 Images",
  journal =      j-TAP,
  volume =       "14",
  number =       "4",
  pages =        "26:1--26:??",
  month =        sep,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129675",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Sep 19 10:26:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "With the development of high-dynamic-range images and
                 tone mapping operators comes a need for image quality
                 evaluation of tone mapped images. However, because of
                 the significant difference in dynamic range between
                 high-dynamic-range images and tone mapped images,
                 conventional image quality assessment algorithms that
                 predict distortion based on the magnitude of intensity
                 or normalized contrast are not suitable for this task.
                 In this article, we present a feature-based quality
                 metric for tone mapped images that predicts the
                 perceived quality by measuring the distortion in
                 important image features that affect quality judgment.
                 Our metric utilizes multi-exposed virtual photographs
                 taken from the original high-dynamic-range images to
                 bridge the gap between dynamic ranges in image feature
                 analysis. By combining measures for brightness
                 distortion, visual saliency distortion, and detail
                 distortion in light and dark areas, the metric measures
                 the overall perceptual distortion and assigns a score
                 to a tone mapped image. Experiments on a subject-rated
                 database indicate that the proposed metric is more
                 consistent with subjective evaluation results than
                 alternative approaches.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
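
%%% Editorial sketch: the multi-exposed ``virtual photographs'' used by
%%% the metric above can be simulated by exposing and gamma-encoding
%%% the original HDR image at several EV offsets; the exposure values
%%% and gamma are assumptions for illustration:
%%%
%%%     import numpy as np
%%%
%%%     def virtual_exposures(hdr, evs=(-2, 0, 2), gamma=2.2):
%%%         # hdr: (H, W) or (H, W, 3) linear-light array
%%%         return [np.clip(hdr * 2.0**ev, 0.0, 1.0)**(1.0 / gamma)
%%%                 for ev in evs]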

@Article{Abebe:2017:PLM,
  author =       "Mekides Assefa Abebe and Tania Pouli and
                 Mohamed-Chaker Larabi and Erik Reinhard",
  title =        "Perceptual Lightness Modeling for High-Dynamic-Range
                 Imaging",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "1:1--1:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3086577",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The human visual system (HVS) non-linearly processes
                 light from the real world, allowing us to perceive
                 detail over a wide range of illumination. Although
                 models that describe this non-linearity are constructed
                 based on psycho-visual experiments, they generally
                 apply to a limited range of illumination and therefore
                 may not fully explain the behavior of the HVS under
                 more extreme illumination conditions. We propose a
                 modified experimental protocol for measuring visual
                 responses to emissive stimuli that requires neither
                 participant training nor the exclusion of non-expert
                 participants. Furthermore, the protocol can
                 be applied to stimuli covering an extended luminance
                 range. Based on the outcome of our experiment, we
                 propose a new model describing lightness response over
                 an extended luminance range. The model can be
                 integrated with existing color appearance models or
                 perceptual color spaces. To demonstrate the
                 effectiveness of our model in high dynamic range
                 applications, we evaluate its suitability for dynamic
                 range expansion relative to existing solutions.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kelly:2017:PSH,
  author =       "Jonathan W. Kelly and Lucia A. Cherep and Zachary D.
                 Siegel",
  title =        "Perceived Space in the {HTC} Vive",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "2:1--2:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3106155",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Underperception of egocentric distance in virtual
                 reality has been a persistent concern for almost 20
                 years. Modern head-mounted displays (HMDs) appear to
                 have begun to ameliorate underperception. The current
                 study examined several aspects of perceived space in
                 the HTC Vive. Blind-walking distance judgments, verbal
                 distance judgments, and size judgments were measured in
                 two distinct virtual environments (VEs), a high-quality
                 replica of a real classroom and an empty grass field, as
                 well as in the real classroom upon which the classroom VE
                 was modeled. A brief walking interaction was also
                 examined as an intervention for improving anticipated
                 underperception in the VEs. Results from the Vive were
                 compared to existing data using two older HMDs (nVisor
                 SX111 and ST50). Blind-walking judgments were more
                 accurate in the Vive compared to the older displays,
                 and did not differ substantially from the real world
                 or across VEs. Size judgments were more accurate in
                 the classroom VE than the grass VE and in the Vive
                 compared to the older displays. Verbal judgments were
                 significantly smaller in the classroom VE compared to
                 the real classroom and did not significantly differ
                 across VEs. Blind-walking and size judgments were more
                 accurate after walking interaction, but verbal
                 judgments were unaffected. The results indicate that
                 underperception of distance in the HTC Vive is less
                 than in older displays but has not yet been completely
                 resolved. With more accurate space perception afforded
                 by modern HMDs, alternative methods for improving
                 judgments of perceived space, such as walking
                 interaction, may no longer be necessary.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kneusel:2017:IHM,
  author =       "Ronald T. Kneusel and Michael C. Mozer",
  title =        "Improving Human-Machine Cooperative Visual Search With
                 Soft Highlighting",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "3:1--3:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129669",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Advances in machine learning have produced systems
                 that attain human-level performance on certain visual
                 tasks, e.g., object identification. Nonetheless, other
                 tasks requiring visual expertise are unlikely to be
                 entrusted to machines for some time, e.g., satellite
                 and medical imagery analysis. We describe a
                 human-machine cooperative approach to visual search,
                 the aim of which is to outperform either human or
                 machine acting alone. The traditional route to
                 augmenting human performance with automatic classifiers
                 is to draw boxes around regions of an image deemed
                 likely to contain a target. Human experts typically
                 reject this type of hard highlighting. We propose
                 instead a soft highlighting technique in which the
                 saliency of regions of the visual field is modulated in
                 a graded fashion based on classifier confidence level.
                 We report on experiments with both synthetic and
                 natural images showing that soft highlighting achieves
                 a performance synergy surpassing that attained by hard
                 highlighting.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ferstl:2017:FFN,
  author =       "Ylva Ferstl and Elena Kokkinara and Rachel Mcdonnell",
  title =        "Facial Features of Non-player Creatures Can Influence
                 Moral Decisions in Video Games",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "4:1--4:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129561",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "With the development of increasingly sophisticated
                 computer graphics, there is a continuous growth of the
                 variety and originality of virtual characters used in
                 movies and games. So far, however, their design has
                 mostly been led by the artist's preferences, not by
                 perceptual studies. In this article, we explored how
                 non-player character design can be used effectively to
                 influence gameplay. In particular, we focused on
                 abstract virtual characters with few facial features.
                 In experiment 1, we sought to find rules for how to use
                 a character's facial features to elicit the perception
                 of certain personality traits, using prior findings for
                 human face perception as a basis. In experiment 2, we
                 then tested how perceived personality traits of a
                 non-player character could influence a player's moral
                 decisions in a video game. We found that the appearance
                 of the character interacting with the subject modulated
                 aggressive behavior towards a non-present individual.
                 Our results provide a better understanding of the
                 perception of abstract virtual characters and their
                 employment in video games, as well as some insight
                 into the factors underlying aggressive behavior in
                 video games.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vanhoey:2017:VQA,
  author =       "Kenneth Vanhoey and Basile Sauvage and Pierre Kraemer
                 and Guillaume Lavou{\'e}",
  title =        "Visual Quality Assessment of {$3$D} Models: On the
                 Influence of Light-Material Interaction",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "5:1--5:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129505",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Geometric modifications of three-dimensional (3D)
                 digital models are commonplace for the purpose of
                 efficient rendering or compact storage. Modifications
                 imply visual distortions that are hard to measure
                 numerically. They depend not only on the model itself
                 but also on how the model is visualized. We hypothesize
                 that the model's light environment and the way it
                 reflects incoming light strongly influence perceived
                 quality. Hence, we conduct a perceptual study
                 demonstrating that the same modifications can be
                 masked, or conversely highlighted, by different
                 light-matter interactions. Additionally, we propose a
                 new metric that predicts the perceived distortion of 3D
                 modifications for a known interaction. It operates in
                 the space of 3D meshes with the object's appearance,
                 that is, the light emitted by its surface in any
                 direction given a known incoming light. Despite its
                 simplicity, this metric outperforms 3D mesh metrics and
                 competes with sophisticated perceptual image-based
                 metrics in terms of correlation to subjective
                 measurements. Unlike image-based methods, it has the
                 advantage of being computable prior to the costly
                 rendering steps of image projection and rasterization
                 of the scene for given camera parameters.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zannoli:2017:PCC,
  author =       "Marina Zannoli and Martin S. Banks",
  title =        "The Perceptual Consequences of Curved Screens",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "6:1--6:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3106012",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Flat panels are by far the most common type of
                 television screen. There are reasons, however, to
                 believe that curved screens create a greater sense of
                 immersion, reduce distracting reflections, and minimize
                 some perceptual distortions that are commonplace with
                 large televisions. To examine these possibilities, we
                 calculated how curving the screen affects the field of
                 view and the probability of seeing reflections of
                 ambient lights. We find that screen curvature has a
                 small beneficial effect on field of view and a large
                 beneficial effect on the probability of seeing
                 reflections. We also collected behavioral data to
                 characterize perceptual distortions in various viewing
                 configurations. We find that curved screens can in fact
                 reduce problematic perceptual distortions on large
                 screens, but that the benefit depends on the geometry
                 of the projection on such screens.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Berman:2017:EUS,
  author =       "Lewis Berman and Keith Gallagher and Suzanne
                 Kozaitis",
  title =        "Evaluating the Use of Sound in Static Program
                 Comprehension",
  journal =      j-TAP,
  volume =       "15",
  number =       "1",
  pages =        "7:1--7:??",
  month =        nov,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129456",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jan 22 17:39:41 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/java2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Comprehension of computer programs is daunting, due in
                 part to clutter in the software developer's visual
                 environment and the need for frequent visual context
                 changes. Previous research has shown that nonspeech
                 sound can be useful in understanding the runtime
                 behavior of a program. We explore the viability and
                 advantages of using nonspeech sound in an ecological
                 framework to help understand the static structure of
                 software. We describe a novel concept for auditory
                 display of program elements in which sounds indicate
                 characteristics and relationships among a Java
                 program's classes, interfaces, and methods. An
                 empirical study employing this concept was used to
                 evaluate 24 sighted software professionals and students
                 performing maintenance-oriented tasks using a $2 \times 2$
                 crossover design. Viability is strong for differentiation
                 and characterization of software entities, less so for
                 identification. The results suggest that sonification
                 can be advantageous under certain conditions, though
                 they do not indicate the overall advantage of using
                 sound in terms of task duration at a 5\% level of
                 significance. The results uncover other findings such
                 as differences in comprehension strategy based on the
                 available tool environment. The participants reported
                 enthusiasm for the idea of software sonification,
                 mitigated by lack of familiarity with the concept and
                 the brittleness of the tool. Limitations of the present
                 research include restriction to particular types of
                 comprehension tasks, a single sound mapping, a single
                 programming language, and limited training time, but
                 the use of sound in program comprehension shows
                 sufficient promise for continued research.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Jiang:2018:ATJ,
  author =       "Yuanyuan Jiang and Elizabeth E. O'neal and Junghum
                 Paul Yon and Luke Franzen and Pooya Rahimian and Jodie
                 M. Plumert and Joseph K. Kearney",
  title =        "Acting Together: Joint Pedestrian Road Crossing in an
                 Immersive Virtual Environment",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3147884",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigated how two people jointly coordinate
                 their decisions and actions in a co-occupied,
                 large-screen virtual environment. The task for
                 participants was to physically cross a virtual road
                 with continuous traffic without getting hit by a car.
                 Participants performed this task either alone or with
                 another person (see Figure 1). Two separate streams of
                 non-stereo images were generated based on the dynamic
                 locations of the two viewers' eyepoints. Stereo
                 shutter glasses were programmed to display a single
                 image stream to each viewer so that they saw
                 perspectively correct non-stereo images for their
                 eyepoint. We found that participant pairs often crossed
                 the same gap together and closely synchronized their
                 movements when crossing. Pairs also chose larger gaps
                 than individuals, presumably to accommodate the extra
                 time needed to cross through gaps together. These
                 results demonstrate how two people interact and
                 coordinate their behaviors in performing whole-body,
                 joint motions in a co-occupied virtual environment.
                 This study also provides a foundation for future
                 studies examining joint actions in shared VEs where
                 participants are represented by graphic avatars.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Aygar:2018:CSM,
  author =       "Erol Aygar and Colin Ware and David Rogers",
  title =        "The Contribution of Stereoscopic and Motion Depth Cues
                 to the Perception of Structures in {$3$D} Point
                 Clouds",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3147914",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Particle-based simulations are used across many
                 science domains, and it is well known that stereoscopic
                 viewing and kinetic depth enhance our ability to
                 perceive the 3D structure of such data. But the
                 relative advantages of stereo and kinetic depth have
                 not been studied for point cloud data, although they
                 have been studied for 3D networks. This article reports
                 two experiments assessing human ability to perceive 3D
                 structures in point clouds as a function of different
                 viewing parameters. In the first study, the number of
                 discrete views was varied to determine the extent to
                 which smooth motion is needed. Also, half the trials
                 had stereoscopic viewing and half had no stereo. The
                 results showed kinetic depth to be more beneficial than
                 stereo viewing in terms of accuracy, so long as the
                 motion was smooth. The second experiment varied the
                 amplitude of oscillatory motion from 0 to 16 degrees.
                 The results showed an increase in detection rate with
                 amplitude, with the best amplitudes being 4 degrees and
                 greater. Overall, motion was shown to yield greater
                 accuracy, but at the expense of longer response times
                 in comparison with stereoscopic viewing.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bochereau:2018:PCR,
  author =       "S{\'e}r{\'e}na Bochereau and Stephen Sinclair and
                 Vincent Hayward",
  title =        "Perceptual Constancy in the Reproduction of Virtual
                 Tactile Textures With Surface Displays",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3152764",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "For very rough surfaces, friction-induced vibrations
                 contain frequencies that change in proportion to
                 sliding speed. Given the poor capacity of the
                 somatosensory system to discriminate frequencies, this
                 fact raises the question of how accurately finger
                 sliding speed must be known during the reproduction of
                 virtual textures with a surface tactile display. During
                 active touch, ten observers were asked to discriminate
                 texture recordings corresponding to different speeds.
                 The samples were constructed from a common texture,
                 which was resampled at various frequencies to give a
                 set of stimuli of different swiping speeds. In trials,
                 they swiped their finger in rapid succession over a
                 glass plate, which vibrated to accurately reproduce
                 three texture recordings. Two of these recordings were
                 identical and a third differed in that the sample
                 represented a texture swiped at a speed different from
                 the other two. Observers identified which of the three
                 samples felt different. For a metal mesh texture
                 recording, seven observers reported differences when
                 the speed varied by 60, 80, and 100mm/s while the other
                 three did not reach a discrimination threshold. For a
                 finer leather chamois texture recording, thresholds
                 were never reached in the 100mm/s range. These results
                 show that the need for high-accuracy measurement of
                 swiping speed during texture reproduction may actually
                 be quite limited compared to what is commonly found in
                 the literature.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kelly:2018:CTM,
  author =       "Jonathan W. Kelly and Lucia A. Cherep and Brenna
                 Klesel and Zachary D. Siegel and Seth George",
  title =        "Comparison of Two Methods for Improving Distance
                 Perception in Virtual Reality",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3165285",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Distance is commonly underperceived in virtual
                 environments (VEs) compared to real environments. Past
                 work suggests that displaying a replica VE based on the
                 real surrounding environment leads to more accurate
                 judgments of distance, but that work has lacked the
                 necessary control conditions to firmly make this
                 conclusion. Other research indicates that walking
                 through a VE with visual feedback improves judgments of
                 distance and size. This study evaluated and compared
                 those two methods for improving perceived distance in
                 VEs. All participants experienced a replica VE based on
                 the real lab. In one condition, participants visually
                 previewed the real lab prior to experiencing the
                 replica VE, and in another condition they did not.
                 Participants performed blind-walking judgments of
                 distance and also judgments of size in the replica VE
                 before and after walking interaction. Distance
                 judgments were more accurate in the preview condition
                 compared to the no-preview condition, but size judgments were
                 unaffected by visual preview. Distance judgments and
                 size judgments increased after walking interaction, and
                 the improvement was larger for distance than for size
                 judgments. After walking interaction, distance
                 judgments did not differ based on visual preview, and
                 walking interaction led to a larger improvement in
                 judged distance than did visual preview. These data
                 suggest that walking interaction may be more effective
                 than visual preview as a method for improving perceived
                 space in a VE.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2018:EPV,
  author =       "Bochao Li and James Walker and Scott A. Kuhl",
  title =        "The Effects of Peripheral Vision and Light Stimulation
                 on Distance Judgments Through {HMDs}",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3165286",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Egocentric distances are often underestimated in
                 virtual environments through head-mounted displays
                 (HMDs). Previous studies suggest that peripheral vision
                 can influence distance perception. Specifically, light
                 in the periphery may improve distance judgments in
                 HMDs. In this study, we conducted a series of
                 experiments with varied peripheral treatments around
                 the viewport. First, we found that the peripheral
                 brightness significantly influences distance judgments
                 when the periphery is brighter than a certain
                 threshold, and we identified a possible range in which
                 that threshold lies. Second, we extended our previous
                 research by changing the size of the peripheral
                 treatment. A larger visual field (field of view of the
                 HMD) resulted in significantly more accurate distance
                 judgments compared to our original experiments with
                 black peripheral treatment. Last, we found that
                 applying a pixelated peripheral treatment can also
                 improve distance judgments. The result implies that
                 augmenting peripheral vision with secondary
                 low-resolution displays may improve distance judgments
                 in HMDs.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Moffat:2018:PES,
  author =       "David Moffat and Joshua D. Reiss",
  title =        "Perceptual Evaluation of Synthesized Sound Effects",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "13:1--13:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3165287",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Sound synthesis is the process of generating
                 artificial sounds through some form of simulation or
                 modelling. This article aims to identify which sound
                 synthesis methods achieve the goal of producing a
                 believable audio sample that may replace a recorded
                 sound sample. A perceptual evaluation experiment of
                 five different sound synthesis techniques was
                 undertaken. Additive synthesis, statistical modelling
                 synthesis with two different feature sets, physically
                 inspired synthesis, concatenative synthesis, and
                 sinusoidal modelling synthesis were all compared.
                 Evaluation using eight different sound class stimuli
                 and 66 different samples was undertaken. The additive
                 synthesizer is the only synthesis method not considered
                 significantly different from the reference sample
                 across all sound classes. The results demonstrate that
                 synthesized sound can be considered as realistic as a
                 recorded sample, and the article makes recommendations
                 for the use of synthesis methods in different sound
                 class contexts.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Singhal:2018:CTI,
  author =       "Anshul Singhal and Lynette A. Jones",
  title =        "Creating Thermal Icons --- a Model-Based Approach",
  journal =      j-TAP,
  volume =       "15",
  number =       "2",
  pages =        "14:1--14:??",
  month =        apr,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3182175",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The objective of this set of experiments was to
                 evaluate thermal pattern recognition on the hand and
                 arm and to determine which features of thermal stimuli
                 are encoded by cutaneous thermoreceptors and perceived
                 by the user of a thermal display. Thermal icons were
                 created by varying the direction, rate, and magnitude
                 of change in temperature. It was found that thermal
                 icons were identified more accurately when presented on
                 the thenar eminence or the wrist, as compared to the
                 fingertips, and that thermal patterns as brief as 8s
                 could be reliably identified. In these experiments,
                 there was no difference in performance when identifying
                 warm or cool stimuli. A dynamic model of the change in
                 skin temperature as a function of the thermal input was
                 developed based on linear system identification
                 techniques. This model was able to predict the change
                 in skin temperature from an unrelated experiment
                 involving thermal icons. This opens the possibility of
                 using a model-based approach to the development of
                 thermal icons.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schmidtler:2018:HPI,
  author =       "Jonas Schmidtler and Moritz K{\"o}rber",
  title =        "Human Perception of Inertial Mass for Joint
                 Human-Robot Object Manipulation",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3182176",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we investigate human perception of
                 inertial mass discrimination in active planar
                 manipulations, as they are common in daily tasks, such
                 as moving heavy and bulky objects. Psychophysical
                 experiments were conducted to develop a human inertial
                 mass perception model to improve usability and
                 acceptance of novel haptically collaborating robotic
                 systems. In contrast to existing literature,
                 large-scale movements involving a broad selection of
                 reference stimuli and larger sample sizes were used.
                 Linear mixed models were fitted to model dependent
                 errors from the longitudinal perceptual data.
                 Differential thresholds near the perception boundary
                 exponentially increased and resulted in constant
                 behavior for higher stimuli. No effect of different
                 directions (sagittal and transversal) was found;
                 however, a large effect of different movement types
                 (precise and imprecise) was present in the data.
                 Recommendations to implement the findings in novel
                 physical assist devices are given.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Aviles-Rivero:2018:SSF,
  author =       "Angelica I. Aviles-Rivero and Samar M. Alsaleh and
                 John Philbeck and Stella P. Raventos and Naji Younes
                 and James K. Hahn and Alicia Casals",
  title =        "Sensory Substitution for Force Feedback Recovery: a
                 Perception Experimental Study",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3176642",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Robotic-assisted surgeries are commonly used today as
                 a more efficient alternative to traditional surgical
                 options. Both surgeons and patients benefit from those
                 systems, as they offer many advantages, including less
                 trauma and blood loss, fewer complications, and better
                 ergonomics. However, a remaining limitation of
                 currently available surgical systems is the lack of
                 force feedback due to the teleoperation setting, which
                 prevents direct interaction with the patient. Once the
                 force information is obtained by either a sensing
                 device or indirectly through vision-based force
                 estimation, a concern arises on how to transmit this
                 information to the surgeon. An attractive alternative
                 is sensory substitution, which allows transcoding
                 information from one sensory modality to present it in
                 a different sensory modality. In the current work, we
                 used visual feedback to convey interaction forces to
                 the surgeon. Our overarching goal was to address the
                 following question: How should interaction forces be
                 displayed to support efficient comprehension by the
                 surgeon without interfering with the surgeon's
                 perception and workflow during surgery? Until now, the
                 use of the visual modality for force feedback has not been
                 carefully evaluated. For this reason, we conducted an
                 experimental study with two aims: (1) to demonstrate
                 the potential benefits of using this modality and (2)
                 to understand the surgeons' perceptual preferences. The
                 results derived from our study of 28 surgeons revealed
                 a strong positive acceptance (96\%) of this modality
                 among the users. Moreover, we found that for surgeons to
                 easily interpret the information, their mental model
                 must be considered, meaning that the design of the
                 visualizations should fit the perceptual and cognitive
                 abilities of the end user. To our knowledge, this is
                 the first time that these principles have been analyzed
                 for exploring sensory substitution in medical robotics.
                 Finally, we provide user-centered recommendations for
                 the design of visual displays for robotic surgical
                 systems.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lyu:2018:NIM,
  author =       "Yongqiang Lyu and Xiao Zhang and Xiaomin Luo and Ziyue
                 Hu and Jingyu Zhang and Yuanchun Shi",
  title =        "Non-Invasive Measurement of Cognitive Load and Stress
                 Based on the Reflected Stress-Induced Vascular Response
                 Index",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3185665",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Measuring cognitive load and stress is crucial for
                 ubiquitous human--computer interaction applications to
                 dynamically understand and respond to the mental status
                 of users, such as in smart healthcare, smart driving,
                 and robotics. Various quantitative methods have been
                 employed for this purpose, such as physiological and
                 behavioral methods. However, the sensitivity,
                 reliability, and usability are not satisfactory in many
                 of the current methods, so they are not ideal for
                 ubiquitous applications. In this study, we employed a
                 reflected photoplethysmogram-based stress-induced
                 vascular response index, i.e., the reflected sVRI
                 (sVRI-r), to non-invasively measure the cognitive load
                 and stress. This method has high usability as well as
                 good sensitivity and reliability compared with the
                 previously proposed transmitted sVRI (sVRI-t). We
                 developed the basic methodology and detailed algorithm
                 framework to validate the sVRI-r measurements, and it
                 was implemented by employing two light sources, i.e.,
                 infrared light and green light. Compared with the
                 simultaneously recorded blood pressure, heart rate
                 variation, and sVRI-t, our findings demonstrated the
                 greater potential of the sVRI-r for use as a sensitive,
                 reliable, and usable parameter, and suggested its
                 potential integration with ubiquitous touch
                 interactions for dynamic cognition and stress-sensing
                 scenarios.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chapiro:2018:ISS,
  author =       "Alexandre Chapiro and Timo Kunkel and Robin Atkins and
                 Scott Daly",
  title =        "Influence of Screen Size and Field of View on
                 Perceived Brightness",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3190346",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a study into the perception of display
                 brightness as related to the physical size and distance
                 of the screen from the observer. Brightness perception
                 is a complex topic, which is influenced by a number of
                 lower- and higher-order factors-with empirical evidence
                 from the cinema industry suggesting that display size
                 may play a significant role. To test this hypothesis,
                 we conducted a series of user studies exploring
                 brightness perception for a range of displays and
                 distances from the observer that span representative
                 use scenarios. Our results suggest that retinal size is
                 not sufficient to explain the range of discovered
                 brightness variations, but is sufficient in combination
                 with physical distance from the observer. The resulting
                 model can be used as a step toward perceptually
                 correcting image brightness perception based on target
                 display parameters. This can be leveraged for energy
                 management and the preservation of artistic intent. A
                 pilot study suggests that adaptation luminance is an
                 additional factor for the magnitude of the effect.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lau:2018:HPS,
  author =       "Manfred Lau and Kapil Dev and Julie Dorsey and Holly
                 Rushmeier",
  title =        "A Human-Perceived Softness Measure of Virtual {$3$D}
                 Objects",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3193107",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We introduce the problem of computing a
                 human-perceived softness measure for virtual 3D
                 objects. As the virtual objects do not exist in the
                 real world, we do not directly consider their physical
                 properties but instead compute the human-perceived
                 softness of the geometric shapes. In an initial
                 experiment, we find that humans are highly consistent
                 in their responses when given a pair of vertices on a
                 3D model and asked to select the vertex that they
                 perceive to be softer. This motivates us to adopt a
                 crowdsourcing and machine-learning framework. We
                 collect crowdsourced data for such pairs of vertices.
                 We then combine a learning-to-rank approach and a
                 multi-layer neural network to learn a non-linear
                 softness measure mapping any vertex to a softness
                 value. For a new 3D shape, we can use the learned
                 measure to compute the relative softness of every
                 vertex on its surface. We demonstrate the robustness of
                 our framework with a variety of 3D shapes and compare
                 our non-linear learning approach with a linear method
                 from previous work. Finally, we demonstrate the
                 accuracy of our learned measure with user studies
                 comparing our measure with the human-perceived softness
                 of both virtual and real objects, and we show the
                 usefulness of our measure with some applications.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Agethen:2018:BAH,
  author =       "Philipp Agethen and Viswa Subramanian Sekar and Felix
                 Gaisbauer and Thies Pfeiffer and Michael Otto and
                 Enrico Rukzio",
  title =        "Behavior Analysis of Human Locomotion in the Real
                 World and Virtual Reality for the Manufacturing
                 Industry",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3230648",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "With the rise of immersive visualization techniques,
                 many domains within the manufacturing industry are
                 increasingly validating production processes in virtual
                 reality (VR). The validity of the results gathered in
                 such simulations, however, is largely unknown, in
                 particular with regard to human locomotion behavior.
                 To bridge this gap, this article presents an experiment
                 analyzing the behavioral disparity between human
                 locomotion being performed without any equipment and in
                 immersive VR while wearing a head-mounted display
                 (HMD). The presented study (n = 30) is split into
                 three sections and covers linear walking, non-linear
                 walking, and obstacle avoidance. Special care has been
                 given to design the experiment so that findings are
                 generally valid and can be applied to a wide range of
                 domains beyond the manufacturing industry. The findings
                 provide novel insights into the effect of immersive VR
                 on specific gait parameters. In total, a comprehensive
                 sample of 18.09km is analyzed. The results reveal that
                 the HMD had a medium effect (up to 13\%) on walking
                 velocity, on non-linear walking toward an oriented
                 target, and on clearance distance. The overall
                 differences are modeled using multiple regression
                 models, thus allowing the general usage within various
                 domains. Summarizing, it can be concluded that VR can
                 be used to analyze and plan human locomotion; however,
                 specific details may have to be adjusted to transfer
                 findings to the real world.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Buck:2018:CDE,
  author =       "Lauren E. Buck and Mary K. Young and Bobby
                 Bodenheimer",
  title =        "A Comparison of Distance Estimation in {HMD}-Based
                 Virtual Environments with Different {HMD}-Based
                 Conditions",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "21:1--21:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3196885",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Underestimation of egocentric distances in immersive
                 virtual environments using various head-mounted
                 displays (HMDs) has been a puzzling topic of research
                 interest for several years. As more commodity-level
                 systems become available to developers, it is important
                 to test the variation of underestimation in each system
                 since reasons for underestimation remain elusive. In
                 this article, we examine several different systems in
                 two experiments and comparatively evaluate how much
                 users underestimate distances in each one. To observe
                 distance estimation behavior, a standard indirect blind
                 walking task was used. An Oculus Rift DK1, weighted
                 Oculus Rift DK1, Oculus Rift DK1 with an artificially
                 restricted field of view, Nvis SX60, Nvis SX111, Oculus
                 Rift DK2, Oculus Rift consumer version (CV1), and HTC
                 Vive were tested. The weighted and restricted field of
                 view HMDs were evaluated to determine the effect of
                 these factors on distance underestimation; the other
                 systems were evaluated because they are popular systems
                 that are widely available. We found that weight and
                 field of view restrictions heightened underestimation
                 in the Rift DK1. Results from these conditions were
                 comparable to the Nvis SX60 and SX111. The Oculus Rift
                 DK1 and CV1 possessed the least amount of distance
                 underestimation, but in general, commodity-level HMDs
                 provided more accurate estimates of distance than the
                 prior generation of HMDs.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Seifi:2018:TAH,
  author =       "Hasti Seifi and Mattew Chun and Karon E. Maclean",
  title =        "Toward Affective Handles for Tuning Vibrations",
  journal =      j-TAP,
  volume =       "15",
  number =       "3",
  pages =        "22:1--22:??",
  month =        aug,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3230645",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:24 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When refining or personalizing a design, we count on
                 being able to modify or move an element by changing its
                 parameters rather than creating it anew in a different
                 form or location, a standard utility in graphic and
                 auditory authoring tools. Similarly, we need to tune
                 vibrotactile sensations to fit new use cases,
                 distinguish members of communicative icon sets, and
                 personalize items. For tactile vibration display,
                 however, we lack knowledge of the human perceptual
                 mappings that must underlie such tools. Based on
                 evidence that affective dimensions are a natural way to
                 tune vibrations for practical purposes, we attempted to
                 manipulate perception along three emotion dimensions (
                 agitation, liveliness, and strangeness ) using
                 engineering parameters of hypothesized relevance.
                 Results from two user studies show that an automatable
                 algorithm can increase a vibration's perceived
                 agitation and liveliness to different degrees via
                 signal energy, while increasing its discontinuity or
                 randomness makes it more strange. These continuous
                 mappings apply across diverse base vibrations; the
                 extent of achievable emotion change varies. These
                 results illustrate the potential for developing
                 vibrotactile emotion controls as efficient tuning for
                 designers and end-users.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2018:ISI,
  author =       "Bernhard Riecke and Joseph Kearny",
  title =        "Introduction to Special Issue {SAP 2018}",
  journal =      j-TAP,
  volume =       "15",
  number =       "4",
  pages =        "23:1--23:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274477",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274477",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Jorg:2018:PAE,
  author =       "Sophie J{\"o}rg and Andrew Duchowski and Krzysztof
                 Krejtz and Anna Niedzielska",
  title =        "Perceptual Adjustment of Eyeball Rotation and Pupil
                 Size Jitter for Virtual Characters",
  journal =      j-TAP,
  volume =       "15",
  number =       "4",
  pages =        "24:1--24:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3238302",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Eye motions constitute an important part of our daily
                 face-to-face interactions. Even subtle details in the
                 eyes' motions give us clues about a person's thoughts
                 and emotions. Believable and natural animation of the
                 eyes is therefore crucial when creating appealing
                 virtual characters. In this article, we investigate the
                 perceived naturalness of detailed eye motions, more
                 specifically of jitter of the eyeball rotation and
                 pupil diameter on three virtual characters with
                 differing levels of realism. Participants watched
                 stimuli with six scaling factors from 0 to 1 in
                 increments of 0.2, varying eye rotation and pupil size
                 jitter individually, and they had to indicate if they
                 would like to increase or decrease the level of jitter
                 to make the animation look more natural. Based on
                 participants' responses, we determine the scaling
                 factors for noise attenuation perceived as most natural
                 for each character when using motion-captured eye
                 motions. We compute the corresponding average jitter
                 amplitudes for the eyeball rotation and pupil size to
                 serve as guidelines for other characters. We find that
                 the amplitudes perceived as most natural depend on the
                 character, with our character with a medium level of
                 realism requiring the largest scaling factors.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
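
%%% The stimuli above scale motion-captured jitter by factors from 0
%%% to 1 in steps of 0.2.  A minimal sketch of that attenuation,
%%% assuming signal holds captured eye-rotation (or pupil-size)
%%% samples and baseline their smoothed mean:
%%%
%%%     import numpy as np
%%%
%%%     def attenuate_jitter(signal, baseline, k):
%%%         """Keep a fraction k in [0, 1] of jitter around baseline."""
%%%         return baseline + k * (np.asarray(signal) - baseline)
%%%
%%%     # the six scaling factors used in the study:
%%%     # for k in np.arange(0.0, 1.2, 0.2): ...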

@Article{Ramesh:2018:AHS,
  author =       "Girish Ramesh and Martin Turner and Bj{\"o}rn
                 Schr{\"o}der and Franz Wortmann",
  title =        "Analysis of Hair Shine Using Rendering and Subjective
                 Evaluation",
  journal =      j-TAP,
  volume =       "15",
  number =       "4",
  pages =        "25:1--25:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274478",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274478",
  abstract =     "Hair shine is a highly desirable attribute to
                 consumers within the cosmetic industry and is also an
                 important indicator of hair health. However, perceptual
                 evaluation of shine is a complex task as it is known
                 that even subtle manipulation of local hair properties
                 such as colour, thickness, or style and global
                 properties such as lighting or environment can affect
                 the evaluation. In this article, we are interested in
                 the physical, optical, and chemical characteristics
                 that affect the realism of hair along with the
                 perception of shine. We have constructed a Computer
                 Graphics (CG) setup, based on current physical testing
                 systems, that reduces the number of variables that
                  affect the perception. Physically based shading models
                 were used to create the images that participants
                 assessed on realism, health, naturalness, and shine
                 through three different evaluation experiments. Our
                 results provide new insights on how hair is perceived,
                 the factors that affect its realism, and the potential
                 of using CG techniques in the cosmetic industry to
                 replace physical testing.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Weier:2018:FDF,
  author =       "Martin Weier and Thorsten Roth and Andr{\'e}
                 Hinkenjann and Philipp Slusallek",
  title =        "Foveated Depth-of-Field Filtering in Head-Mounted
                 Displays",
  journal =      j-TAP,
  volume =       "15",
  number =       "4",
  pages =        "26:1--26:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3238301",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3238301",
  abstract =     "In recent years, a variety of methods have been
                 introduced to exploit the decrease in visual acuity of
                 peripheral vision, known as foveated rendering. As more
                 and more computationally involved shading is requested
                 and display resolutions increase, maintaining low
                 latencies is challenging when rendering in a virtual
                 reality context. Here, foveated rendering is a
                 promising approach for reducing the number of shaded
                 samples. However, besides the reduction of the visual
                 acuity, the eye is an optical system, filtering
                 radiance through lenses. The lenses create
                 depth-of-field (DoF) effects when accommodated to
                 objects at varying distances. The central idea of this
                 article is to exploit these effects as a filtering
                 method to conceal rendering artifacts. To showcase the
                 potential of such filters, we present a foveated
                 rendering system, tightly integrated with a
                 gaze-contingent DoF filter. Besides presenting
                 benchmarks of the DoF and rendering pipeline, we
                 carried out a perceptual study, showing that rendering
                 quality is rated almost on par with full rendering when
                 using DoF in our foveated mode, while shaded samples
                 are reduced by more than 69\%.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
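
%%% Gaze-contingent depth-of-field filters such as the one above are
%%% commonly driven by the thin-lens circle of confusion (the paper's
%%% exact filter kernel is not reproduced here).  For focal length f,
%%% aperture diameter A, focus distance s_f, and object distance s,
%%% the blur-circle diameter is, in LaTeX notation:
%%%
%%%     c(s) = A \, \frac{|s - s_f|}{s} \cdot \frac{f}{s_f - f}
%%%
%%% Blurring samples whose c(s) exceeds the pixel footprint is what
%%% lets such a filter conceal foveated-rendering artifacts.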

@Article{Grogorick:2018:CUV,
  author =       "Steve Grogorick and Georgia Albuquerque and
                 Jan-Philipp Tauscher and Marcus Magnor",
  title =        "Comparison of Unobtrusive Visual Guidance Methods in
                 an Immersive Dome Environment",
  journal =      j-TAP,
  volume =       "15",
  number =       "4",
  pages =        "27:1--27:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3238303",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3238303",
  abstract =     "In this article, we evaluate various image-space
                 modulation techniques that aim to unobtrusively guide
                 viewers' attention. While previous evaluations mainly
                 target desktop settings, we examine their applicability
                 to ultrawide field of view immersive environments,
                 featuring technical characteristics expected for
                 future-generation head-mounted displays. A
                 custom-built, high-resolution immersive dome
                 environment with high-precision eye tracking is used in
                 our experiments. We investigate gaze guidance success
                 rates and unobtrusiveness of five different techniques.
                 Our results show promising guiding performance for four
                  of the tested methods. With regard to unobtrusiveness,
                  we find that, while no method remains completely
                  unnoticed, many participants do not report any
                  distractions. The evaluated methods show promise for
                  guiding users' attention in a wide range of virtual
                  environment applications, e.g., virtually guided tours
                  or field operation training.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Filip:2019:PAA,
  author =       "Jir{\'\i} Filip and Martina Kolafov{\'a}",
  title =        "Perceptual Attributes Analysis of Real-world
                 Materials",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301412",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301412",
  abstract =     "Material appearance is often represented by a
                 bidirectional reflectance distribution function (BRDF).
                 Although the concept of the BRDF is widely used in
                 computer graphics and related applications, the number
                 of actual captured BRDFs is limited due to a time and
                 resources demanding measurement process. Several BRDF
                 databases have already been provided publicly, yet
                 subjective properties of underlying captured material
                 samples, apart from single photographs, remain
                 unavailable for users. In this article, we analyzed
                 material samples, used in the creation of the UTIA BRDF
                 database, in a psychophysical study with nine subjects
                 and assessed its 12 visual, tactile, and subjective
                 attributes. Further, we evaluated the relationship
                 between the attributes and six material categories. We
                  consider the presented perceptual analysis valuable and
                  complementary information to the database that could
                  aid users in selecting appropriate materials for their
                  applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kenny:2019:PEI,
  author =       "Sophie Kenny and Naureen Mahmood and Claire Honda and
                 Michael J. Black and Nikolaus F. Troje",
  title =        "Perceptual Effects of Inconsistency in Human
                 Animations",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301411",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301411",
  abstract =     "The individual shape of the human body, including the
                 geometry of its articulated structure and the
                 distribution of weight over that structure, influences
                 the kinematics of a person's movements. How sensitive
                 is the visual system to inconsistencies between shape
                 and motion introduced by retargeting motion from one
                 person onto the shape of another? We used optical
                 motion capture to record five pairs of male performers
                 with large differences in body weight, while they
                 pushed, lifted, and threw objects. From these data, we
                 estimated both the kinematics of the actions as well as
                 the performer's individual body shape. To obtain
                 consistent and inconsistent stimuli, we created
                 animated avatars by combining the shape and motion
                 estimates from either a single performer or from
                 different performers. Using these stimuli we conducted
                 three experiments in an immersive virtual reality
                 environment. First, a group of participants detected
                 which of two stimuli was inconsistent. Performance was
                 very low, and results were only marginally significant.
                 Next, a second group of participants rated perceived
                 attractiveness, eeriness, and humanness of consistent
                 and inconsistent stimuli, but these judgements of
                 animation characteristics were not affected by
                 consistency of the stimuli. Finally, a third group of
                 participants rated properties of the objects rather
                 than of the performers. Here, we found strong
                 influences of shape-motion inconsistency on perceived
                 weight and thrown distance of objects. This suggests
                 that the visual system relies on its knowledge of shape
                 and motion and that these components are assimilated
                 into an altered perception of the action outcome. We
                 propose that the visual system attempts to resist
                 inconsistent interpretations of human animations.
                 Actions involving object manipulations present an
                 opportunity for the visual system to reinterpret the
                 introduced inconsistencies as a change in the dynamics
                 of an object rather than as an unexpected combination
                 of body shape and body motion.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Still:2019:IVS,
  author =       "Jeremiah Still and Mary Still",
  title =        "Influence of Visual Salience on Webpage Product
                 Searches",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301413",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301413",
  abstract =     "Visual salience can increase search efficiency in
                  complex displays, but does that influence persist when
                 completing a specific search? In two experiments,
                 participants were asked to search webpages for the
                 prices of specific products. Those products were
                 located near an area of high visual salience or low
                 visual salience. In Experiment 1, participants were
                 read the name of the product before searching; in
                 Experiment 2, participants were shown an image of the
                 exact product before searching. In both cases,
                 participants completed their search more quickly in the
                 high-salience condition. This was true even when there
                 was no ambiguity about the visual characteristics of
                 the product. Our findings suggest that salience guides
                 users through complex displays under realistic,
                 goal-driven task conditions. Designers can use this
                 knowledge to create interfaces that are easier to
                 search by aligning salience and task-critical
                 elements.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kartashova:2019:LSP,
  author =       "Tatiana Kartashova and Susan F. Te Pas and Huib {De
                 Ridder} and Sylvia C. Pont",
  title =        "Light Shapes: Perception-Based Visualizations of the
                 Global Light Transport",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3232851",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3232851",
  abstract =     "In computer graphics, illuminating a scene is a
                 complex task, typically consisting of cycles of
                 adjusting and rendering the scene to see the effects.
                 We propose a technique for visualization of light as a
                 tensor field via extracting its properties (i.e.,
                 intensity, direction, diffuseness) from (virtual)
                 radiance measurements and showing these properties as a
                 grid of shapes over a volume of a scene. Presented in
                 the viewport, our visualizations give an understanding
                 of the illumination conditions in the measured volume
                 for both the local values and the global variations of
                 light properties. Additionally, they allow quick
                 inferences of the resulting visual appearance of
                 (objects in) scenes without the need to render them. In
                 our evaluation, observers performed at least as well
                 using visualizations as using renderings when they were
                 comparing illumination between parts of a scene and
                 inferring the final appearance of objects in the
                 measured volume. Therefore, the proposed visualizations
                 are expected to help lighting artists by providing
                 perceptually relevant information about the structure
                 of the light field and flow in a scene.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
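
%%% The shapes above summarize measured radiance by local intensity,
%%% direction, and diffuseness.  A first-order sketch of such a
%%% summary (a simplified stand-in, not the authors' estimator):
%%%
%%%     import numpy as np
%%%
%%%     def light_properties(dirs, radiance):
%%%         """dirs: (n, 3) unit vectors; radiance: (n,) samples."""
%%%         intensity = radiance.mean()
%%%         vec = (dirs * radiance[:, None]).mean(axis=0)
%%%         direction = vec / np.linalg.norm(vec)
%%%         diffuseness = 1.0 - np.linalg.norm(vec) / intensity
%%%         return intensity, direction, diffuseness  # in [0, 1]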

@Article{Spicker:2019:QVA,
  author =       "Marc Spicker and Franz G{\"o}tz-Hahn and Thomas
                 Lindemeier and Dietmar Saupe and Oliver Deussen",
  title =        "Quantifying Visual Abstraction Quality for
                 Computer-Generated Illustrations",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301414",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301414",
  abstract =     "We investigate how the perceived abstraction quality
                 of computer-generated illustrations is related to the
                 number of primitives (points and small lines) used to
                 create them. Since it is difficult to find objective
                 functions that quantify the visual quality of such
                 illustrations, we propose an approach to derive
                 perceptual models from a user study. By gathering
                 comparative data in a crowdsourcing user study and
                 employing a paired comparison model, we can reconstruct
                 absolute quality values. Based on an exemplary study
                 for stippling, we show that it is possible to model the
                 perceived quality of stippled representations based on
                 the properties of an input image. The generalizability
                 of our approach is demonstrated by comparing models for
                 different stippling methods. By showing that our
                 proposed approach also works for small lines, we
                 demonstrate its applicability toward quantifying
                 different representational drawing elements. Our
                 results can be related to Weber--Fechner's law from
                 psychophysics and indicate a logarithmic relationship
                 between number of rendering primitives in an
                 illustration and the perceived abstraction quality
                 thereof.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
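
%%% The logarithmic relationship reported above (Weber--Fechner) can
%%% be recovered from reconstructed quality values by a least-squares
%%% fit of q = a ln(n) + b.  A sketch with hypothetical data (the
%%% study's actual scale values are not reproduced here):
%%%
%%%     import numpy as np
%%%
%%%     n = np.array([250, 500, 1000, 2000, 4000, 8000])  # primitives
%%%     q = np.array([0.9, 1.6, 2.4, 3.0, 3.7, 4.3])      # hypothetical
%%%     a, b = np.polyfit(np.log(n), q, 1)  # slope a, intercept b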

@Article{Tennison:2019:NVP,
  author =       "Jennifer L. Tennison and Jenna L. Gorlewicz",
  title =        "Non-visual Perception of Lines on a Multimodal
                 Touchscreen Tablet",
  journal =      j-TAP,
  volume =       "16",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301415",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301415",
  abstract =     "While text-to-speech software has largely made textual
                 information accessible in the digital space, analogous
                 access to graphics still remains an unsolved problem.
                 Because of their portability and ubiquity, several
                 studies have alluded to touchscreens as a potential
                 platform for such access, yet there is still a gap in
                 our understanding of multimodal information transfer in
                 the context of graphics. The current research
                 demonstrates feasibility for following lines, a
                 fundamental graphical concept, via vibrations and
                 sounds on commercial touchscreens. Two studies were run
                  with 21 blind and visually impaired participants (N =
                  12; N = 9). The first study examined the presentation
                 of straight, linear lines using a multitude of line
                 representations, such as vibration-only, auditory-only,
                 vibration lines with auditory borders, and auditory
                 lines with vibration borders. The results of this study
                 demonstrated that both auditory and vibratory bordered
                 lines were optimal for precise tracing, although both
                 vibration- and auditory-only lines were also sufficient
                 for following, with minimal deviations. The second
                 study examined the presentation of curving, non-linear
                 lines. Conditions differed on the number of auditory
                 reference points presented at the inflection and
                 deflection points. Participants showed minimal
                 deviation from the lines during tracing, performing
                 nearly equally in both 1- and 3-point conditions. From
                 these studies, we demonstrate that line following via
                 multimodal feedback is possible on touchscreens, and we
                 present guidelines for the presentation of such
                 non-visual graphical concepts.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Dodge:2019:HDC,
  author =       "Samuel Dodge and Lina Karam",
  title =        "Human and {DNN} Classification Performance on Images
                 With Quality Distortions: a Comparative Study",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3306241",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3306241",
  abstract =     "Image quality is an important practical challenge that
                 is often overlooked in the design of machine vision
                 systems. Commonly, machine vision systems are trained
                 and tested on high-quality image datasets, yet in
                 practical applications the input images cannot be
                 assumed to be of high quality. Modern deep neural
                 networks (DNNs) have been shown to perform poorly on
                 images affected by blur or noise distortions. In this
                 work, we investigate whether human subjects also
                 perform poorly on distorted stimuli and provide a
                 direct comparison with the performance of DNNs.
                 Specifically, we study the effect of Gaussian blur and
                 additive Gaussian noise on human and DNN classification
                 performance. We perform two experiments: one
                 crowd-sourced experiment with unlimited stimulus
                 display time, and one lab experiment with 100ms display
                 time. In both cases, we found that humans outperform
                 neural networks on distorted stimuli, even when the
                 networks are retrained with distorted data.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
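
%%% The two distortion families studied above are straightforward to
%%% reproduce.  A minimal sketch (parameter names are assumptions,
%%% not the authors' protocol) for a float image with values in
%%% [0, 1]:
%%%
%%%     import numpy as np
%%%     from scipy.ndimage import gaussian_filter
%%%
%%%     def distort(img, blur_sigma=0.0, noise_sigma=0.0, seed=0):
%%%         """Apply Gaussian blur, then additive Gaussian noise."""
%%%         rng = np.random.default_rng(seed)
%%%         out = gaussian_filter(img, sigma=blur_sigma)
%%%         out = out + rng.normal(0.0, noise_sigma, size=out.shape)
%%%         return np.clip(out, 0.0, 1.0)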

@Article{Kelly:2019:VSB,
  author =       "Jonathan W. Kelly and Brenna C. Klesel and Lucia A.
                 Cherep",
  title =        "Visual Stabilization of Balance in Virtual Reality
                 Using the {HTC Vive}",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313902",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313902",
  abstract =     "Vision in real environments stabilizes balance
                 compared to an eyes-closed condition. For virtual
                 reality to be safe and fully effective in applications
                 such as physical rehabilitation, vision in virtual
                 reality should stabilize balance as much as vision in
                 the real world. Older virtual reality technology was
                 previously found to stabilize balance but by less than
                 half as much as real-world vision. Recent advancements
                 in display technology might allow for vision in virtual
                 reality to be as stabilizing as vision in the real
                 world. This study evaluated whether viewing a virtual
                  environment through the HTC Vive, a new consumer-grade
                  head-mounted display, stabilizes balance, and whether
                 visual stabilization is similar to that provided by
                 real-world vision. Participants viewed the real
                 laboratory or a virtual replica of the laboratory and
                 attempted to maintain an unstable stance with eyes open
                 or closed while standing at one of two viewing
                 distances. Vision was significantly stabilizing in all
                 conditions, but the virtual environment provided less
                 visual stabilization than did the real environment.
                 Regardless of the environment, near viewing led to
                 greater visual stabilization than did far viewing. The
                 smaller stabilizing influence of viewing a virtual
                 compared to real environment might lead to greater risk
                 of falls in virtual reality and smaller gains in
                 physical rehabilitation using virtual reality.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ferwerda:2019:FHT,
  author =       "James Ferwerda",
  title =        "The {FechDeck}: a Hand Tool for Exploring
                 Psychophysics",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3313186",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3313186",
  abstract =     "Learning the methods of psychophysics is an essential
                 part of training for perceptual experimentation, and
                 hands-on experience is vital, but gaining this
                 experience is difficult because good tools for learning
                 are not available. The FechDeck is an ordinary deck of
                 playing cards that has been modified to support
                 learning the methods of psychophysics. Card backs are
                 printed with noise patterns that span a range of
                 densities. Faces are augmented with line segments
                 arranged in ``L'' patterns. Jokers are printed with
                 ruled faces and with backs that serve as standards.
                 Instructions provided with the FechDeck allow users to
                 perform threshold experiments using Fechner's methods
                 of adjustment, limits, and constant stimuli; scaling
                 experiments using Thurstone's ranking, paired
                 comparison, and successive categories methods; and
                 Stevens's magnitude estimation method. Spreadsheets
                 provided with the deck support easy data entry and
                 meaningful data analysis. An online repository
                 supporting the FechDeck has been established to
                 facilitate dissemination and to encourage open source
                 development of the deck.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
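
%%% Two classical results exercised by the FechDeck can be stated
%%% compactly.  Fechner's law relates sensation S to stimulus
%%% intensity I above threshold I_0; Stevens's power law replaces the
%%% logarithm with a modality-dependent exponent (LaTeX notation):
%%%
%%%     S = k \ln(I / I_0)    (Fechner)
%%%     S = k I^{a}           (Stevens)
%%%
%%% Magnitude-estimation data gathered with the deck can be checked
%%% against the power law by fitting a line to log S versus log I.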

@Article{Kawabe:2019:SBI,
  author =       "Takahiro Kawabe",
  title =        "Shadow-based Illusion of Depth and Transparency in
                 Printed Images",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3342350",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3342350",
  abstract =     "A cast shadow is one of the visual features that serve
                 as a perceptual cue to the three-dimensional (3D)
                 layout of objects. Although it is well known that
                 adding cast shadows to an object produces the illusion
                 that the object has a 3D layout, investigations into
                 this illusion have been limited to virtual objects in a
                 display. Using a light-projection technique, we show
                 that it is possible to create a similar 3D layout
                 illusion for real two-dimensional objects.
                 Specifically, we displayed spatial patterns that look
                 like cast shadows in the vicinity of an object depicted
                 as a printed image. The combination of the cast shadow
                 patterns with the printed object made it appear as if
                 the printed object hovered over its original location
                 even though the object was physically two-dimensional.
                 By using this technique, we demonstrated that the
                 shadow-induced layout illusion resulted in printed
                 images having novel perceptual transparency. Vision
                 researchers may find our technique useful if they want
                 to extend their studies on the perception of cast
                 shadows and transparency with real objects.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kuzovkin:2019:CPA,
  author =       "Dmitry Kuzovkin and Tania Pouli and Olivier {Le Meur}
                 and R{\'e}mi Cozot and Jonathan Kervec and Kadi
                 Bouatouch",
  title =        "Context in Photo Albums: Understanding and Modeling
                 User Behavior in Clustering and Selection",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3333612",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3333612",
  abstract =     "Recent progress in digital photography and storage
                 availability has drastically changed our approach to
                  photo creation. While in the era of film cameras
                  careful forethought would usually precede the capture
                  of a photo, nowadays a large number of pictures can be
                  taken with little effort. One of the consequences is
                 the creation of numerous photos depicting the same
                 moment in slightly different ways, which makes the
                 process of organizing photos laborious for the
                 photographer. Nevertheless, photo collection
                 organization is important both for exploring photo
                 albums and for simplifying the ultimate task of
                 selecting the best photos. In this work, we conduct a
                 user study to explore how users tend to organize or
                 cluster similar photos in albums, to what extent
                 different users agree in their clustering decisions,
                 and to investigate how the clustering-defined photo
                 context affects the subsequent photo-selection process.
                 We also propose an automatic hierarchical clustering
                 solution for modeling user clustering decisions. To
                 demonstrate the usefulness of our approach, we apply it
                 to the task of automatic photo evaluation within photo
                 albums and propose a clustering-based context
                 adaptation.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
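
%%% The automatic grouping described above is hierarchical.  A
%%% minimal sketch with SciPy, in which the feature representation
%%% and cut threshold are assumptions, not the authors' model:
%%%
%%%     import numpy as np
%%%     from scipy.cluster.hierarchy import linkage, fcluster
%%%
%%%     feats = np.random.rand(40, 128)       # hypothetical features
%%%     Z = linkage(feats, method="average")  # agglomerative tree
%%%     labels = fcluster(Z, t=0.7, criterion="distance")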

@Article{Rosa:2019:SHI,
  author =       "Nina Rosa and Remco C. Veltkamp and Wolfgang H{\"u}rst
                 and Tanja Nijboer and Carolien Gilbers and Peter
                 Werkhoven",
  title =        "The Supernumerary Hand Illusion in Augmented Reality",
  journal =      j-TAP,
  volume =       "16",
  number =       "2",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3341225",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:25 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3341225",
  abstract =     "The classic rubber hand illusion (RHI) experiment
                 studies the sense of embodiment over a fake limb.
                 Distinguished subcomponents of embodiment are ownership
                 (sense of self-attribution of a body), agency (sense of
                 having motor control), and self-location (the spatial
                 experience of being inside a body), and are typically
                 evoked in either reality or virtual reality. In
                 augmented reality (AR), however, visually present real
                 limbs can be augmented with (multiple) fake virtual
                 limbs, which results in a variation of the RHI, the
                 augmented reality supernumerary hand illusion (ARSHI).
                 Such conditions occur, for example, in first-person AR
                 games and in AR-interfaces for tele-robotics. In this
                 article, we examined to what extent humans can
                 experience the sense of embodiment over a supernumerary
                 virtual arm in addition to one or two real arms. We
                 also examine how embodiment is affected by the
                 perceptual visual-tactile synchronicity of the virtual
                 and real limbs, and by the synchronicity of active
                 movement of the virtual and real hand. Embodiment was
                 measured subjectively by questionnaire and objectively
                 by skin conductance responses (SCRs). Questionnaire
                 responses show that ownership, agency, and
                 self-location can be evoked over the virtual arm in the
                 presence of a real arm, and that they are significantly
                 stronger for synchronous conditions than for
                  asynchronous conditions. The perceptually and motorically
                 synchronous condition with three visible hands led to
                 an experience of owning the virtual hand. These
                 responses further show that agency was also strongly
                 experienced over the supernumerary virtual arm, and
                 responses regarding self-location suggest a shift in
                 sensed location when one real arm was in view and an
                  additional location when both real arms were in view.
                 SCRs show no significant effect of condition, but do
                 show a significant habituation effect as a function of
                 the number of conditions performed by participants.
                 When analyzing the relations at the individual
                 participant level between the questionnaire data and
                 skin conductance, we found two clusters of
                 participants: (1) participants with low questionnaire
                 responses and low-medium SCRs and (2) participants with
                 high questionnaire responses and low-high SCRs.
                 Finally, we discuss how virtual hand appearance/realism
                 and willingness to accept virtual limbs could play an
                 important role in the ARSHI, and provide insights on
                 intricacies involved with measuring and evaluating
                 RHIs.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hoyet:2019:ISI,
  author =       "Ludovic Hoyet and Douglas W. Cunningham",
  title =        "Introduction to the Special Issue on {SAP 2019}",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "13:1--13:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355996",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355996",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zibrek:2019:PIP,
  author =       "Katja Zibrek and Sean Martin and Rachel McDonnell",
  title =        "Is Photorealism Important for Perception of Expressive
                 Virtual Humans in Virtual Reality?",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3349609",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3349609",
  abstract =     "In recent years, the quality of real-time rendering
                  has reached new heights: realistic reflections,
                 physically based materials, and photometric lighting
                 are all becoming commonplace in modern game engines and
                 even interactive virtual environments, such as virtual
                  reality (VR). As the drive for realism continues,
                 there is a need to investigate the effect of
                 photorealism on users' perception, particularly for
                 interactive, emotional scenarios in VR. In this
                 article, we explored three main topics, where we
                 predicted photorealism will make a difference: the
                 illusion of being present with the virtual person and
                 in an environment, altered emotional response toward
                  the character, and a subtler response: comfort of being
                 in close proximity to the character. We present a
                 perceptual experiment, with an interactive expressive
                 virtual character in VR, which was designed to induce
                 particular social responses in people. Our participant
                 pool was large (N = 797) and diverse in terms of
                 demographics. We designed a between-group experiment,
                 where each group saw either the realistic rendering or
                 one of our stylized conditions (simple and sketch
                 style), expressing one of three attitudes: Friendly,
                 Unfriendly, or Sad. While the render style did not
                  particularly affect the level of comfort with the
                 character or increase the illusion of presence with it,
                 our main finding shows that the photorealistic
                 character changed the emotional responses of
                 participants, compared to the stylized versions. We
                  also found a preference for realism in VR, reflected in
                  greater affinity and a higher place illusion for the
                  scenario rendered in the realistic render style.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Knopp:2019:PPN,
  author =       "Benjamin Knopp and Dmytro Velychko and Johannes
                 Dreibrodt and Dominik Endres",
  title =        "Predicting Perceived Naturalness of Human Animations
                 Based on Generative Movement Primitive Models",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355401",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355401",
  abstract =     "We compared the perceptual validity of human avatar
                 walking animations driven by six different
                 representations of human movement using a graphics
                 Turing test. All six representations are based on
                 movement primitives (MPs), which are predictive models
                 of full-body movement that differ in their complexity
                 and prediction mechanism. Assuming that humans are
                 experts at perceiving biological movement from noisy
                 sensory signals, it follows that these percepts should
                 be describable by a suitably constructed Bayesian ideal
                  observer model. We build such models from MPs and
                  investigate whether the perceived naturalness of human
                  animations is predictable from approximate Bayesian
                 model scores of the MPs. We found that certain MP-based
                 representations are capable of producing movements that
                 are perceptually indistinguishable from natural
                 movements. Furthermore, approximate Bayesian model
                 scores of these representations can be used to predict
                 perceived naturalness. In particular, we could show
                 that movement dynamics are more important for perceived
                 naturalness of human animations than single frame
                 poses. This indicates that perception of human
                 animations is highly sensitive to their temporal
                 coherence. More generally, our results add evidence for
                 a shared MP-representation of action and perception.
                 Even though the motivation of our work is primarily
                 drawn from neuroscience, we expect that our results
                 will be applicable in virtual and augmented reality
                 settings, when perceptually plausible human avatar
                 movements are required.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
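
%%% The ``approximate Bayesian model scores'' above approximate the
%%% model evidence, which for movement data D and a movement
%%% primitive model M with parameters \theta is (LaTeX notation):
%%%
%%%     p(D \mid M) = \int p(D \mid \theta, M) p(\theta \mid M) d\theta
%%%
%%% Models with higher (approximate) evidence are predicted to yield
%%% animations that observers rate as more natural.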

@Article{Jacobs:2019:KIS,
  author =       "Jochen Jacobs and Xi Wang and Marc Alexa",
  title =        "Keep It Simple: Depth-based Dynamic Adjustment of
                 Rendering for Head-mounted Displays Decreases Visual
                 Comfort",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3353902",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3353902",
  abstract =     "Head-mounted displays cause discomfort. This is
                 commonly attributed to conflicting depth cues, most
                 prominently between vergence, which is consistent with
                 object depth, and accommodation, which is adjusted to
                 the near eye displays. It is possible to adjust the
                 camera parameters, specifically interocular distance
                 and vergence angles, for rendering the virtual
                 environment to minimize this conflict. This requires
                 dynamic adjustment of the parameters based on object
                 depth. In an experiment based on a visual search task,
                 we evaluate how dynamic adjustment affects visual
                 comfort compared to fixed camera parameters. We collect
                 objective as well as subjective data. Results show that
                 dynamic adjustment decreases common objective measures
                 of visual comfort such as pupil diameter and blink rate
                 by a statistically significant margin. The subjective
                 evaluation of categories such as fatigue or eye
                 irritation shows a similar trend but was inconclusive.
                 This suggests that rendering with fixed camera
                 parameters is the better choice for head-mounted
                 displays, at least in scenarios similar to the ones
                 used here.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
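
%%% The dynamic adjustment evaluated above steers the rendering
%%% cameras toward the fixated depth.  For interocular distance IPD
%%% and fixation distance d, the symmetric vergence angle is (LaTeX
%%% notation):
%%%
%%%     \theta = 2 \arctan\left( \frac{\mathrm{IPD}}{2d} \right)
%%%
%%% Driving \theta (and the rendered interocular distance) from the
%%% instantaneous d is the dynamic scheme that the study found to
%%% perform worse than fixed camera parameters.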

@Article{Hadnett-Hunter:2019:ETV,
  author =       "Jacob Hadnett-Hunter and George Nicolaou and Eamonn
                 O'Neill and Michael Proulx",
  title =        "The Effect of Task on Visual Attention in Interactive
                 Virtual Environments",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3352763",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3352763",
  abstract =     "Virtual environments for gaming and simulation provide
                 dynamic and adaptive experiences, but, despite advances
                 in multisensory interfaces, these are still primarily
                 visual experiences. To support real-time dynamic
                 adaptation, interactive virtual environments could
                 implement techniques to predict and manipulate human
                 visual attention. One promising way of developing such
                 techniques is to base them on psychophysical
                 observations, an approach that requires a sound
                 understanding of visual attention allocation.
                 Understanding how this allocation of visual attention
                 changes depending on a user's task offers clear
                 benefits in developing these techniques and improving
                 virtual environment design. With this aim, we
                 investigated the effect of task on visual attention in
                 interactive virtual environments. We recorded fixation
                 data from participants completing freeview, search, and
                 navigation tasks in three different virtual
                 environments. We quantified visual attention
                 differences between conditions by identifying the
                 predictiveness of a low-level saliency model and its
                 corresponding color, intensity, and orientation
                 feature-conspicuity maps, as well as measuring fixation
                 center bias, depth, duration, and saccade amplitude.
                 Our results show that task does affect visual attention
                 in virtual environments. Navigation relies more than
                 search or freeview on intensity conspicuity to allocate
                 visual attention. Navigation also produces fixations
                 that are more central, longer, and deeper into the
                 scenes. Further, our results suggest that it is
                 difficult to distinguish between freeview and search
                 tasks. These results provide important guidance for
                 designing virtual environments for human interaction,
                 as well as identifying future avenues of research for
                 developing ``attention-aware'' virtual worlds.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
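
%%% One way to quantify the predictiveness of a low-level saliency
%%% model, as mentioned above, is a fixation-versus-random AUC.  A
%%% sketch only; the study's exact metric is not reproduced here:
%%%
%%%     import numpy as np
%%%     from sklearn.metrics import roc_auc_score
%%%
%%%     def saliency_auc(sal, fixations, n_neg=1000, seed=0):
%%%         """sal: 2-D saliency map; fixations: (row, col) list."""
%%%         rng = np.random.default_rng(seed)
%%%         pos = np.array([sal[r, c] for r, c in fixations])
%%%         neg = sal[rng.integers(0, sal.shape[0], n_neg),
%%%                   rng.integers(0, sal.shape[1], n_neg)]
%%%         y = np.r_[np.ones(pos.size), np.zeros(neg.size)]
%%%         return roc_auc_score(y, np.r_[pos, neg])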

@Article{Company:2019:APV,
  author =       "Pedro Company and Raquel Plumed and Peter A. C. Varley
                 and Jorge D. Camba",
  title =        "Algorithmic Perception of Vertices in Sketched
                 Drawings of Polyhedral Shapes",
  journal =      j-TAP,
  volume =       "16",
  number =       "3",
  pages =        "18:1--18:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3345507",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3345507",
  abstract =     "In this article, visual perception principles were
                 used to build an artificial perception model aimed at
                 developing an algorithm for detecting junctions in line
                 drawings of polyhedral objects that are vectorized from
                 hand-drawn sketches. The detection is performed in two
                 dimensions (2D), before any 3D model is available and
                 minimal information about the shape depicted by the
                 sketch is used. The goal of this approach is to not
                 only detect junctions in careful sketches created by
                 skilled engineers and designers but also detect
                 junctions when skilled people draw casually to quickly
                 convey rough ideas. Current approaches for extracting
                 junctions from digital images are mostly incomplete, as
                 they simply merge endpoints that are near each other,
                 thus ignoring the fact that different vertices may be
                 represented by different (but close) junctions and that
                 the endpoints of lines that depict edges that share a
                 common vertex may not necessarily be close to each
                 other, particularly in quickly sketched drawings. We
                 describe and validate a new algorithm that uses these
                 perceptual findings to merge tips of line segments into
                 2D junctions that are assumed to depict 3D vertices.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhang:2019:PBC,
  author =       "Xiao Zhang and Yongqiang Lyu and Tong Qu and Pengfei
                 Qiu and Xiaomin Luo and Jingyu Zhang and Shunjie Fan
                 and Yuanchun Shi",
  title =        "Photoplethysmogram-based Cognitive Load Assessment
                 Using Multi-Feature Fusion Model",
  journal =      j-TAP,
  volume =       "16",
  number =       "4",
  pages =        "19:1--19:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3340962",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3340962",
  abstract =     "Cognitive load assessment is crucial for user studies
                 and human--computer interaction designs. As a
                 noninvasive and easy-to-use category of measures,
                 current photoplethysmogram (PPG)-based assessment
                 methods rely on a single predefined feature, or a
                 small set of them, to recognize responses induced by
                 people's cognitive load, and are therefore unstable
                 in assessment accuracy. In this study, we propose a machine-learning
                 method by using 46 kinds of PPG features together to
                 improve the measurement accuracy for cognitive load. We
                 test the method on 16 participants through the
                 classical n-back tasks (0-back, 1-back, and 2-back).
                 The accuracy of the machine-learning method in
                 distinguishing different levels of cognitive load
                 induced by task difficulty can reach 100\% in 0-back
                 vs. 2-back tasks, which outperformed the traditional
                 HRV-based and single-PPG-feature-based methods by
                 12--55\%. When using ``leave-one-participant-out''
                 subject-independent cross-validation, 87.5\% binary
                 classification accuracy was reached, which is at the
                 state-of-the-art level. The proposed method can also
                 support real-time cognitive load assessment by
                 beat-to-beat classifications with better performance
                 than the traditional single-feature-based real-time
                 evaluation method.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ito:2019:TTD,
  author =       "Ken Ito and Shogo Okamoto and Yoji Yamada and Hiroyuki
                 Kajimoto",
  title =        "Tactile Texture Display with Vibrotactile and
                 Electrostatic Friction Stimuli Mixed at Appropriate
                 Ratio Presents Better Roughness Textures",
  journal =      j-TAP,
  volume =       "16",
  number =       "4",
  pages =        "20:1--20:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3340961",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3340961",
  abstract =     "Vibrotactile and friction texture displays are good
                 options for artificially presenting the roughness and
                 frictional properties of textures, respectively. These
                 two types of displays are compatible with touch panels
                 and exhibit complementary characteristics. We combine
                 vibrotactile and electrostatic friction texture
                 displays to improve the quality of virtual textures,
                 considering that actual textured surfaces are composed
                 of both properties. We investigate their composition
                 ratios when displaying roughness textures. Grating
                 roughness stimuli with one of six surface wavelengths
                 are generated under 11 display conditions; in 9 of
                 these, vibrotactile and friction stimuli are combined
                 at different composition ratios. A forced-choice
                 experiment on subjective realism indicates that a
                 vibrotactile stimulus combined with a slight
                 variable-friction stimulus is effective for presenting
                 quality textures at surface wavelengths greater than
                 or equal to 1.0 mm.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Tadros:2019:ANN,
  author =       "Timothy Tadros and Nicholas C. Cullen and Michelle R.
                 Greene and Emily A. Cooper",
  title =        "Assessing Neural Network Scene Classification from
                 Degraded Images",
  journal =      j-TAP,
  volume =       "16",
  number =       "4",
  pages =        "21:1--21:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3342349",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3342349",
  abstract =     "Scene recognition is an essential component of both
                 machine and biological vision. Recent advances in
                 computer vision using deep convolutional neural
                 networks (CNNs) have demonstrated impressive
                 sophistication in scene recognition, through training
                 on large datasets of labeled scene images (Zhou et al.
                 2018, 2014). One criticism of CNN-based approaches is
                 that performance may not generalize well beyond the
                 training image set (Torralba and Efros 2011), and may
                 be hampered by minor image modifications, which in some
                 cases are barely perceptible to the human eye
                 (Goodfellow et al. 2015; Szegedy et al. 2013). While
                 these ``adversarial examples'' may be unlikely in
                 natural contexts, during many real-world visual tasks
                 scene information can be degraded or limited due to
                 defocus blur, camera motion, sensor noise, or occluding
                 objects. Here, we quantify the impact of several image
                 degradations (some common, and some more exotic) on
                 indoor/outdoor scene classification using CNNs. For
                 comparison, we use human observers as a benchmark, and
                 also evaluate performance against classifiers using
                 limited, manually selected descriptors. While the CNNs
                 outperformed the other classifiers and rivaled human
                 accuracy for intact images, our results show that their
                 classification accuracy is more affected by image
                 degradations than that of human observers. On a practical
                 level, however, accuracy of the CNNs remained well
                 above chance for a wide range of image manipulations
                 that disrupted both local and global image statistics.
                 We also examine the level of image-by-image agreement
                 with human observers, and find that the CNNs' agreement
                 with observers varied as a function of the nature of
                 image manipulation. In many cases, this agreement was
                 not substantially different from the level one would
                 expect to observe for two independent classifiers.
                 Together, these results suggest that CNN-based scene
                 classification techniques are relatively robust to
                 several image degradations. However, the pattern of
                 classifications obtained for ambiguous images does not
                 appear to closely reflect the strategies employed by
                 human observers.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bhargava:2019:CEU,
  author =       "Ayush Bhargava and James Martin and Sabarish V. Babu",
  title =        "Comparative Evaluation of User Perceived Quality
                 Assessment of Design Strategies for {HTTP}-based
                 Adaptive Streaming",
  journal =      j-TAP,
  volume =       "16",
  number =       "4",
  pages =        "22:1--22:??",
  month =        sep,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3345313",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 22 07:40:26 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3345313",
  abstract =     "HTTP-based Adaptive Streaming (HAS) is the dominant
                 Internet video streaming application. One specific HAS
                 approach, Dynamic Adaptive Streaming over HTTP (DASH),
                 is of particular interest, as it is a widely deployed,
                 standardized implementation. Prior academic research
                 has focused on networking and protocol issues, and has
                 contributed an accepted understanding of the
                 performance and possible performance issues in large
                 deployment scenarios. Our work extends the current
                 understanding of HAS by focusing directly on the
                 impacts of choice of the video quality adaptation
                 algorithm on end-user perceived quality. In congested
                 network scenarios, the details of the adaptation
                 algorithm determine the amount of bandwidth consumed by
                 the application as well as the quality of the rendered
                 video stream. HAS will lead to user-perceived changes
                 in video quality due to intentional changes in segment
                 quality, or to unintentional perceived quality
                 impairments caused by video decoder artifacts such as
                 pixelation, stutters, or short or long stalls in the
                 rendered video when the playback buffer becomes empty.
                 The HAS adaptation algorithm attempts to find the
                 optimal solution to mitigate the conflict between
                 avoiding buffer stalls and maximizing video quality. In
                 this article, we present results from a user study that
                 was designed to provide insights into ``best practice
                 guidelines'' for a HAS adaptation algorithm. Our
                 findings suggest that a buffer-based strategy might
                 provide a better experience under higher network
                 impairment conditions. For the two network scenarios
                 considered, the buffer-based strategy is effective in
                 avoiding stalls but does so at the cost of reduced
                 video quality. However, the buffer-based strategy does
                 yield a lower number of quality switches as a result of
                 infrequent bitrate adaptations. Participants in the
                 buffer-based strategy condition do notice the drop in
                 video quality, causing a decrease in perceived QoE,
                 but the perceived levels of video quality, viewer
                 frustration, and opinions of video clarity and
                 distortion are significantly worse in the
                 capacity-based strategy condition, owing to artifacts
                 such as stalls. The capacity-based strategy
                 tries to provide the highest video quality possible but
                 produces many more artifacts during playback. The
                 results suggest that player video quality has more of
                 an impact on perceived quality when stalls are
                 infrequent. The study methodology also contributes a
                 unique method for gathering continuous quantitative
                 subjective measure of user perceived quality using a
                 Wii remote.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Usevitch:2020:TRA,
  author =       "David E. Usevitch and Adam J. Sperry and Jake J.
                 Abbott",
  title =        "{Translational and Rotational Arrow Cues (TRAC)}
                 Navigation Method for Manual Alignment Tasks",
  journal =      j-TAP,
  volume =       "17",
  number =       "1",
  pages =        "1:1--1:19",
  month =        mar,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3375001",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 6 09:17:16 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3375001",
  abstract =     "Many tasks in image-guided surgery require a clinician
                 to manually position an instrument in space, with
                 respect to a patient, with five or six degrees of
                 freedom (DOF). Displaying the current and desired pose
                 of the object on a 2D display such as a \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Glaholt:2020:VIR,
  author =       "Mackenzie G. Glaholt and Justin G. Hollands and Grace
                 Sim and Tzvi Spivak and Beatrice Sacripanti",
  title =        "Visual Information Requirements for Dismounted Soldier
                 Target Acquisition",
  journal =      j-TAP,
  volume =       "17",
  number =       "1",
  pages =        "2:1--2:20",
  month =        mar,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3375000",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 6 09:17:16 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3375000",
  abstract =     "We conducted an empirical investigation of the visual
                 information requirements for target detection and
                 threat identification decisions in the dismounted
                 soldier context. Forty soldiers viewed digital
                 photographs of a person standing against a forested
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Mihelac:2020:ICH,
  author =       "Lorena Mihelac and Janez Povh",
  title =        "The Impact of the Complexity of Harmony on the
                 Acceptability of Music",
  journal =      j-TAP,
  volume =       "17",
  number =       "1",
  pages =        "3:1--3:27",
  month =        mar,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3375014",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 6 09:17:16 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3375014",
  abstract =     "In this article, we contribute to the longstanding
                 challenge of how to explain the listener's
                 acceptability of a particular piece of music, using
                 harmony, one of the crucial dimensions in music and
                 one of the least examined in this context. We propose
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Narciso:2020:IOW,
  author =       "David Narciso and Miguel Melo and Jos{\'e}
                 Vasconcelos-Raposo and Maximino Bessa",
  title =        "The Impact of Olfactory and Wind Stimuli on 360 Videos
                 Using Head-mounted Displays",
  journal =      j-TAP,
  volume =       "17",
  number =       "1",
  pages =        "4:1--4:13",
  month =        mar,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3380903",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 6 09:17:16 MST 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3380903",
  abstract =     "Consuming 360 audiovisual content using a Head-Mounted
                 Display (HMD) has become a standard feature for
                 Immersive Virtual Reality (IVR). However, most
                 applications rely only on visual and auditory feedback
                 whereas other senses are often disregarded. The
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Parikh:2020:FWL,
  author =       "Saurin S. Parikh and Hari Kalva",
  title =        "Feature Weighted Linguistics Classifier for Predicting
                 Learning Difficulty Using Eye Tracking",
  journal =      j-TAP,
  volume =       "17",
  number =       "2",
  pages =        "5:1--5:25",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3380877",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 30 20:46:00 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3380877",
  abstract =     "This article presents a new approach to predict
                 learning difficulty in applications such as e-learning
                 using eye movement and pupil response. We have
                 developed 12 eye response features based on
                 psycholinguistics, contextual information processing,
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Toscani:2020:TPD,
  author =       "Matteo Toscani and Dar'ya Guarnera and Giuseppe
                 Claudio Guarnera and Jon Yngve Hardeberg and Karl R.
                 Gegenfurtner",
  title =        "Three Perceptual Dimensions for Specular and Diffuse
                 Reflection",
  journal =      j-TAP,
  volume =       "17",
  number =       "2",
  pages =        "6:1--6:26",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3380741",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 30 20:46:00 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3380741",
  abstract =     "Previous research investigated the perceptual
                 dimensionality of achromatic reflection of opaque
                 surfaces, by using either simple analytic models of
                 reflection or measured reflection properties of a
                 limited sample of materials. Here, we aim to extend
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Tennison:2020:EVB,
  author =       "Jennifer L. Tennison and P. Merlin Uesbeck and
                 Nicholas A. Giudice and Andreas Stefik and Derrick W.
                 Smith and Jenna L. Gorlewicz",
  title =        "Establishing Vibration-Based Tactile Line Profiles for
                 Use in Multimodal Graphics",
  journal =      j-TAP,
  volume =       "17",
  number =       "2",
  pages =        "7:1--7:14",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3383457",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 30 20:46:00 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3383457",
  abstract =     "Vibration plays a significant role in the way users
                 interact with touchscreens. For many users, vibration
                 affords tactile alerts and other enhancements. For
                 eyes-free users and users with visual impairments,
                 vibration can also serve a more primary role \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Pra:2020:DIP,
  author =       "Yuri {De Pra} and Federico Fontana and Hanna
                 J{\"a}rvel{\"a}inen and Stefano Papetti and Michele
                 Simonato",
  title =        "Does It Ping or Pong? {Auditory} and Tactile
                 Classification of Materials by Bouncing Events",
  journal =      j-TAP,
  volume =       "17",
  number =       "2",
  pages =        "8:1--8:17",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3393898",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat May 30 20:46:00 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3393898",
  abstract =     "Two experiments studied the role of impact sounds and
                 vibrations in classification of materials. The task
                 consisted of feeling on an actuated surface and
                 listening through headphones to the recorded feedback
                 of a ping-pong ball hitting three flat \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Plaisier:2020:LVM,
  author =       "Myrthe A. Plaisier and Daphne S. Vermeer and Astrid M.
                 L. Kappers",
  title =        "Learning the Vibrotactile {Morse} Code Alphabet",
  journal =      j-TAP,
  volume =       "17",
  number =       "3",
  pages =        "9:1--9:10",
  month =        nov,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3402935",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3402935",
  abstract =     "Vibrotactile Morse code provides a way to convey words
                 using the sense of touch with vibrations. This can be
                 useful in applications for users with a visual and/or
                 auditory impairment. The advantage of using
                 vibrotactile Morse code is that it is \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Fang:2020:PAU,
  author =       "Yuchun Fang and Wei Zhang and Ningjie Liu",
  title =        "On the Perception Analysis of User Feedback for
                 Interactive Face Retrieval",
  journal =      j-TAP,
  volume =       "17",
  number =       "3",
  pages =        "10:1--10:20",
  month =        nov,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3403964",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3403964",
  abstract =     "In this article, we explore the coherence of face
                 perception between human and machine in the scenario of
                 interactive face retrieval. In the part of human
                 perception, we collect user feedback in response to
                 the stimuli of a target face and groups of displayed \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Beshai:2020:PSP,
  author =       "Peter Beshai and Ricardo Caceffo and Kellogg S.
                 Booth",
  title =        "Providing Semi-private Feedback on a Shared Public
                 Screen by Controlling Presentation Onset",
  journal =      j-TAP,
  volume =       "17",
  number =       "3",
  pages =        "11:1--11:32",
  month =        nov,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3419983",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3419983",
  abstract =     "We describe a novel technique to provide semi-private
                 feedback on a shared public screen. The technique uses
                 a no-onset presentation that takes advantage of
                 perceptual limitations in human vision to avoid
                 alerting other users to feedback directed at one
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Thorpe:2020:SRE,
  author =       "Alexander Thorpe and Keith Nesbitt and Ami Eidels",
  title =        "A Systematic Review of Empirical Measures of Workload
                 Capacity",
  journal =      j-TAP,
  volume =       "17",
  number =       "3",
  pages =        "12:1--12:26",
  month =        nov,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3422869",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3422869",
  abstract =     "The usability of the human-machine interface is
                 dependent on the quality of its design and testing.
                 Defining clear criteria that the interface must meet
                 can assist the implementation and evaluation process.
                 These criteria may be based on performance, \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Grimm:2020:ISI,
  author =       "Cindy Grimm and Mar Gonzalez-Franco and Elham
                 Ebrahimi",
  title =        "Introduction to the Special Issue on {SAP 2020}",
  journal =      j-TAP,
  volume =       "17",
  number =       "4",
  pages =        "13e:1--13e:2",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3428144",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3428144",
  acknowledgement = ack-nhfb,
  articleno =    "13e",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Wang:2020:TQA,
  author =       "Xi Wang and Zoya Bylinskii and Aaron Hertzmann and
                 Robert Pepperell",
  title =        "Toward Quantifying Ambiguities in Artistic Images",
  journal =      j-TAP,
  volume =       "17",
  number =       "4",
  pages =        "13:1--13:10",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3418054",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3418054",
  abstract =     "It has long been hypothesized that perceptual
                 ambiguities play an important role in aesthetic
                 experience: A work with some ambiguity engages a viewer
                 more than one that does not. However, current
                 frameworks for testing this theory are limited by the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Zibrek:2020:EGA,
  author =       "Katja Zibrek and Benjamin Niay and Anne-H{\'e}l{\`e}ne
                 Olivier and Ludovic Hoyet and Julien Pettr{\'e} and Rachel
                 McDonnell",
  title =        "The Effect of Gender and Attractiveness of Motion on
                 Proximity in Virtual Reality",
  journal =      j-TAP,
  volume =       "17",
  number =       "4",
  pages =        "14:1--14:15",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3419985",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3419985",
  abstract =     "In human interaction, people will keep different
                 distances from each other depending on their gender.
                 For example, males will stand further away from males
                 and closer to females. Previous studies in virtual
                 reality (VR), where people were interacting \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Brickler:2020:FLE,
  author =       "David Brickler and Robert J. Teather and Andrew T.
                 Duchowski and Sabarish V. Babu",
  title =        "A {Fitts' Law} Evaluation of Visuo-haptic Fidelity and
                 Sensory Mismatch on User Performance in a Near-field
                 Disc Transfer Task in Virtual Reality",
  journal =      j-TAP,
  volume =       "17",
  number =       "4",
  pages =        "15:1--15:20",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3419986",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3419986",
  abstract =     "The trade-off between speed and accuracy in precision
                 tasks is important to evaluate during user interaction
                 with input devices. When different sensory cues are
                 added or altered in such interactions, those cues have
                 an effect on this trade-off, and thus \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Lin:2020:HPS,
  author =       "Yun-Xuan Lin and Rohith Venkatakrishnan and Roshan
                 Venkatakrishnan and Elham Ebrahimi and Wen-Chieh Lin
                 and Sabarish V. Babu",
  title =        "How the Presence and Size of Static Peripheral Blur
                 Affects Cybersickness in Virtual Reality",
  journal =      j-TAP,
  volume =       "17",
  number =       "4",
  pages =        "16:1--16:18",
  month =        dec,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3419984",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3419984",
  abstract =     "Cybersickness (CS) is one of the challenges that has
                 hindered the widespread adoption of Virtual Reality and
                 its applications. Consequently, a number of studies
                 have focused on extensively understanding and reducing
                 CS. Inspired by previous work that \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Choi:2021:QDT,
  author =       "Jeongbong Choi and Soonhyun Yook and In Young Kim and
                 Mok Kun Jeong and Dong Pyo Jang",
  title =        "Quantification of Displacement for Tactile Sensation
                 in a Contact-type Low Intensity Focused Ultrasound
                 Haptic Device",
  journal =      j-TAP,
  volume =       "18",
  number =       "1",
  pages =        "1:1--1:8",
  month =        jan,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3422820",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3422820",
  abstract =     "Tactile threshold of low-intensity focused ultrasound
                 (LIFU) haptic devices has been defined as the minimum
                 pressure required for tactile sensation. However, in
                 contact-type LIFU haptic devices using an elastomer as
                 a conductive medium, the tactile \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Nehme:2021:CSM,
  author =       "Yana Nehm{\'e} and Jean-Philippe Farrugia and Florent
                 Dupont and Patrick {Le Callet} and Guillaume
                 Lavou{\'e}",
  title =        "Comparison of Subjective Methods for Quality
                 Assessment of {$3$D} Graphics in Virtual Reality",
  journal =      j-TAP,
  volume =       "18",
  number =       "1",
  pages =        "2:1--2:23",
  month =        jan,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3427931",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3427931",
  abstract =     "Numerous methodologies for subjective quality
                 assessment exist in the field of image processing. In
                 particular, the Absolute Category Rating with Hidden
                 Reference (ACR-HR), the Double Stimulus Impairment
                 Scale (DSIS), and the Subjective Assessment \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Hooge:2021:EAF,
  author =       "Kimberley D. Orsten Hooge and Asal Baragchizadeh and
                 Thomas P. Karnowski and David S. Bolme and Regina
                 Ferrell and Parisa R. Jesudasen and Carlos D. Castillo
                 and Alice J. O'Toole",
  title =        "Evaluating Automated Face Identity-Masking Methods
                 with Human Perception and a Deep Convolutional Neural
                 Network",
  journal =      j-TAP,
  volume =       "18",
  number =       "1",
  pages =        "3:1--3:20",
  month =        jan,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3422988",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3422988",
  abstract =     "Face de-identification (or ``masking'') algorithms
                 have been developed in response to the prevalent use of
                 video recordings in public places. We evaluated the
                 success of face identity masking for human perceivers
                 and a deep convolutional neural network \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Jiang:2021:CRC,
  author =       "Yuanyuan Jiang and Elizabeth E. O'Neal and Shiwen Zhou
                 and Jodie M. Plumert and Joseph K. Kearney",
  title =        "Crossing Roads with a Computer-generated Agent:
                 Persistent Effects on Perception-Action Tuning",
  journal =      j-TAP,
  volume =       "18",
  number =       "1",
  pages =        "4:1--4:16",
  month =        jan,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3431923",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jan 22 06:45:15 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3431923",
  abstract =     "This study investigated how people coordinate their
                 decisions and actions with a risky or safe
                 computer-generated agent in a humanoid or non-humanoid
                 form and how this experience influences later behavior
                 when acting alone. In Experiment 1, participants
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Adhanom:2021:FVR,
  author =       "Isayas Berhe Adhanom and Majed Al-Zayer and Paul
                 MacNeilage and Eelke Folmer",
  title =        "Field-of-View Restriction to Reduce {VR} Sickness Does
                 Not Impede Spatial Learning in Women",
  journal =      j-TAP,
  volume =       "18",
  number =       "2",
  pages =        "5:1--5:17",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3448304",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 7 07:44:08 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3448304",
  abstract =     "Women are more likely to experience virtual reality
                 (VR) sickness than men, which could pose a major
                 challenge to the mass market success of VR. Because VR
                 sickness often results from a visual-vestibular
                 conflict, an effective strategy to mitigate \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Um:2021:SDA,
  author =       "Kiwon Um and Xiangyu Hu and Bing Wang and Nils
                 Thuerey",
  title =        "Spot the Difference: Accuracy of Numerical Simulations
                 via the Human Visual System",
  journal =      j-TAP,
  volume =       "18",
  number =       "2",
  pages =        "6:1--6:15",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3449064",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 7 07:44:08 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3449064",
  abstract =     "Comparative evaluation lies at the heart of science,
                 and determining the accuracy of a computational method
                 is crucial for evaluating its potential as well as for
                 guiding future efforts. However, metrics that are
                 typically used have inherent \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Gagnon:2021:EDA,
  author =       "Holly C. Gagnon and Carlos Salas Rosales and Ryan
                 Mileris and Jeanine K. Stefanucci and Sarah H.
                 Creem-Regehr and Robert E. Bodenheimer",
  title =        "Estimating Distances in Action Space in Augmented
                 Reality",
  journal =      j-TAP,
  volume =       "18",
  number =       "2",
  pages =        "7:1--7:16",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3449067",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 7 07:44:08 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3449067",
  abstract =     "Augmented reality (AR) is important for training
                 complex tasks, such as navigation, assembly, and
                 medical procedures. The effectiveness of such training
                 may depend on accurate spatial localization of AR
                 objects in the environment. This article presents
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Devlin:2021:DWW,
  author =       "Shannon P. Devlin and Jennifer K. Byham and Sara Lu
                 Riggs",
  title =        "Does What We See Shape History? {Examining} Workload
                 History as a Function of Performance and Ambient\slash
                 Focal Visual Attention",
  journal =      j-TAP,
  volume =       "18",
  number =       "2",
  pages =        "8:1--8:17",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3449066",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 7 07:44:08 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3449066",
  abstract =     "Changes in task demands can have delayed adverse
                 impacts on performance. This phenomenon, known as the
                 workload history effect, is especially of concern in
                 dynamic work domains where operators manage fluctuating
                 task demands. The existing workload \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Park:2021:ETT,
  author =       "Wanjoo Park and Muhammad Hassan Jamil and Ruth Ghidey
                 Gebremedhin and Mohamad Eid",
  title =        "Effects of Tactile Textures on Preference in
                 Visuo-Tactile Exploration",
  journal =      j-TAP,
  volume =       "18",
  number =       "2",
  pages =        "9:1--9:13",
  month =        jun,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3449065",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 7 07:44:08 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3449065",
  abstract =     "The use of haptic technologies has recently become
                 increasingly essential in Human-Computer Interaction to
                 improve user experience and performance. With the
                 introduction of tactile feedback on a touchscreen
                 device, commonly known as surface haptics, \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Gigilashvili:2021:RSS,
  author =       "Davit Gigilashvili and Weiqi Shi and Zeyu Wang and
                 Marius Pedersen and Jon Yngve Hardeberg and Holly
                 Rushmeier",
  title =        "The Role of Subsurface Scattering in Glossiness
                 Perception",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "10:1--10:26",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3458438",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3458438",
  abstract =     "This study investigates the potential impact of
                 subsurface light transport on gloss perception for the
                 purposes of broadening our understanding of visual
                 appearance in computer graphics applications. Gloss is
                 an important attribute for characterizing \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Drakopoulos:2021:ETI,
  author =       "Panagiotis Drakopoulos and George-Alex Koulieris and
                 Katerina Mania",
  title =        "Eye Tracking Interaction on Unmodified Mobile {VR}
                 Headsets Using the Selfie Camera",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "11:1--11:20",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3456875",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3456875",
  abstract =     "Input methods for interaction in smartphone-based
                 virtual and mixed reality (VR/MR) are currently based
                 on uncomfortable head tracking that controls a pointer on
                 the screen. User fixations are a fast and natural input
                 method for VR/MR interaction. \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Erickson:2021:EAB,
  author =       "Austin Erickson and Kangsoo Kim and Alexis Lambert and
                 Gerd Bruder and Michael P. Browne and Gregory F.
                 Welch",
  title =        "An Extended Analysis on the Benefits of Dark Mode User
                 Interfaces in Optical See-Through Head-Mounted
                 Displays",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "12:1--12:22",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3456874",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3456874",
  abstract =     "Light-on-dark color schemes, so-called ``Dark Mode,''
                 are becoming more and more popular over a wide range of
                 display technologies and application fields. Many
                 people who have to look at computer screens for hours
                 at a time, such as computer programmers \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Reed:2021:IWP,
  author =       "Charlotte M. Reed and Hong Z. Tan and Yang Jiao and
                 Zachary D. Perez and E. Courtenay Wilson",
  title =        "Identification of Words and Phrases Through a
                 Phonemic-Based Haptic Display: Effects of Inter-Phoneme
                 and Inter-Word Interval Durations",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "13:1--13:22",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3458725",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3458725",
  abstract =     "Stand-alone devices for tactile speech reception serve
                 a need as communication aids for persons with profound
                 sensory impairments as well as in applications such as
                 human-computer interfaces and remote communication when
                 the normal auditory and visual \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Wei:2021:CMG,
  author =       "Hui Wei and Jingmeng Li",
  title =        "Computational Model for Global Contour Precedence
                 Based on Primary Visual Cortex Mechanisms",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "14:1--14:21",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3459999",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3459999",
  abstract =     "The edges of an image contains rich visual cognitive
                 cues. However, the edge information of a natural scene
                 usually is only a set of disorganized unorganized
                 pixels for a computer. In psychology, the phenomenon of
                 quickly perceiving global information \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Bressolette:2021:MGS,
  author =       "Benjamin Bressolette and S{\'e}bastien Denjean and
                 Vincent Roussarie and Mitsuko Aramaki and S{\o}lvi
                 Ystad and Richard Kronland-Martinet",
  title =        "{MovEcho}: a Gesture-Sound Interface Allowing Blind
                 Manipulations in a Driving Context",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "15:1--15:19",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3464692",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3464692",
  abstract =     "Most recent vehicles are equipped with touchscreens,
                 which replace arrays of buttons that control secondary
                 driving functions, such as temperature level, strength
                 of ventilation, GPS, or choice of radio stations. While
                 driving, manipulating such \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Alkasasbeh:2021:WSH,
  author =       "Anas Ali Alkasasbeh and Fotios Spyridonis and
                 Gheorghita Ghinea",
  title =        "When Scents Help Me Remember My Password",
  journal =      j-TAP,
  volume =       "18",
  number =       "3",
  pages =        "16:1--16:18",
  month =        jul,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3469889",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Aug 21 07:46:01 MDT 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3469889",
  abstract =     "Current authentication processes overwhelmingly rely
                 on audiovisual data, comprising images, text or audio.
                 However, the use of olfactory data (scents) has
                 remained unexploited in the authentication process,
                 notwithstanding their verified potential to \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Bodenheimer:2021:E,
  author =       "Bobby Bodenheimer",
  title =        "Editorial",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "17:1--17:2",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486957",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486957",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Jain:2021:ISI,
  author =       "Eakta Jain and Anne-H{\'e}l{\`e}ne Olivier",
  title =        "Introduction to the Special Issue on {SAP 2021}",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "18:1--18:2",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486577",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486577",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Adkins:2021:EGV,
  author =       "Alex Adkins and Lorraine Lin and Aline Normoyle and
                 Ryan Canales and Yuting Ye and Sophie J{\"o}rg",
  title =        "Evaluating Grasping Visualizations and Control Modes
                 in a {VR} Game",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "19:1--19:14",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486582",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486582",
  abstract =     "A primary goal of the Virtual Reality (VR) community
                 is to build fully immersive and presence-inducing
                 environments with seamless and natural interactions. To
                 reach this goal, researchers are investigating how to
                 best directly use our hands to interact \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Brickler:2021:ESP,
  author =       "David Brickler and Sabarish V. Babu",
  title =        "An Evaluation of Screen Parallax, Haptic Feedback, and
                 Sensory-Motor Mismatch on Near-Field Perception-Action
                 Coordination in {VR}",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "20:1--20:16",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486583",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486583",
  abstract =     "Virtual reality (VR) displays have factors such as
                 vergence-accommodation conflicts that negatively impact
                 depth perception and cause users to misjudge distances
                 to select objects. In addition, popular large-screen
                 immersive displays present the depth of \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Ehret:2021:DPE,
  author =       "Jonathan Ehret and Andrea B{\"o}nsch and Lukas
                 Asp{\"o}ck and Christine T. R{\"o}hr and Stefan Baumann
                 and Martine Grice and Janina Fels and Torsten W.
                 Kuhlen",
  title =        "Do Prosody and Embodiment Influence the Perceived
                 Naturalness of Conversational Agents' Speech?",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "21:1--21:15",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486580",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486580",
  abstract =     "For conversational agents' speech, either all possible
                 sentences have to be prerecorded by voice actors or the
                 required utterances can be synthesized. While
                 synthesizing speech is more flexible and economical in
                 production, it also potentially reduces the \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Ferstl:2021:FFM,
  author =       "Ylva Ferstl and Michael McKay and Rachel McDonnell",
  title =        "Facial Feature Manipulation for Trait Portrayal in
                 Realistic and Cartoon-Rendered Characters",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "22:1--22:8",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486579",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486579",
  abstract =     "Previous perceptual studies on human faces have shown
                 that specific facial features have consistent effects
                 on perceived personality and appeal, but it remains
                 unclear if and how findings relate to perception of
                 virtual characters. For example, wider \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Kasahara:2021:SUV,
  author =       "Shunichi Kasahara and Kazuma Takada",
  title =        "Stealth Updates of Visual Information by Leveraging
                 Change Blindness and Computational Visual Morphing",
  journal =      j-TAP,
  volume =       "18",
  number =       "4",
  pages =        "23:1--23:17",
  month =        oct,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3486581",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Dec 17 15:45:31 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3486581",
  abstract =     "We present an approach for covert visual updates by
                 leveraging change blindness with computationally
                 generated morphed images. To clarify the design
                 parameters for intentionally suppressing change
                 detection with morphing visuals, we investigated the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Kim:2022:PCA,
  author =       "Hye Ji Kim and Michael Neff and Sung-Hee Lee",
  title =        "The Perceptual Consistency and Association of the
                 {LMA} Effort Elements",
  journal =      j-TAP,
  volume =       "19",
  number =       "1",
  pages =        "1:1--1:17",
  month =        jan,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3473041",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jan 13 09:14:08 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3473041",
  abstract =     "Laban Movement Analysis (LMA) and its Effort element
                 provide a conceptual framework through which we can
                 observe, describe, and interpret the intention of
                 movement. Effort attributes provide a link between how
                 people move and how their movement \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Liu:2022:MVC,
  author =       "Wanyu Liu and Michelle Agnes Magalhaes and Wendy E.
                 Mackay and Michel Beaudouin-Lafon and Fr{\'e}d{\'e}ric
                 Bevilacqua",
  title =        "Motor Variability in Complex Gesture Learning: Effects
                 of Movement Sonification and Musical Background",
  journal =      j-TAP,
  volume =       "19",
  number =       "1",
  pages =        "2:1--2:21",
  month =        jan,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3482967",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jan 13 09:14:08 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3482967",
  abstract =     "With the increasing interest in movement sonification
                 and expressive gesture-based interaction, it is
                 important to understand which factors contribute to
                 movement learning and how. We explore the effects of
                 movement sonification and users' musical \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Salagean:2022:VRA,
  author =       "Anca Salagean and Jacob Hadnett-Hunter and Daniel J.
                 Finnegan and Alexandra A. {De Sousa} and Michael J.
                 Proulx",
  title =        "A Virtual Reality Application of the Rubber Hand
                 Illusion Induced by Ultrasonic Mid-air Haptic
                 Stimulation",
  journal =      j-TAP,
  volume =       "19",
  number =       "1",
  pages =        "3:1--3:19",
  month =        jan,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3487563",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jan 13 09:14:08 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3487563",
  abstract =     "Ultrasonic mid-air haptic technologies, which provide
                 haptic feedback through airwaves produced using
                 ultrasound, could be employed to investigate the sense
                 of body ownership and immersion in virtual reality (VR)
                 by inducing the virtual hand illusion (VHI). \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Blissing:2022:EDB,
  author =       "Bj{\"o}rn Blissing and Fredrik Bruzelius and Olle
                 Eriksson",
  title =        "The Effects on Driving Behavior When Using a
                 Head-mounted Display in a Dynamic Driving Simulator",
  journal =      j-TAP,
  volume =       "19",
  number =       "1",
  pages =        "4:1--4:18",
  month =        jan,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3483793",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jan 13 09:14:08 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3483793",
  abstract =     "Driving simulators are established tools used during
                 automotive development and research. Most simulators
                 use either monitors or projectors as their primary
                 display system. However, the emergence of a new
                 generation of head-mounted displays has triggered
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Miyashita:2022:DSD,
  author =       "Yamato Miyashita and Yasuhito Sawahata and Akihiro
                 Sakai and Masamitsu Harasawa and Kazuhiro Hara and
                 Toshiya Morita and Kazuteru Komine",
  title =        "Display-Size Dependent Effects of {$3$D} Viewing on
                 Subjective Impressions",
  journal =      j-TAP,
  volume =       "19",
  number =       "2",
  pages =        "5:1--5:15",
  month =        apr,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3510461",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jul 20 06:55:31 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3510461",
  abstract =     "This paper describes how the screen size of 3D
                 displays affects the subjective impressions of
                 3D-visualized content. The key requirement for 3D
                 displays is the presentation of depth cues comprising
                 binocular disparities and/or motion parallax; however,
                 the \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Rajasekaran:2022:PPT,
  author =       "Suren Deepak Rajasekaran and Hao Kang and Martin
                 Cad{\'\i}k and Eric Galin and Eric Gu{\'e}rin and
                 Adrien Peytavie and Pavel Slav{\'\i}k and Bedrich
                 Benes",
  title =        "{PTRM}: Perceived Terrain Realism Metric",
  journal =      j-TAP,
  volume =       "19",
  number =       "2",
  pages =        "6:1--6:22",
  month =        apr,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3514244",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jul 20 06:55:31 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3514244",
  abstract =     "Terrains are visually prominent and commonly needed
                 objects in many computer graphics applications. While
                 there are many algorithms for synthetic terrain
                 generation, it is rather difficult to assess the
                 realism of a generated output. This article presents
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Hinde:2022:IPH,
  author =       "Stephen J. Hinde and Katy C. Noland and Graham A.
                 Thomas and David R. Bull and Iain D. Gilchrist",
  title =        "On the Immersive Properties of High Dynamic Range
                 Video",
  journal =      j-TAP,
  volume =       "19",
  number =       "2",
  pages =        "7:1--7:15",
  month =        apr,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3524692",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jul 20 06:55:31 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3524692",
  abstract =     "This paper presents the results from two studies which
                 used a dual-task methodology to measure an audience's
                 experience of immersion while watching video under
                 typical television viewing conditions. Immersion was
                 measured while participants watched either \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Cabral:2022:DAI,
  author =       "Jo{\~a}o P. Cabral and Gerard B. Remijn",
  title =        "The Duration of an Auditory Icon Can Affect How the
                 Listener Interprets Its Meaning",
  journal =      j-TAP,
  volume =       "19",
  number =       "2",
  pages =        "8:1--8:16",
  month =        apr,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3527269",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jul 20 06:55:31 MDT 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3527269",
  abstract =     "Initially introduced in the field of informatics, an
                 auditory icon consists of a short sound that is present
                 in everyday life, used to represent a specific event,
                 object, function, or action. Auditory icons have been
                 studied in various fields, and overall, \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Gao:2022:ESM,
  author =       "Zihan Gao and Huiqiang Wang and Guangsheng Feng and
                 Hongwu Lv",
  title =        "Exploring Sonification Mapping Strategies for Spatial
                 Auditory Guidance in Immersive Virtual Environments",
  journal =      j-TAP,
  volume =       "19",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3528171",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3528171",
  abstract =     "Spatial auditory cues are important for many tasks in
                 immersive virtual environments, especially guidance
                 tasks. However, due to the limited fidelity of spatial
                 sounds rendered by generic Head-Related Transfer
                 Functions (HRTFs), sound localization usually
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Aevarsson:2022:VTM,
  author =       "Elvar Atli {\AE}varsson and Th{\'o}rhildur
                 {\'A}sgeirsd{\'o}ttir and Finnur Pind and {\'A}rni
                 Kristj{\'a}nsson and Runar Unnthorsson",
  title =        "Vibrotactile Threshold Measurements at the Wrist Using
                 Parallel Vibration Actuators",
  journal =      j-TAP,
  volume =       "19",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3529259",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3529259",
  abstract =     "This article presents an investigation into the
                 perceptual vibrotactile thresholds for a range of
                 frequencies on both the inside and outside areas of the
                 wrist when exciting the skin with parallel vibrations,
                 realized using the L5 actuator made by Lofelt
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Scott:2022:ERE,
  author =       "Joshua J. Scott and Neil A. Dodgson",
  title =        "Evaluating Realism in Example-based Terrain
                 Synthesis",
  journal =      j-TAP,
  volume =       "19",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3531526",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3531526",
  abstract =     "We report two studies that investigate the use of
                 subjective believability in the assessment of objective
                 realism of terrain. The first demonstrates that there
                 is a clear subjective feature bias that depends on the
                 types of terrain being evaluated: Our \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Su:2022:MLB,
  author =       "Jun Su and Peng Zhou",
  title =        "Machine Learning-based Modeling and Prediction of the
                 Intrinsic Relationship between Human Emotion and
                 Music",
  journal =      j-TAP,
  volume =       "19",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3534966",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3534966",
  abstract =     "Human emotion is one of the most complex
                 psychophysiological phenomena and has been reported to
                 be affected significantly by music listening. It is
                 supposed that there is an intrinsic relationship
                 between human emotion and music, which can be modeled
                 and \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Zhang:2022:CWP,
  author =       "Junsong Zhang and Zuyi Yang and Linchengyu Jin and
                 Zhitang Lu and Jinhui Yu",
  title =        "Creating Word Paintings Jointly Considering Semantics,
                 Attention, and Aesthetics",
  journal =      j-TAP,
  volume =       "19",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3539610",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3539610",
  abstract =     "In this article, we present a content-aware method for
                 generating a word painting. Word painting is a
                 composite artwork made from the assemblage of words
                 extracted from a given text, which carries similar
                 semantics and visual features to a given source
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Serrano:2022:ISI,
  author =       "Ana Serrano and Michael Barnett-Cowan",
  title =        "Introduction to the Special Issue on {SAP 2022}",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3563136",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3563136",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Venkatakrishnan:2022:ICI,
  author =       "Roshan Venkatakrishnan and Rohith Venkatakrishnan and
                 Chih-Han Chung and Yu-Shuen Wang and Sabarish Babu",
  title =        "Investigating a Combination of Input Modalities,
                 Canvas Geometries, and Inking Triggers on On-Air
                 Handwriting in Virtual Reality",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3560817",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3560817",
  abstract =     "Humans communicate by writing, often taking notes that
                 assist thinking. With the growing popularity of
                 collaborative Virtual Reality (VR) applications, it is
                 imperative that we better understand aspects that
                 affect writing in these virtual experiences. On-air
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Robb:2022:EML,
  author =       "Andrew Robb and Kristopher Kohm and John Porter",
  title =        "Experience Matters: Longitudinal Changes in
                 Sensitivity to Rotational Gains in Virtual Reality",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3560818",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3560818",
  abstract =     "Redirected walking techniques use rotational gains to
                 guide users away from physical obstacles as they walk
                 in a virtual world, effectively creating the illusion
                 of a larger virtual space than is physically present.
                 Designers often want to keep users \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Kohm:2022:SHO,
  author =       "Kristopher Kohm and John Porter and Andrew Robb",
  title =        "Sensitivity to Hand Offsets and Related Behavior in
                 Virtual Environments over Time",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3561055",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3561055",
  abstract =     "This work explored how users' sensitivity to offsets
                 in their avatars' virtual hands changes as they gain
                 exposure to virtual reality. We conducted an experiment
                 using a two-alternative forced choice (2-AFC) design
                 over the course of 4 weeks, split into \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Otake:2022:TTD,
  author =       "Kazuya Otake and Shogo Okamoto and Yasuhiro Akiyama
                 and Yoji Yamada",
  title =        "Tactile Texture Display Combining Vibrotactile and
                 Electrostatic-friction Stimuli: Substantial Effects on
                 Realism and Moderate Effects on Behavioral Responses",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3539733",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3539733",
  abstract =     "There is increasing demand for tactile feedback
                 functions for touch panels. We investigated whether
                 virtual roughness texture quality can be improved
                 through simultaneous use of vibrotactile and
                 electrostatic-friction stimuli. This conjunctive use is
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Wang:2022:PGO,
  author =       "Minqi Wang and Emily A. Cooper",
  title =        "Perceptual Guidelines for Optimizing Field of View in
                 Stereoscopic Augmented Reality Displays",
  journal =      j-TAP,
  volume =       "19",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3554921",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Nov 12 07:13:41 MST 2022",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3554921",
  abstract =     "Near-eye display systems for augmented reality (AR)
                 aim to seamlessly merge virtual content with the user's
                 view of the real world. A substantial limitation of
                 current systems is that they only present virtual
                 content over a limited portion of the user's \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Brown:2023:EDM,
  author =       "Rachel Brown and Vasha Dutell and Bruce Walter and
                 Ruth Rosenholtz and Peter Shirley and Morgan McGuire
                 and David Luebke",
  title =        "Efficient Dataflow Modeling of Peripheral Encoding in
                 the Human Visual System",
  journal =      j-TAP,
  volume =       "20",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3564605",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:04 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3564605",
  abstract =     "Computer graphics seeks to deliver compelling images,
                 generated within a computing budget, targeted at a
                 specific display device, and ultimately viewed by an
                 individual user. The foveated nature of human vision
                 offers an opportunity to efficiently \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Arabadzhiyska:2023:PSP,
  author =       "Elena Arabadzhiyska and Cara Tursun and Hans-Peter
                 Seidel and Piotr Didyk",
  title =        "Practical Saccade Prediction for Head-Mounted
                 Displays: Towards a Comprehensive Model",
  journal =      j-TAP,
  volume =       "20",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3568311",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:04 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3568311",
  abstract =     "Eye-tracking technology has started to become an
                 integral component of new display devices such as
                 virtual and augmented reality headsets. Applications of
                 gaze information range from new interaction techniques
                 that exploit eye patterns to gaze-contingent \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Fukiage:2023:CAV,
  author =       "Taiki Fukiage and Takeshi Oishi",
  title =        "A Content-adaptive Visibility Predictor for
                 Perceptually Optimized Image Blending",
  journal =      j-TAP,
  volume =       "20",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3565972",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:04 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3565972",
  abstract =     "The visibility of an image semi-transparently overlaid
                 on another image varies significantly, depending on the
                 content of the images. This makes it difficult to
                 maintain the desired visibility level when the image
                 content changes. To tackle this problem, \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Choudhary:2023:VBH,
  author =       "Zubin Choudhary and Austin Erickson and Nahal Norouzi
                 and Kangsoo Kim and Gerd Bruder and Gregory Welch",
  title =        "Virtual Big Heads in Extended Reality: Estimation of
                 Ideal Head Scales and Perceptual Thresholds for Comfort
                 and Facial Cues",
  journal =      j-TAP,
  volume =       "20",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3571074",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:04 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3571074",
  abstract =     "Extended reality (XR) technologies, such as virtual
                 reality (VR) and augmented reality (AR), provide users,
                 their avatars, and embodied agents a shared platform to
                 collaborate in a spatial context. Although traditional
                 face-to-face communication is \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Howard:2023:GDP,
  author =       "Thomas Howard and Karina Driller and William Frier and
                 Claudio Pacchierotti and Maud Marchal and Jessica
                 Hartcher-O'Brien",
  title =        "Gap Detection in Pairs of Ultrasound Mid-air
                 Vibrotactile Stimuli",
  journal =      j-TAP,
  volume =       "20",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3570904",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:04 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3570904",
  abstract =     "Ultrasound mid-air haptic (UMH) devices are a novel
                 tool for haptic feedback, capable of providing
                 localized vibrotactile stimuli to users at a distance.
                 UMH applications largely rely on generating tactile
                 shape outlines on the users' skin. Here we \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Shamy:2023:ILI,
  author =       "Mor Shamy and Dror G. Feitelson",
  title =        "Identifying Lines and Interpreting Vertical Jumps in
                 Eye Tracking Studies of Reading Text and Code",
  journal =      j-TAP,
  volume =       "20",
  number =       "2",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3579357",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:05 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3579357",
  abstract =     "Eye tracking studies have shown that reading code, in
                 contradistinction to reading text, includes many
                 vertical jumps. As different lines of code may have
                 quite different functions (e.g., variable definition,
                 flow control, or computation), it is important
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Surace:2023:LGB,
  author =       "Luca Surace and Marek Wernikowski and Cara Tursun and
                 Karol Myszkowski and Rados{\l}aw Mantiuk and Piotr
                 Didyk",
  title =        "Learning {GAN}-Based Foveated Reconstruction to
                 Recover Perceptually Important Image Features",
  journal =      j-TAP,
  volume =       "20",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3583072",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:05 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3583072",
  abstract =     "A foveated image can be entirely reconstructed from a
                 sparse set of samples distributed according to the
                 retinal sensitivity of the human visual system, which
                 rapidly decreases with increasing eccentricity. The use
                 of generative adversarial networks (GANs) \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Hoh:2023:SCS,
  author =       "Weng Khuan Hoh and Fang-Lue Zhang and Neil A.
                 Dodgson",
  title =        "Salient-Centeredness and Saliency Size in
                 Computational Aesthetics",
  journal =      j-TAP,
  volume =       "20",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3588317",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Jun 9 06:34:05 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3588317",
  abstract =     "We investigate the optimal aesthetic location and size
                 of a single dominant salient region in a photographic
                 image. Existing algorithms for photographic composition
                 do not take full account of the spatial positioning or
                 sizes of these salient regions. We \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Balasubramanian:2023:ESE,
  author =       "Jagan Krishnasamy Balasubramanian and Rahul Kumar Ray
                 and Manivannan Muniyandi",
  title =        "Effect of Subthreshold Electrotactile Stimulation on
                 the Perception of Electrovibration",
  journal =      j-TAP,
  volume =       "20",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3599970",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Oct 4 09:37:46 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3599970",
  abstract =     "Electrovibration is used in touch enabled devices to
                 render different textures. Tactile sub-modal stimuli
                 can enhance texture perception when presented along
                 with electrovibration stimuli. Perception of texture
                 depends on the threshold of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Parde:2023:TIV,
  author =       "Connor J. Parde and Virginia E. Strehle and Vivekjyoti
                 Banerjee and Ying Hu and Jacqueline G. Cavazos and
                 Carlos D. Castillo and Alice J. O'Toole",
  title =        "Twin Identification over Viewpoint Change: a Deep
                 Convolutional Neural Network Surpasses Humans",
  journal =      j-TAP,
  volume =       "20",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3609224",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Oct 4 09:37:46 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3609224",
  abstract =     "Deep convolutional neural networks (DCNNs) have
                 achieved human-level accuracy in face identification
                 (Phillips et al., 2018), though it is unclear how
                 accurately they discriminate highly similar faces.
                 Here, humans and a DCNN performed a challenging face-
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Feijoo-Garcia:2023:PDV,
  author =       "Pedro Guillermo {Feij{\'o}o-Garc{\'\i}a} and Chase
                 Wrenn and Jacob Stuart and Alexandre {Gomes De
                 Siqueira} and Benjamin Lok",
  title =        "Participatory Design of Virtual Humans for Mental
                 Health Support Among {North American} Computer Science
                 Students: Voice, Appearance, and the
                 Similarity-Attraction Effect",
  journal =      j-TAP,
  volume =       "20",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3613961",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Oct 4 09:37:46 MDT 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3613961",
  abstract =     "Virtual humans (VHs) have the potential to support
                 mental wellness among college computer science (CS)
                 students. However, designing effective VHs for
                 counseling purposes requires a clear understanding of
                 students' demographics, backgrounds, and \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Chapiro:2023:ISS,
  author =       "Alexandre Chapiro and Andrew Robb",
  title =        "Introduction to the {SAP} 2023 Special Issue",
  journal =      j-TAP,
  volume =       "20",
  number =       "4",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3629977",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Dec 5 08:54:12 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3629977",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Mulot:2023:IPM,
  author =       "Lendy Mulot and Thomas Howard and Claudio Pacchierotti
                 and Maud Marchal",
  title =        "Improving the Perception of Mid-air Tactile Shapes
                 with Spatio-temporally-modulated Tactile Pointers",
  journal =      j-TAP,
  volume =       "20",
  number =       "4",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3611388",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Dec 5 08:54:12 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3611388",
  abstract =     "Ultrasound mid-air haptic (UMH) devices can remotely
                 render vibrotactile shapes on the skin of unequipped
                 users, e.g., to draw haptic icons or render virtual
                 object shapes. Spatio-temporal modulation (STM), the
                 state-of-the-art UMH shape-rendering method, \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Gagnon:2023:CPP,
  author =       "Holly Gagnon and Jeanine Stefanucci and Sarah
                 Creem-Regehr and Bobby Bodenheimer",
  title =        "Calibrated Passability Perception in Virtual Reality
                 Transfers to Augmented Reality",
  journal =      j-TAP,
  volume =       "20",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3613450",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Dec 5 08:54:12 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3613450",
  abstract =     "As applications for virtual reality (VR) and augmented
                 reality (AR) technology increase, it will be important
                 to understand how users perceive their action
                 capabilities in virtual environments. Feedback about
                 actions may help to calibrate perception for \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Wang:2023:HLB,
  author =       "Yuanhao Wang and Qian Zhang and Celine Aubuchon and
                 Jovan Kemp and Fulvio Domini and James Tompkin",
  title =        "On Human-like Biases in Convolutional Neural Networks
                 for the Perception of Slant from Texture",
  journal =      j-TAP,
  volume =       "20",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3613451",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Dec 5 08:54:12 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3613451",
  abstract =     "Depth estimation is fundamental to 3D perception, and
                 humans are known to have biased estimates of depth.
                 This study investigates whether convolutional neural
                 networks (CNNs) can be biased when predicting the sign
                 of curvature and depth of surfaces of \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Nasiri:2023:CNT,
  author =       "Moloud Nasiri and John Porter and Kristopher Kohm and
                 Andrew Robb",
  title =        "Changes in Navigation over Time: a Comparison of
                 Teleportation and Joystick-Based Locomotion",
  journal =      j-TAP,
  volume =       "20",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3613902",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Dec 5 08:54:12 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3613902",
  abstract =     "Little research has studied how people use Virtual
                 Reality (VR) changes as they experience VR. This
                 article reports the results of an experiment
                 investigating how users' behavior with two locomotion
                 methods changed over 4 weeks: teleportation and
                 joystick-. \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Wang:2024:EIC,
  author =       "Minqi Wang and Jian Ding and Dennis M. Levi and Emily
                 A. Cooper",
  title =        "The Effect of Interocular Contrast Differences on the
                 Appearance of Augmented Reality Imagery",
  journal =      j-TAP,
  volume =       "21",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3617684",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jan 13 15:24:00 MST 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3617684",
  abstract =     "Augmented reality (AR) devices seek to create
                 compelling visual experiences that merge virtual
                 imagery with the natural world. These devices often
                 rely on wearable near-eye display systems that can
                 optically overlay digital images to the left and right
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Mallick:2024:IOR,
  author =       "Snipta Mallick and G{\'e}raldine Jeckeln and Connor J.
                 Parde and Carlos D. Castillo and Alice J. O'Toole",
  title =        "The Influence of the Other-Race Effect on
                 Susceptibility to Face Morphing Attacks",
  journal =      j-TAP,
  volume =       "21",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3618113",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jan 13 15:24:00 MST 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3618113",
  abstract =     "Facial morphs created between two identities resemble
                 both of the faces used to create the morph.
                 Consequently, humans and machines are prone to mistake
                 morphs made from two identities for either of the faces
                 used to create the morph. This vulnerability \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Kim:2024:ERE,
  author =       "Aelee Kim and Jeong-Eun Lee and Kyoung-Min Lee",
  title =        "Exploring the Relative Effects of Body Position and
                 Locomotion Method on Presence and Cybersickness when
                 Navigating a Virtual Environment",
  journal =      j-TAP,
  volume =       "21",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3627706",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jan 13 15:24:00 MST 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3627706",
  abstract =     "The primary goals of this research are to strengthen
                 the understanding of the mechanisms underlying presence
                 and cybersickness in relation to the body position and
                 locomotion method when navigating a virtual environment
                 (VE). In this regard, we compared \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}

@Article{Makarov:2024:HIO,
  author =       "Ivan Makarov and Snorri Steinn Stef{\'a}nsson Thors
                 and Elvar Atli {\AE}varsson and Finnur K{\'a}ri Pind
                 J{\"o}rgensson and Nashmin Yeganeh and {\'A}rni
                 Kristj{\'a}nsson and Runar Unnthorsson",
  title =        "The Haptic Intensity Order Illusion Is Caused by
                 Amplitude Changes",
  journal =      j-TAP,
  volume =       "21",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2024",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3626237",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jan 13 15:24:00 MST 2024",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/tap.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3626237",
  abstract =     "When two brief vibrotactile stimulations are
                 sequentially applied to observers' lower back, there is
                 systematic mislocalization of the stimulation: if the
                 second stimulation is of higher intensity than the
                 first one, observers tend to respond that the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Appl. Percept.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "https://dl.acm.org/loi/tap",
}