@Preamble{"\input bibnames.sty" #
"\def \TM {${}^{\sc TM}$}" #
"\ifx \undefined \booktitle \def \booktitle #1{{{\em #1}}} \fi"
}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|https://www.math.utah.edu/~beebe/|"}
@String{j-TIIS = "ACM Transactions on Interactive Intelligent
Systems (TIIS)"}
@Article{Jameson:2011:ITI,
author = "Anthony Jameson and John Riedl",
title = "Introduction to the {Transactions on Interactive
Intelligent Systems}",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "1:1--1:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030366",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kulesza:2011:WOE,
author = "Todd Kulesza and Simone Stumpf and Weng-Keen Wong and
Margaret M. Burnett and Stephen Perona and Andrew Ko
and Ian Oberst",
title = "Why-oriented end-user debugging of naive {Bayes} text
classification",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "2:1--2:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030367",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hoi:2011:AMK,
author = "Steven C. H. Hoi and Rong Jin",
title = "Active multiple kernel learning for interactive {$3$D}
object retrieval systems",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "3:1--3:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030368",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hammond:2011:RSM,
author = "Tracy Hammond and Brandon Paulson",
title = "Recognizing sketched multistroke primitives",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "4:1--4:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030369",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Okita:2011:MAA,
author = "Sandra Y. Okita and Victor Ng-Thow-Hing and Ravi K.
Sarvadevabhatla",
title = "Multimodal approach to affective human-robot
interaction design with children",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "5:1--5:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030370",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Gibet:2011:SSD,
author = "Sylvie Gibet and Nicolas Courty and Kyle Duarte and
Thibaut Le Naour",
title = "The {SignCom} system for data-driven animation of
interactive virtual signers: Methodology and
Evaluation",
journal = j-TIIS,
volume = "1",
number = "1",
pages = "6:1--6:??",
month = oct,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2030365.2030371",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Nov 3 17:51:10 MDT 2011",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Castellano:2012:ISI,
author = "Ginevra Castellano and Laurel D. Riek and Christopher
Peters and Kostas Karpouzis and Jean-Claude Martin and
Louis-Philippe Morency",
title = "Introduction to the special issue on affective
interaction in natural environments",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133367",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Affect-sensitive systems such as social robots and
virtual agents are increasingly being investigated in
real-world settings. In order to work effectively in
natural environments, these systems require the ability
to infer the affective and mental states of humans and
to provide appropriate timely output that helps to
sustain long-term interactions. This special issue,
which appears in two parts, includes articles on the
design of socio-emotional behaviors and expressions in
robots and virtual agents and on computational
approaches for the automatic recognition of social
signals and affective states.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Beck:2012:EBL,
author = "Aryel Beck and Brett Stevens and Kim A. Bard and Lola
Ca{\~n}amero",
title = "Emotional body language displayed by artificial
agents",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133368",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Complex and natural social interaction between
artificial agents (computer-generated or robotic) and
humans necessitates the display of rich emotions in
order to be believable, socially relevant, and
accepted, and to generate the natural emotional
responses that humans show in the context of social
interaction, such as engagement or empathy. Whereas
some robots use faces to display (simplified) emotional
expressions, for other robots such as Nao, body
language is the best medium available given their
inability to convey facial expressions. Displaying
emotional body language that can be interpreted whilst
interacting with the robot should significantly improve
naturalness. This research investigates the creation of
an affect space for the generation of emotional body
language to be displayed by humanoid robots. To do so,
three experiments investigating how emotional body
language displayed by agents is interpreted were
conducted. The first experiment compared the
interpretation of emotional body language displayed by
humans and agents. The results showed that emotional
body language displayed by an agent or a human is
interpreted in a similar way in terms of recognition.
Following these results, emotional key poses were
extracted from an actor's performances and implemented
in a Nao robot. The interpretation of these key poses
was validated in a second study where it was found that
participants were better than chance at interpreting
the key poses displayed. Finally, an affect space was
generated by blending key poses and validated in a
third study. Overall, these experiments confirmed that
body language is an appropriate medium for robots to
display emotions and suggest that an affect space for
body expressions can be used to improve the
expressiveness of humanoid robots.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hiolle:2012:ECB,
author = "Antoine Hiolle and Lola Ca{\~n}amero and Marina
Davila-Ross and Kim A. Bard",
title = "Eliciting caregiving behavior in dyadic human-robot
attachment-like interactions",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133369",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present here the design and applications of an
arousal-based model controlling the behavior of a Sony
AIBO robot during the exploration of a novel
environment: a children's play mat. When the robot
experiences too many new perceptions, the increase of
arousal triggers calls for attention towards its human
caregiver. The caregiver can choose to either calm the
robot down by providing it with comfort, or to leave
the robot coping with the situation on its own. When
the arousal of the robot has decreased, the robot moves
on to further explore the play mat. We gathered results
from two experiments using this arousal-driven control
architecture. In the first setting, we show that such a
robotic architecture allows the human caregiver to
influence greatly the learning outcomes of the
exploration episode, with some similarities to a
primary caregiver during early childhood. In a second
experiment, we tested how human adults behaved in a
similar setup with two different robots: one `needy',
often demanding attention, and one more independent,
requesting far less care or assistance. Our results
show that human adults recognise each robot profile for
what it was designed to be, and behave as would be
expected, caring more for the needy robot than for the
other. Additionally, the subjects exhibited a preference
and more positive affect whilst interacting with and
rating the robot we designed as needy. This experiment
leads us to the
conclusion that our architecture and setup succeeded in
eliciting positive and caregiving behavior from adults
of different age groups and technological backgrounds.
Finally, the consistency and reactivity of the robot
during this dyadic interaction appeared crucial for the
enjoyment and engagement of the human partner.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Scherer:2012:SLN,
author = "Stefan Scherer and Michael Glodek and Friedhelm
Schwenker and Nick Campbell and G{\"u}nther Palm",
title = "Spotting laughter in natural multiparty conversations:
a comparison of automatic online and offline approaches
using audiovisual data",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133370",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "It is essential for the advancement of human-centered
multimodal interfaces to be able to infer the current
user's state or communication state. In order to enable
a system to do that, the recognition and interpretation
of multimodal social signals (i.e., paralinguistic and
nonverbal behavior) in real-time applications is
required. Since we believe that laughs are one of the
most important and widely understood social nonverbal
signals indicating affect and discourse quality, we
focus in this work on the detection of laughter in
natural multiparty discourses. The conversations are
recorded in a natural environment without any specific
constraint on the discourses using unobtrusive
recording devices. This setup ensures natural and
unbiased behavior, which is one of the main foci of
this work. To compare results, several methods, namely
Gaussian Mixture Model (GMM) supervectors as input to a
Support Vector Machine (SVM), so-called Echo State
Networks (ESN), and a Hidden Markov Model (HMM)
approach, are utilized in online and offline detection
experiments. The SVM approach proves very accurate in
the offline classification task, but is outperformed by
the ESN and HMM approaches in the online detection
($F_1$ scores: GMM SVM 0.45, ESN 0.63, HMM 0.72). Further, we
were able to utilize the proposed HMM approach in a
cross-corpus experiment without any retraining with
respectable generalization capability ($F_1$ score:
0.49). The results and possible reasons for these
outcomes are shown and discussed in the article. The
proposed methods may be directly utilized in practical
tasks such as the labeling or the online detection of
laughter in conversational data and affect-aware
applications.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Song:2012:CBH,
author = "Yale Song and David Demirdjian and Randall Davis",
title = "Continuous body and hand gesture recognition for
natural human-computer interaction",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133371",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Intelligent gesture recognition systems open a new era
of natural human-computer interaction: Gesturing is
instinctive and a skill we all have, so it requires
little or no thought, leaving the focus on the task
itself, as it should be, not on the interaction
modality. We present a new approach to gesture
recognition that attends to both body and hands, and
interprets gestures continuously from an unsegmented
and unbounded input stream. This article describes the
whole procedure of continuous body and hand gesture
recognition, from the signal acquisition to processing,
to the interpretation of the processed signals. Our
system takes a vision-based approach, tracking body and
hands using a single stereo camera. Body postures are
reconstructed in 3D space using a generative
model-based approach with a particle filter, combining
both static and dynamic attributes of motion as the
input feature to make tracking robust to
self-occlusion. The reconstructed body postures guide
searching for hands. Hand shapes are classified into
one of several canonical hand shapes using an
appearance-based approach with a multiclass support
vector machine. Finally, the extracted body and hand
features are combined and used as the input feature for
gesture recognition. We consider our task as an online
sequence labeling and segmentation problem. A
latent-dynamic conditional random field is used with a
temporal sliding window to perform the task
continuously. We augment this with a novel technique
called multilayered filtering, which performs filtering
both on the input layer and the prediction layer.
Filtering on the input layer allows capturing
long-range temporal dependencies and reducing input
signal noise; filtering on the prediction layer allows
taking weighted votes of multiple overlapping
prediction results as well as reducing estimation
noise. We tested our system in a scenario of real-world
gestural interaction using the NATOPS dataset, an
official vocabulary of aircraft handling gestures. Our
experimental results show that: (1) the use of both
static and dynamic attributes of motion in body
tracking allows statistically significant improvement
of the recognition performance over using static
attributes of motion alone; and (2) the multilayered
filtering statistically significantly improves
recognition performance over the nonfiltering method.
We also show that, on a set of twenty-four NATOPS
gestures, our system achieves a recognition accuracy of
75.37\%.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Eyben:2012:MAC,
author = "Florian Eyben and Martin W{\"o}llmer and Bj{\"o}rn
Schuller",
title = "A multitask approach to continuous five-dimensional
affect sensing in natural speech",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "6:1--6:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133372",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Automatic affect recognition is important for the
ability of future technical systems to interact with us
socially in an intelligent way by understanding our
current affective state. In recent years there has been
a shift in the field of affect recognition from `in the
lab' experiments with acted data to `in the wild'
experiments with spontaneous and naturalistic data. Two
major issues thereby are the proper segmentation of the
input and adequate description and modeling of
affective states. The first issue is crucial for
responsive, real-time systems such as virtual agents
and robots, where the latency of the analysis must be
as small as possible. To address this issue we
introduce a novel method of incremental segmentation to
be used in combination with supra-segmental modeling.
For modeling of continuous affective states we use Long
Short-Term Memory Recurrent Neural Networks, with which
we can show an improvement in performance over standard
recurrent neural networks and feed-forward neural
networks as well as Support Vector Regression. For
experiments we use the SEMAINE database, which contains
recordings of spontaneous and natural human to
Wizard-of-Oz conversations. The recordings are
annotated continuously in time and magnitude with
FeelTrace for five affective dimensions, namely
activation, expectation, intensity, power/dominance,
and valence. To exploit dependencies between the five
affective dimensions we investigate multitask learning
of all five dimensions augmented with inter-rater
standard deviation. We can show improvements for
multitask over single-task modeling. Correlation
coefficients of up to 0.81 are obtained for the
activation dimension and up to 0.58 for the valence
dimension. The performance for the remaining dimensions
was found to be between that for activation and
valence.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yazdani:2012:ARB,
author = "Ashkan Yazdani and Jong-Seok Lee and Jean-Marc Vesin
and Touradj Ebrahimi",
title = "Affect recognition based on physiological changes
during the watching of music videos",
journal = j-TIIS,
volume = "2",
number = "1",
pages = "7:1--7:??",
month = mar,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2133366.2133373",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Mar 16 12:34:07 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Assessing emotional states of users evoked during
their multimedia consumption has received a great deal
of attention with recent advances in multimedia content
distribution technologies and increasing interest in
personalized content delivery. Physiological signals
such as the electroencephalogram (EEG) and peripheral
physiological signals have been less considered for
emotion recognition in comparison to other modalities
such as facial expression and speech, although they
have a potential interest as alternative or
supplementary channels. This article presents our work
on: (1) constructing a dataset containing EEG and
peripheral physiological signals acquired during
presentation of music video clips, which is made
publicly available, and (2) conducting binary
classification of induced positive/negative valence,
high/low arousal, and like/dislike by using the
aforementioned signals. The procedure for the dataset
acquisition, including stimuli selection, signal
acquisition, self-assessment, and signal processing is
described in detail. In particular, we propose a novel
asymmetry index based on relative wavelet entropy for
measuring the asymmetry in the energy distribution of
EEG signals, which is used for EEG feature extraction.
Then, the classification systems based on EEG and
peripheral physiological signals are presented.
Single-trial and single-run classification results
indicate that, on average, EEG-based classification
outperforms classification based on the peripheral
physiological signals. However, the
peripheral physiological signals can be considered as a
good alternative to EEG signals in the case of
assessing a user's preference for a given music video
clip (like/dislike) since they have a comparable
performance to EEG signals while being more easily
measured.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Park:2012:CFM,
author = "Souneil Park and Seungwoo Kang and Sangyoung Chung and
Junehwa Song",
title = "A Computational Framework for Media Bias Mitigation",
journal = j-TIIS,
volume = "2",
number = "2",
pages = "8:1--8:??",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2209310.2209311",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:39 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Bias in the news media is an inherent flaw of the news
production process. The bias often causes a sharp
increase in political polarization and in the cost of
conflict on social issues such as the Iraq war. This
article presents NewsCube, a novel Internet news
service which aims to mitigate the effect of media
bias. NewsCube automatically creates and promptly
provides readers with multiple classified views on a
news event. As such, it helps readers understand the
event from a plurality of views and to formulate their
own, more balanced, viewpoints. The media bias problem
has been studied extensively in mass communications and
social science. This article reviews related mass
communication and journalism studies and provides a
structured view of the media bias problem and its
solution. We propose media bias mitigation as a
practical solution and demonstrate it through NewsCube.
We evaluate and discuss the effectiveness of NewsCube
through various performance studies.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Berkovsky:2012:IIF,
author = "Shlomo Berkovsky and Jill Freyne and Harri
Oinas-Kukkonen",
title = "Influencing Individually: Fusing Personalization and
Persuasion",
journal = j-TIIS,
volume = "2",
number = "2",
pages = "9:1--9:??",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2209310.2209312",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:39 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Personalized technologies aim to enhance user
experience by taking into account users' interests,
preferences, and other relevant information. Persuasive
technologies aim to modify user attitudes, intentions,
or behavior through computer-human dialogue and social
influence. While both personalized and persuasive
technologies influence user interaction and behavior,
we posit that this influence could be significantly
increased if the two technologies were combined to
create personalized and persuasive systems. For
example, the persuasive power of a one-size-fits-all
persuasive intervention could be enhanced by
considering the users being influenced and their
susceptibility to the persuasion being offered.
Likewise, personalized technologies could cash in on
increased success, in terms of user satisfaction,
revenue, and user experience, if their services used
persuasive techniques. Hence, the coupling of
personalization and persuasion has the potential to
enhance the impact of both technologies. This new,
developing area clearly offers mutual benefits to both
research areas, as we illustrate in this special
issue.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kaptein:2012:APS,
author = "Maurits Kaptein and Boris {De Ruyter} and Panos
Markopoulos and Emile Aarts",
title = "Adaptive Persuasive Systems: a Study of Tailored
Persuasive Text Messages to Reduce Snacking",
journal = j-TIIS,
volume = "2",
number = "2",
pages = "10:1--10:??",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2209310.2209313",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:39 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article describes the use of personalized short
text messages (SMS) to reduce snacking. First, we
describe the development and validation ($N = 215$) of a
questionnaire to measure individual susceptibility to
different social influence strategies. To evaluate the
external validity of this Susceptibility to Persuasion
Scale (STPS) we set up a two week text-messaging
intervention that used text messages implementing
social influence strategies as prompts to reduce
snacking behavior. In this experiment ($N = 73$) we show
that messages that are personalized (tailored) to the
individual based on their scores on the STPS, lead to a
higher decrease in snacking consumption than randomized
messages or messages that are not tailored
(contra-tailored) to the individual. We discuss the
importance of this finding for the design of persuasive
systems and detail how designers can use tailoring at
the level of social influence strategies to increase
the effects of their persuasive technologies.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cremonesi:2012:IPP,
author = "Paolo Cremonesi and Franca Garzotto and Roberto
Turrin",
title = "Investigating the Persuasion Potential of Recommender
Systems from a Quality Perspective: an Empirical
Study",
journal = j-TIIS,
volume = "2",
number = "2",
pages = "11:1--11:??",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2209310.2209314",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:39 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recommender Systems (RSs) help users search large
amounts of digital contents and services by allowing
them to identify the items that are likely to be more
attractive or useful. RSs play an important persuasion
role, as they can potentially augment the users' trust
in an application and orient their decisions or
actions towards specific directions. This article
explores the persuasiveness of RSs, presenting two vast
empirical studies that address a number of research
questions. First, we investigate if a design property
of RSs, defined by the statistically measured quality
of algorithms, is a reliable predictor of their
potential for persuasion. This factor is measured in
terms of perceived quality, defined by the overall
satisfaction, as well as by how users judge the
accuracy and novelty of recommendations. For our
purposes, we designed an empirical study involving 210
subjects and implemented seven full-sized versions of a
commercial RS, each one using the same interface and
dataset (a subset of Netflix), but each with a
different recommender algorithm. In each experimental
configuration we computed the statistical quality
(recall and F-measures) and collected data regarding
the quality perceived by 30 users. The results show us
that algorithmic attributes are less crucial than we
might expect in determining the user's perception of an
RS's quality, and suggest that the user's judgment and
attitude towards a recommender are likely to be more
affected by factors related to the user experience.
Second, we explore the persuasiveness of RSs in the
context of large interactive TV services. We report a
study aimed at assessing whether measurable persuasion
effects (e.g., changes of shopping behavior) can be
achieved through the introduction of a recommender. Our
data, collected for more than one year, allow us to
conclude that, (1) the adoption of an RS can affect
both the lift factor and the conversion rate,
determining an increased volume of sales and
influencing the user's decision to actually buy one of
the recommended products, (2) the introduction of an RS
tends to diversify purchases and orient users towards
less obvious choices (the long tail), and (3) the
perceived novelty of recommendations is likely to be
more influential than their perceived accuracy.
Overall, the results of these studies improve our
understanding of the persuasion phenomena induced by
RSs, and have implications that can be of interest to
academic scholars, designers, and adopters of this
class of systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Andrews:2012:SPP,
author = "Pierre Y. Andrews",
title = "System Personality and Persuasion in Human-Computer
Dialogue",
journal = j-TIIS,
volume = "2",
number = "2",
pages = "12:1--12:??",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2209310.2209315",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:39 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The human-computer dialogue research field has been
studying interaction with computers since the early
stage of Artificial Intelligence; however, research has
often focused on very practical tasks to be completed
with the dialogues. A new trend in the field tries to
implement persuasive techniques with automated
interactive agents; unlike booking a train ticket, for
example, such dialogues require the system to show more
anthropomorphic qualities. The influence of such
qualities on the effectiveness of persuasive dialogue
is only starting to be studied. In this article we
focus on one important perceived trait of the system:
personality, and explore how it influences the
persuasiveness of a dialogue system. We introduce a new
persuasive dialogue system and combine it with a
state-of-the-art personality utterance generator. By doing
so, we can control the system's extraversion
personality trait and observe its influence on the
user's perception of the dialogue and its output. In
particular, we observe that the user's extraversion
influences their perception of the dialogue and its
persuasiveness, and that the perceived personality of
the system can affect its trustworthiness and
persuasiveness. We believe that these observations
will help to set up guidelines to tailor dialogue
systems to the user's interaction expectations and
improve the persuasive interventions.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Vig:2012:TGE,
author = "Jesse Vig and Shilad Sen and John Riedl",
title = "The Tag Genome: Encoding Community Knowledge to
Support Novel Interaction",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "13:1--13:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362395",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article introduces the tag genome, a data
structure that extends the traditional tagging model to
provide enhanced forms of user interaction. Just as a
biological genome encodes an organism based on a
sequence of genes, the tag genome encodes an item in an
information space based on its relationship to a common
set of tags. We present a machine learning approach for
computing the tag genome, and we evaluate several
learning models on a ground truth dataset provided by
users. We describe an application of the tag genome
called Movie Tuner which enables users to navigate from
one item to nearby items along dimensions represented
by tags. We present the results of a 7-week field trial
of 2,531 users of Movie Tuner and a survey evaluating
users' subjective experience. Finally, we outline the
broader space of applications of the tag genome.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Lieberman:2012:ISI,
author = "Henry Lieberman and Catherine Havasi",
title = "Introduction to the {Special Issue on Common Sense for
Interactive Systems}",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "14:1--14:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362396",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction describes the aims and
scope of the special issue on Common Sense for
Interactive Systems of the ACM Transactions on
Interactive Intelligent Systems. It explains why the
common sense knowledge problem is crucial for both
artificial intelligence and human-computer interaction,
and it shows how the four articles selected for this
issue fit into the theme.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Gil:2012:CCK,
author = "Yolanda Gil and Varun Ratnakar and Timothy Chklovski
and Paul Groth and Denny Vrandecic",
title = "Capturing Common Knowledge about Tasks: Intelligent
Assistance for To-Do Lists",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "15:1--15:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362397",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Although to-do lists are a ubiquitous form of personal
task management, there has been no work on intelligent
assistance to automate, elaborate, or coordinate a
user's to-dos. Our research focuses on three aspects of
intelligent assistance for to-dos. We investigated the
use of intelligent agents to automate to-dos in an
office setting. We collected a large corpus from users
and developed a paraphrase-based approach to matching
agent capabilities with to-dos. We also investigated
to-dos for personal tasks and the kinds of assistance
that can be offered to users by elaborating on them on
the basis of substep knowledge extracted from the Web.
Finally, we explored coordination of user tasks with
other users through a to-do management application
deployed in a popular social networking site. We
discuss the emergence of Social Task Networks, which
link users' tasks to their social network as well as to
relevant resources on the Web. We show the benefits of
using common sense knowledge to interpret and elaborate
to-dos. Conversely, we also show that to-do lists are a
valuable way to create repositories of common sense
knowledge about tasks.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Swanson:2012:SAU,
author = "Reid Swanson and Andrew S. Gordon",
title = "Say Anything: Using Textual Case-Based Reasoning to
Enable Open-Domain Interactive Storytelling",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "16:1--16:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362398",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We describe Say Anything, a new interactive
storytelling system that collaboratively writes textual
narratives with human users. Unlike previous attempts,
this interactive storytelling system places no
restrictions on the content or direction of the user's
contribution to the emerging storyline. In response to
these contributions, the computer continues the
storyline with narration that is both coherent and
entertaining. This capacity for open-domain interactive
storytelling is enabled by an extremely large
repository of nonfiction personal stories, which is
used as a knowledge base in a case-based reasoning
architecture. In this article, we describe the three
main components of our case-based reasoning approach: a
million-item corpus of personal stories mined from
internet weblogs, a case retrieval strategy that is
optimized for narrative coherence, and an adaptation
strategy that ensures that repurposed sentences from
the case base are appropriate for the user's emerging
fiction. We describe a series of evaluations of the
system's ability to produce coherent and entertaining
stories, and we compare these narratives with
single-author stories posted to internet weblogs.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kuo:2012:PRM,
author = "Yen-Ling Kuo and Jane Yung-Jen Hsu",
title = "Planning for Reasoning with Multiple Common Sense
Knowledge Bases",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "17:1--17:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362399",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Intelligent user interfaces require common sense
knowledge to bridge the gap between the functionality
of applications and the user's goals. While current
reasoning methods have been used to provide contextual
information for interface agents, the quality of their
reasoning results is limited by the coverage of their
underlying knowledge bases. This article presents
reasoning composition, a planning-based approach to
integrating reasoning methods from multiple common
sense knowledge bases to answer queries. The reasoning
results of one reasoning method are passed to other
reasoning methods to form a reasoning chain to the
target context of a query. By leveraging different weak
reasoning methods, we are able to find answers to
queries that cannot be directly answered by querying a
single common sense knowledge base. By conducting
experiments on ConceptNet and WordNet, we compare the
reasoning results of reasoning composition, directly
querying merged knowledge bases, and spreading
activation. The results show an 11.03\% improvement in
coverage over directly querying merged knowledge bases
and a 49.7\% improvement in accuracy over spreading
activation. Two case studies are presented, showing how
reasoning composition can improve performance of
retrieval in a video editing system and a dialogue
assistant.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dinakar:2012:CSR,
author = "Karthik Dinakar and Birago Jones and Catherine Havasi
and Henry Lieberman and Rosalind Picard",
title = "Common Sense Reasoning for Detection, Prevention, and
Mitigation of Cyberbullying",
journal = j-TIIS,
volume = "2",
number = "3",
pages = "18:1--18:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2362394.2362400",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Nov 6 19:14:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Cyberbullying (harassment on social networks) is
widely recognized as a serious social problem,
especially for adolescents. It is as much a threat to
the viability of online social networks for youth today
as spam once was to email in the early days of the
Internet. Current work to tackle this problem has
involved social and psychological studies on its
prevalence as well as its negative effects on
adolescents. While true solutions rest on teaching
youth to have healthy personal relationships, few have
considered innovative design of social network software
as a tool for mitigating this problem. Mitigating
cyberbullying involves two key components: robust
techniques for effective detection and reflective user
interfaces that encourage users to reflect upon their
behavior and their choices. Spam filters have been
successful by applying statistical approaches like
Bayesian networks and hidden Markov models. They can,
like Google's GMail, aggregate human spam judgments
because spam is sent nearly identically to many people.
Bullying is more personalized, varied, and contextual.
In this work, we present an approach for bullying
detection based on state-of-the-art natural language
processing and a common sense knowledge base, which
permits recognition over a broad spectrum of topics in
everyday life. We analyze a narrower range of
particular subject matter associated with bullying
(e.g. appearance, intelligence, racial and ethnic
slurs, social acceptance, and rejection), and construct
BullySpace, a common sense knowledge base that encodes
particular knowledge about bullying situations. We then
perform joint reasoning with common sense knowledge
about a wide range of everyday life topics. We analyze
messages using our novel AnalogySpace common sense
reasoning technique. We also take into account social
network analysis and other factors. We evaluate the
model on real-world instances that have been reported
by users on Formspring, a social networking website
that is popular with teenagers. On the intervention
side, we explore a set of reflective user-interaction
paradigms with the goal of promoting empathy among
social network participants. We propose an ``air
traffic control''-like dashboard, which alerts
moderators to large-scale outbreaks that appear to be
escalating or spreading and helps them prioritize the
current deluge of user complaints. For potential
victims, we provide educational material that informs
them about how to cope with the situation, and connects
them with emotional support from others. A user
evaluation shows that in-context, targeted, and dynamic
help during cyberbullying situations fosters end-user
reflection that promotes better coping strategies.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Jameson:2012:ISI,
author = "Anthony Jameson and John Riedl",
title = "Introduction to the special issue on highlights of the
decade in interactive intelligent systems",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "19:1--19:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395124",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction explains the motivation
and origin of the TiiS special issue on Highlights of
the Decade in Interactive Intelligent Systems and shows
how its five articles exemplify the types of research
contribution that TiiS aims to encourage and publish.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hoey:2012:PSD,
author = "Jesse Hoey and Craig Boutilier and Pascal Poupart and
Patrick Olivier and Andrew Monk and Alex Mihailidis",
title = "People, sensors, decisions: Customizable and adaptive
technologies for assistance in healthcare",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "20:1--20:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395125",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The ratio of healthcare professionals to care
recipients is dropping at an alarming rate,
particularly for the older population. It is estimated
that the number of persons with Alzheimer's disease,
for example, will top 100 million worldwide by the year
2050 [Alzheimer's Disease International 2009]. It will
become harder and harder to provide needed health
services to this population of older adults. Further,
patients are becoming more aware and involved in their
own healthcare decisions. This is creating a void in
which technology has an increasingly important role to
play as a tool to connect providers with recipients.
Examples of interactive technologies range from
telecare for remote regions to computer games promoting
fitness in the home. Currently, such technologies are
developed for specific applications and are difficult
to modify to suit individual user needs. The future
potential economic and social impact of technology in
the healthcare field therefore lies in our ability to
make intelligent devices that are customizable by
healthcare professionals and their clients, that are
adaptive to users over time, and that generalize across
tasks and environments. A wide application area for
technology in healthcare is for assistance and
monitoring in the home. As the population ages, it
becomes increasingly dependent on chronic healthcare,
such as assistance for tasks of everyday life (washing,
cooking, dressing), medication taking, nutrition, and
fitness. This article will present a summary of work
over the past decade on the development of intelligent
systems that provide assistance to persons with
cognitive disabilities. These systems are unique in
that they are all built using a common framework, a
decision-theoretic model for general-purpose assistance
in the home. In this article, we will show how this
type of general model can be applied to a range of
assistance tasks, including prompting for activities of
daily living, assistance for art therapists, and stroke
rehabilitation. This model is a Partially Observable
Markov Decision Process (POMDP) that can be customized
by end-users, that can integrate complex sensor
information, and that can adapt over time. These three
characteristics of the POMDP model will allow for
increasing uptake and long-term efficiency and
robustness of technology for assistance.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Carberry:2012:AMA,
author = "Sandra Carberry and Stephanie Elzer Schwartz and
Kathleen McCoy and Seniz Demir and Peng Wu and Charles
Greenbacker and Daniel Chester and Edward Schwartz and
David Oliver and Priscilla Moraes",
title = "Access to multimodal articles for individuals with
sight impairments",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "21:1--21:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395126",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Although intelligent interactive systems have been the
focus of many research efforts, very few have addressed
systems for individuals with disabilities. This article
presents our methodology for an intelligent interactive
system that provides individuals with sight impairments
with access to the content of information graphics
(such as bar charts and line graphs) in popular media.
The article describes the methodology underlying the
system's intelligent behavior, its interface for
interacting with users, examples processed by the
implemented system, and evaluation studies both of the
methodology and the effectiveness of the overall
system. This research advances universal access to
electronic documents.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2012:MBI,
author = "Fang Chen and Natalie Ruiz and Eric Choi and Julien
Epps and M. Asif Khawaja and Ronnie Taib and Bo Yin and
Yang Wang",
title = "Multimodal behavior and interaction as indicators of
cognitive load",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "22:1--22:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395127",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "High cognitive load arises from complex time- and
safety-critical tasks, for example, mapping out flight
paths, monitoring traffic, or even managing nuclear
reactors, causing stress, errors, and lowered
performance. Over the last five years, our research has
focused on using the multimodal interaction paradigm to
detect fluctuations in cognitive load in user behavior
during system interaction. Cognitive load variations
have been found to impact interactive behavior: by
monitoring variations in specific modal input features
executed in tasks of varying complexity, we gain an
understanding of the communicative changes that occur
when cognitive load is high. So far, we have identified
specific changes in: speech, namely acoustic, prosodic,
and linguistic changes; interactive gesture; and
digital pen input, both interactive and freeform. As
ground-truth measurements, galvanic skin response,
subjective, and performance ratings have been used to
verify task complexity. The data suggest that it is
feasible to use features extracted from behavioral
changes in multiple modal inputs as indices of
cognitive load. The speech-based indicators of load,
based on data collected from user studies in a variety
of domains, have shown considerable promise. Scenarios
include single-user and team-based tasks; think-aloud
and interactive speech; and single-word, reading, and
conversational speech, among others. Pen-based
cognitive load indices have also been tested with some
success, specifically with pen-gesture, handwriting,
and freeform pen input, including diagraming. After
examining some of the properties of these measurements,
we present a multimodal fusion model, which is
illustrated with quantitative examples from a case
study. The feasibility of employing user input and
behavior patterns as indices of cognitive load is
supported by experimental evidence. Moreover,
symptomatic cues of cognitive load derived from user
behavior such as acoustic speech signals, transcribed
text, and digital pen trajectories of handwriting and
shapes, can be supported by well-established
theoretical frameworks, including O'Donnell and
Eggemeier's workload measurement [1986], Sweller's
Cognitive Load Theory [Chandler and Sweller 1991], and
Baddeley's model of modal working memory [1992] as well
as McKinstry et al.'s [2008] and Rosenbaum's [2005]
action dynamics work. The benefit of using this
approach to determine the user's cognitive load in real
time is that the data can be collected implicitly, that
is, during day-to-day use of intelligent interactive
systems; this overcomes problems of intrusiveness and
increases applicability in real-world environments,
while adapting information selection and presentation
in a dynamic computer interface with reference to
load.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dmello:2012:AAA,
author = "Sidney D'Mello and Art Graesser",
title = "{AutoTutor} and {Affective AutoTutor}: Learning by
talking with cognitively and emotionally intelligent
computers that talk back",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "23:1--23:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395128",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present AutoTutor and Affective AutoTutor as
examples of innovative 21$^{st}$ century interactive
intelligent systems that promote learning and
engagement. AutoTutor is an intelligent tutoring system
that helps students compose explanations of difficult
concepts in Newtonian physics and enhances computer
literacy and critical thinking by interacting with them
in natural language with adaptive dialog moves similar
to those of human tutors. AutoTutor constructs a
cognitive model of students' knowledge levels by
analyzing the text of their typed or spoken responses
to its questions. The model is used to dynamically
tailor the interaction toward individual students'
zones of proximal development. Affective AutoTutor
takes the individualized instruction and human-like
interactivity to a new level by automatically detecting
and responding to students' emotional states in
addition to their cognitive states. Over 20 controlled
experiments comparing AutoTutor with ecological and
experimental controls such as reading a textbook have
consistently yielded learning improvements of
approximately one letter grade after brief
30--60-minute interactions. Furthermore, Affective
AutoTutor shows even more dramatic improvements in
learning than the original AutoTutor system,
particularly for struggling students with low domain
knowledge. In addition to providing a detailed
description of the implementation and evaluation of
AutoTutor and Affective AutoTutor, we also discuss new
and exciting technologies motivated by AutoTutor such
as AutoTutor-Lite, Operation ARIES, GuruTutor,
DeepTutor, MetaTutor, and AutoMentor. We conclude this
article with our vision for future work on interactive
and engaging intelligent tutoring systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kay:2012:CPS,
author = "Judy Kay and Bob Kummerfeld",
title = "Creating personalized systems that people can
scrutinize and control: Drivers, principles and
experience",
journal = j-TIIS,
volume = "2",
number = "4",
pages = "24:1--24:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2395123.2395129",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:15 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Widespread personalized computing systems play an
already important and fast-growing role in diverse
contexts, such as location-based services,
recommenders, commercial Web-based services, and
teaching systems. The personalization in these systems
is driven by information about the user, a user model.
Moreover, as computers become both ubiquitous and
pervasive, personalization operates across the many
devices and information stores that constitute the
user's personal digital ecosystem. This enables
personalization, and the user models driving it, to
play an increasing role in people's everyday lives.
This makes it critical to establish ways to address key
problems of personalization related to privacy,
invisibility of personalization, errors in user models,
wasted user models, and the broad issue of enabling
people to control their user models and associated
personalization. We offer scrutable user models as a
foundation for tackling these problems. This article
argues the importance of scrutable user modeling and
personalization, illustrating key elements in case
studies from our work. We then identify the broad roles
for scrutable user models. The article describes how to
tackle the technical and interface challenges of
designing and building scrutable user modeling systems,
presenting design principles and showing how they were
established over our twenty years of work on the
Personis software framework. Our contributions are the
set of principles for scrutable personalization linked
to our experience from creating and evaluating
frameworks and associated applications built upon them.
These constitute a general approach to tackling
problems of personalization by enabling users to
scrutinize their user models as a basis for
understanding and controlling personalization.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Giunchiglia:2013:ISS,
author = "Fausto Giunchiglia and David Robertson",
title = "Introduction to the special section on
{Internet}-scale human problem solving",
journal = j-TIIS,
volume = "3",
number = "1",
pages = "1:1--1:??",
month = apr,
year = "2013",
CODEN = "????",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:17 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction first outlines some of the
research challenges raised by the emerging forms of
internet-scale human problem solving. It then explains
how the two articles in this special section can serve
as illuminating complementary case studies, providing
concrete examples embedded in general conceptual
frameworks.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yu:2013:ISI,
author = "Lixiu Yu and Jeffrey V. Nickerson",
title = "An {Internet}-scale idea generation system",
journal = j-TIIS,
volume = "3",
number = "1",
pages = "2:1--2:??",
month = apr,
year = "2013",
CODEN = "????",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:17 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "A method of organizing the crowd to generate ideas is
described. It integrates crowds using evolutionary
algorithms. The method increases the creativity of
ideas across generations, and it works better than
greenfield idea generation. Specifically, a design
space of internet-scale idea generation systems is
defined, and one instance is tested: a crowd idea
generation system that uses combination to improve
previous designs. The key process of the system is the
following: A crowd generates designs, then another
crowd combines the designs of the previous crowd. In an
experiment with 540 participants, the combined designs
were compared to the initial designs and to the designs
produced by a greenfield idea generation system. The
results show that the sequential combination system
produced more creative ideas in the last generation and
outperformed the greenfield idea generation system. The
design space of crowdsourced idea generation developed
here may be used to instantiate systems that can be
applied to a wide range of design problems. The work
has both pragmatic and theoretical implications: New
forms of coordination are now possible, and, using the
crowd, it is possible to test existing and emerging
theories of coordination and participatory design.
Moreover, it may be possible for human designers,
organized as a crowd, to codesign with each other and
with automated algorithms.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Poesio:2013:PDU,
author = "Massimo Poesio and Jon Chamberlain and Udo Kruschwitz
and Livio Robaldo and Luca Ducceschi",
title = "Phrase detectives: Utilizing collective intelligence
for {Internet}-scale language resource creation",
journal = j-TIIS,
volume = "3",
number = "1",
pages = "3:1--3:??",
month = apr,
year = "2013",
CODEN = "????",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:17 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We are witnessing a paradigm shift in Human Language
Technology (HLT) that may well have an impact on the
field comparable to the statistical revolution:
acquiring large-scale resources by exploiting
collective intelligence. An illustration of this new
approach is Phrase Detectives, an interactive online
game with a purpose for creating anaphorically
annotated resources that makes use of a highly
distributed population of contributors with different
levels of expertise. The purpose of this article is,
first of all, to give an overview of all aspects of
Phrase Detectives, from the design of the game and the
HLT methods we used to the results we have obtained so
far. It furthermore summarizes the lessons that we have
learned in developing this game, which should help other
researchers to design and implement similar games.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Console:2013:ISN,
author = "Luca Console and Fabrizio Antonelli and Giulia Biamino
and Francesca Carmagnola and Federica Cena and Elisa
Chiabrando and Vincenzo Cuciti and Matteo Demichelis
and Franco Fassio and Fabrizio Franceschi and Roberto
Furnari and Cristina Gena and Marina Geymonat and
Piercarlo Grimaldi and Pierluige Grillo and Silvia
Likavec and Ilaria Lombardi and Dario Mana and
Alessandro Marcengo and Michele Mioli and Mario
Mirabelli and Monica Perrero and Claudia Picardi and
Federica Protti and Amon Rapp and Rossana Simeoni and
Daniele Theseider Dupr{\'e} and Ilaria Torre and Andrea
Toso and Fabio Torta and Fabiana Vernero",
title = "Interacting with social networks of intelligent things
and people in the world of gastronomy",
journal = j-TIIS,
volume = "3",
number = "1",
pages = "4:1--4:??",
month = apr,
year = "2013",
CODEN = "????",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:17 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article introduces a framework for creating rich
augmented environments based on a social web of
intelligent things and people. We target outdoor
environments, aiming to transform a region into a smart
environment that can share its cultural heritage with
people, promoting itself and its special qualities.
Using the applications developed in the framework,
people can interact with things, listen to the stories
that these things tell them, and make their own
contributions. The things are intelligent in the sense
that they aggregate information provided by users and
behave in a socially active way. They can autonomously
establish social relationships on the basis of their
properties and their interaction with users. Hence when
a user gets in touch with a thing, she is also
introduced to its social network consisting of other
things and of users; she can navigate this network to
discover and explore the world around the thing itself.
Thus the system supports serendipitous navigation in a
network of things and people that evolves according to
the behavior of users. An innovative interaction model
was defined that allows users to interact with objects
in a natural, playful way using smartphones without the
need for a specially created infrastructure. The
framework was instantiated into a suite of applications
called WantEat, in which objects from the domain of
tourism and gastronomy (such as cheese wheels or
bottles of wine) are taken as testimonials of the
cultural roots of a region. WantEat includes an
application that allows the definition and registration
of things, a mobile application that allows users to
interact with things, and an application that supports
stakeholders in getting feedback about the things that
they have registered in the system. WantEat was
developed and tested in a real-world context which
involved a region and gastronomy-related items from it
(such as products, shops, restaurants, and recipes),
through an early evaluation with stakeholders and a
final evaluation with hundreds of users.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Song:2013:PII,
author = "Wei Song and Andrew Finch and Kumiko Tanaka-Ishii and
Keiji Yasuda and Eiichiro Sumita",
title = "{picoTrans}: an intelligent icon-driven interface for
cross-lingual communication",
journal = j-TIIS,
volume = "3",
number = "1",
pages = "5:1--5:??",
month = apr,
year = "2013",
CODEN = "????",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Apr 30 18:37:17 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "picoTrans is a prototype system that introduces a
novel icon-based paradigm for cross-lingual
communication on mobile devices. Our approach marries a
machine translation system with the popular picture
book. Users interact with picoTrans by pointing at
pictures as if it were a picture book; the system
generates natural language from these icons and the
user is able to interact with the icon sequence to
refine the meaning of the words that are generated.
When users are satisfied that the sentence generated
represents what they wish to express, they tap a
translate button and picoTrans displays the
translation. Structuring the process of communication
in this way has many advantages. First, tapping icons
is a very natural method of user input on mobile
devices; typing is cumbersome and speech input
errorful. Second, the sequence of icons, which is
annotated both with pictures and bilingually with words,
is meaningful to both users, and it opens up a second
channel of communication between them that conveys the
gist of what is being expressed. We performed a number
of evaluations of picoTrans to determine: its coverage
of a corpus of in-domain sentences; the input
efficiency in terms of the number of key presses
required relative to text entry; and users' overall
impressions of using the system compared to using a
picture book. Our results show that we are able to
cover 74\% of the expressions in our test corpus using
a 2000-icon set; we believe that this icon set size is
realistic for a mobile device. We also found that
picoTrans requires fewer key presses than typing the
input and that the system is able to predict the
correct, intended natural language sentence from the
icon sequence most of the time, making user interaction
with the icon sequence often unnecessary. In the user
evaluation, we found that in general users prefer using
picoTrans and are able to communicate more rapidly and
expressively. Furthermore, users had more confidence
that they were able to communicate effectively using
picoTrans.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Schreiber:2013:ISI,
author = "Daniel Schreiber and Kris Luyten and Max
M{\"u}hlh{\"a}user and Oliver Brdiczka and Melanie
Hartman",
title = "Introduction to the special issue on interaction with
smart objects",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "6:1--6:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499475",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Smart objects can be smart because of the information
and communication technology that is added to
human-made artifacts. It is not, however, the
technology itself that makes them smart but rather the
way in which the technology is integrated, and their
smartness surfaces through how people are able to
interact with these objects. Hence, the key challenge
for making smart objects successful is to design usable
and useful interactions with them. We list five
features that can contribute to the smartness of an
object, and we discuss how smart objects can help
resolve the simplicity-featurism paradox. We conclude
by introducing the three articles in this special
issue, which dive into various aspects of smart object
interaction: augmenting objects with projection,
service-oriented interaction with smart objects via a
mobile portal, and an analysis of input-output
relations in interaction with tangible smart objects.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Molyneaux:2013:CAM,
author = "David Molyneaux and Hans Gellersen and Joe Finney",
title = "Cooperative augmentation of mobile smart objects with
projected displays",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "7:1--7:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499476",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Sensors, processors, and radios can be integrated
invisibly into objects to make them smart and sensitive
to user interaction, but feedback is often limited to
beeps, blinks, or buzzes. We propose to redress this
input-output imbalance by augmentation of smart objects
with projected displays that, unlike physical
displays, allow seamless integration with the natural
appearance of an object. In this article, we
investigate how, in a ubiquitous computing world, smart
objects can acquire and control a projection. We
consider that projectors and cameras are ubiquitous in
the environment, and we develop a novel conception and
system that enables smart objects to spontaneously
associate with projector-camera systems for cooperative
augmentation. Projector-camera systems are conceived as
generic, supporting standard computer vision methods
for different appearance cues, and smart objects
provide a model of their appearance for method
selection at runtime, as well as sensor observations to
constrain the visual detection process. Cooperative
detection results in accurate location and pose of the
object, which is then tracked for visual augmentation
in response to display requests by the smart object. In
this article, we define the conceptual framework
underlying our approach; report on computer vision
experiments that give original insight into natural
appearance-based detection of everyday objects; show
how object sensing can be used to increase speed and
robustness of visual detection; describe and evaluate a
fully implemented system; and describe two smart object
applications to illustrate the system's cooperative
augmentation process and the embodied interactions it
enables with smart objects.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Thebault:2013:ESP,
author = "Pierrick Thebault and Dominique Decotter and Mathieu
Boussard and Monique Lu",
title = "Embodying services into physical places: Toward the
design of a mobile environment browser",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "8:1--8:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499477",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The tremendous developments in mobile computing and
handheld devices have allowed for an increasing usage
of the resources of the World Wide Web. People today
consume information and services on the go, through
smart phone applications capable of exploiting their
location in order to adapt the content according to the
context of use. As location-based services gain
traction and reveal their limitations, we argue there
is a need for intelligent systems to be created to
better support people's activities in their experience
of the city, especially regarding their decision-making
processes. In this article, we explore the opportunity
to move closer to the realization of the ubiquitous
computing vision by turning physical places into smart
environments capable of cooperatively and autonomously
collecting, processing, and transporting information
about their characteristics (e.g., practical
information, presence of people, and ambience).
Following a multidisciplinary approach which leverages
psychology, design, and computer science, we propose to
investigate the potential of building communication and
interaction spaces, called information spheres, on top
of physical places such as businesses, homes, and
institutions. We argue that, if the latter are exposed
on the Web, they can act as a platform delivering
information and services and mediating interactions
with smart objects without requiring too much effort
for the deployment of the architecture. After
presenting the inherent challenges of our vision, we go
through the protocol of two preliminary experiments
that aim to evaluate users' perception of different
types of information (i.e., reviews, check-in
information, video streams, and real-time
representations) and their influence on the
decision-making process. Results of this study lead us
to elaborate the design considerations that must be
taken into account to ensure the intelligibility and
user acceptance of information spheres. We finally
describe a research prototype application called
Environment Browser (Env-B) and present the underlying
smart space middleware, before evaluating the user
experience with our system through quantitative and
qualitative methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{vandeGarde-Perik:2013:AIO,
author = "Evelien van de Garde-Perik and Serge Offermans and
Koen van Boerdonk and Kars-Michiel Lenssen and Elise
van den Hoven",
title = "An analysis of input-output relations in interaction
with smart tangible objects",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "9:1--9:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499478",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article focuses on the conceptual relation
between the user's input and a system's output in
interaction with smart tangible objects. Understanding
this input-output relation (IO relation) is a
prerequisite for the design of meaningful interaction.
A meaningful IO relation allows the user to know what
to do with a system to achieve a certain goal and to
evaluate the outcome. The work discussed in this
article followed a design research process in which
four concepts were developed and prototyped. An
evaluation was performed using these prototypes to
investigate the effect of highly different IO relations
on the user's understanding of the interaction. The
evaluation revealed two types of IO relations differing
in functionality and the number of mappings between the
user and system actions. These two types of relations
are described by two IO models that provide an overview
of these mappings. Furthermore, they illustrate the
role of the user and the influence of the system in the
process of understanding the interaction. The analysis
of the two types of IO models illustrates the value of
understanding IO relations for the design of smart
tangible objects.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Andre:2013:ISS,
author = "Elisabeth Andr{\'e} and Joyce Chai",
title = "Introduction to the special section on eye gaze and
conversation",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "10:1--10:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499479",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction first explains the origin
of this special section. It then outlines how each of
the two articles included sheds light on possibilities
for conversational dialog systems to use eye gaze as a
signal that reflects aspects of participation in the
dialog: degree of engagement and turn taking behavior,
respectively.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ishii:2013:GAC,
author = "Ryo Ishii and Yukiko I. Nakano and Toyoaki Nishida",
title = "Gaze awareness in conversational agents: Estimating a
user's conversational engagement from eye gaze",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "11:1--11:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499480",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In face-to-face conversations, speakers are
continuously checking whether the listener is engaged
in the conversation, and they change their
conversational strategy if the listener is not fully
engaged. With the goal of building a conversational
agent that can adaptively control conversations, in
this study we analyze listener gaze behaviors and
develop a method for estimating whether a listener is
engaged in the conversation on the basis of these
behaviors. First, we conduct a Wizard-of-Oz study to
collect information on a user's gaze behaviors. We then
investigate how conversational disengagement, as
annotated by human judges, correlates with gaze
transition, mutual gaze (eye contact) occurrence, gaze
duration, and eye movement distance. On the basis of
the results of these analyses, we identify useful
information for estimating a user's disengagement and
establish an engagement estimation method using a
decision tree technique. The results of these analyses
show that a model using the features of gaze
transition, mutual gaze occurrence, gaze duration, and
eye movement distance provides the best performance and
can estimate the user's conversational engagement
accurately. The estimation model is then implemented as
a real-time disengagement judgment mechanism and
incorporated into a multimodal dialog manager in an
animated conversational agent. This agent is designed
to estimate the user's conversational engagement and
generate probing questions when the user is distracted
from the conversation. Finally, we evaluate the
engagement-sensitive agent and find that asking probing
questions at the proper times has the expected effects
on the user's verbal/nonverbal behaviors during
communication with the agent. We also find that our
agent system improves the user's impression of the
agent in terms of its engagement awareness, behavior
appropriateness, conversation smoothness, favorability,
and intelligence.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Jokinen:2013:GTT,
author = "Kristiina Jokinen and Hirohisa Furukawa and Masafumi
Nishida and Seiichi Yamamoto",
title = "Gaze and turn-taking behavior in casual conversational
interactions",
journal = j-TIIS,
volume = "3",
number = "2",
pages = "12:1--12:??",
month = jul,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499474.2499481",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:45 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Eye gaze is an important means for controlling
interaction and coordinating the participants' turns
smoothly. We have studied how eye gaze correlates with
spoken interaction and especially focused on the
combined effect of the speech signal and gazing to
predict turn taking possibilities. It is well known
that mutual gaze is important in the coordination of
turn taking in two-party dialogs, and in this article,
we investigate whether this fact also holds for
three-party conversations. In group interactions, it
may be that different features are used for managing
turn taking than in two-party dialogs. We collected
casual conversational data and used an eye tracker to
systematically observe a participant's gaze in the
interactions. By studying the combined effect of speech
and gaze on turn taking, we aimed to answer our main
questions: How well can eye gaze help in predicting
turn taking? What is the role of eye gaze when the
speaker holds the turn? Is the role of eye gaze as
important in three-party dialogs as in two-party
dialogs? We used Support Vector Machines (SVMs) to
classify turn taking events with respect to speech and
gaze features, so as to estimate how well the features
signal a change of the speaker or a continuation of the
same speaker. The results confirm the earlier
hypothesis that eye gaze significantly helps in
predicting the partner's turn taking activity, and we
also get supporting evidence for our hypothesis that
the speaker is a prominent coordinator of the
interaction space. Such a turn taking model could be
used in interactive applications to improve the
system's conversational performance.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Jameson:2013:MJR,
author = "Anthony Jameson",
title = "In Memoriam: {John Riedl}",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "13:1--13:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2533670.2533671",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This recollection of John Riedl, founding
coeditor-in-chief of the ACM Transactions on
Interactive Intelligent Systems, presents a picture,
contributed by editors of the journal, of what it was
like to collaborate and interact with him.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Amershi:2013:LAW,
author = "Saleema Amershi and Jalal Mahmud and Jeffrey Nichols
and Tessa Lau and German Attanasio Ruiz",
title = "{LiveAction}: Automating {Web} Task Model Generation",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "14:1--14:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2533670.2533672",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Task automation systems promise to increase human
productivity by assisting us with our mundane and
difficult tasks. These systems often rely on people to
(1) identify the tasks they want automated and (2)
specify the procedural steps necessary to accomplish
those tasks (i.e., to create task models). However, our
interviews with users of a Web task automation system
reveal that people find it difficult to identify tasks
to automate and most do not even believe they perform
repetitive tasks worthy of automation. Furthermore,
even when automatable tasks are identified, the
well-recognized difficulties of specifying task steps
often prevent people from taking advantage of these
automation systems. In this research, we analyze real
Web usage data and find that people do in fact repeat
behaviors on the Web and that automating these
behaviors, regardless of their complexity, would reduce
the overall number of actions people need to perform
when completing their tasks, potentially saving time.
Motivated by these findings, we developed LiveAction, a
fully-automated approach to generating task models from
Web usage data. LiveAction models can be used to
populate the task model repositories required by many
automation systems, helping us take advantage of
automation in our everyday lives.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wetzler:2013:CPM,
author = "Philipp Wetzler and Steven Bethard and Heather Leary
and Kirsten Butcher and Soheil Danesh Bahreini and Jin
Zhao and James H. Martin and Tamara Sumner",
title = "Characterizing and Predicting the Multifaceted Nature
of Quality in Educational {Web} Resources",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "15:1--15:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2533670.2533673",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Efficient learning from Web resources can depend on
accurately assessing the quality of each resource. We
present a methodology for developing computational
models of quality that can assist users in assessing
Web resources. The methodology consists of four steps:
(1) a meta-analysis of previous studies to decompose
quality into high-level dimensions and low-level
indicators, (2) an expert study to identify the key
low-level indicators of quality in the target domain,
(3) human annotation to provide a collection of example
resources where the presence or absence of quality
indicators has been tagged, and (4) training of a
machine learning model to predict quality indicators
based on content and link features of Web resources. We
find that quality is a multifaceted construct, with
different aspects that may be important to different
users at different times. We show that machine learning
models can predict this multifaceted nature of quality,
both in the context of aiding curators as they evaluate
resources submitted to digital libraries, and in the
context of aiding teachers as they develop online
educational resources. Finally, we demonstrate how
computational models of quality can be provided as a
service, and embedded into applications such as Web
search.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Amir:2013:PRV,
author = "Ofra Amir and Ya'akov (Kobi) Gal",
title = "Plan Recognition and Visualization in Exploratory
Learning Environments",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "16:1--16:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2533670.2533674",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Modern pedagogical software is open-ended and
flexible, allowing students to solve problems through
exploration and trial-and-error. Such exploratory
settings provide for a rich educational environment for
students, but they challenge teachers to keep track of
students' progress and to assess their performance.
This article presents techniques for recognizing
students' activities in such pedagogical software and
visualizing these activities to teachers. It describes
a new plan recognition algorithm that uses a recursive
grammar that takes into account repetition and
interleaving of activities. This algorithm was
evaluated empirically using an exploratory environment
for teaching chemistry used by thousands of students in
several countries. It was always able to correctly
infer students' plans when the appropriate grammar was
available. We designed two methods for visualizing
students' activities for teachers: one that visualizes
students' inferred plans, and one that visualizes
students' interactions over a timeline. Both of these
visualization methods were preferred to and found more
helpful than a baseline method which showed a movie of
students' interactions. These results demonstrate the
benefit of combining novel AI techniques and
visualization methods for the purpose of designing
collaborative systems that support students in their
problem solving and teachers in their understanding of
students' performance.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2013:HDM,
author = "Li Chen and Marco de Gemmis and Alexander Felfernig
and Pasquale Lops and Francesco Ricci and Giovanni
Semeraro",
title = "Human Decision Making and Recommender Systems",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "17:1--17:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2533670.2533675",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recommender systems have already proved to be valuable
for coping with the information overload problem in
several application domains. They provide people with
suggestions for items which are likely to be of
interest for them; hence, a primary function of
recommender systems is to help people make good choices
and decisions. However, most previous research has
focused on recommendation techniques and algorithms,
and less attention has been devoted to the decision
making processes adopted by the users and possibly
supported by the system. There is still a gap between
the importance that the community gives to the
assessment of recommendation algorithms and the current
range of ongoing research activities concerning human
decision making. Different decision-psychological
phenomena can influence the decision making of users of
recommender systems, and research along these lines is
becoming increasingly important and popular. This
special issue highlights how the coupling of
recommendation algorithms with the understanding of
human choice and decision making theory has the
potential to benefit research and practice on
recommender systems and to enable users to achieve a
good balance between decision accuracy and decision
effort.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dodson:2013:ELA,
author = "Thomas Dodson and Nicholas Mattei and Joshua T. Guerin
and Judy Goldsmith",
title = "An {English}-Language Argumentation Interface for
Explanation Generation with {Markov} Decision Processes
in the Domain of Academic Advising",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "18:1--18:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2513564",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "A Markov Decision Process (MDP) policy presents, for
each state, an action, which preferably maximizes the
expected utility accrual over time. In this article, we
present a novel explanation system for MDP policies.
The system interactively generates conversational
English-language explanations of the actions suggested
by an optimal policy, and does so in real time. We rely
on natural language explanations in order to build
trust between the user and the explanation system,
leveraging existing research in psychology in order to
generate salient explanations. Our explanation system
is designed for portability between domains and uses a
combination of domain-specific and domain-independent
techniques. The system automatically extracts implicit
knowledge from an MDP model and accompanying policy.
This MDP-based explanation system can be ported between
applications without additional effort by knowledge
engineers or model builders. Our system separates
domain-specific data from the explanation logic,
allowing for a robust system capable of incremental
upgrades. Domain-specific explanations are generated
through case-based explanation techniques specific to
the domain and a knowledge base of concept mappings
used to generate English-language explanations.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Freyne:2013:RBP,
author = "Jill Freyne and Shlomo Berkovsky and Gregory Smith",
title = "Rating Bias and Preference Acquisition",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "19:1--19:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499673",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Personalized systems and recommender systems exploit
implicitly and explicitly provided user information to
address the needs and requirements of those using their
services. User preference information, often in the
form of interaction logs and ratings data, is used to
identify similar users, whose opinions are leveraged to
inform recommendations or to filter information. In
this work we explore a different dimension of
information: trends in user bias and reasoning learned
from ratings provided by users to a recommender system.
Our work examines the characteristics of a dataset of
100,000 user ratings on a corpus of recipes, which
illustrates stable user bias towards certain features
of the recipes (cuisine type, key ingredient, and
complexity). We exploit this knowledge to design and
evaluate a personalized rating acquisition tool based
on active learning, which leverages user biases in
order to obtain ratings bearing high-value information
and to reduce prediction errors with new users.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Knijnenburg:2013:MDA,
author = "Bart P. Knijnenburg and Alfred Kobsa",
title = "Making Decisions about Privacy: Information Disclosure
in Context-Aware Recommender Systems",
journal = j-TIIS,
volume = "3",
number = "3",
pages = "20:1--20:??",
month = oct,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499670",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:47 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recommender systems increasingly use contextual and
demographical data as a basis for recommendations.
Users, however, often feel uncomfortable providing such
information. In a privacy-minded design of
recommenders, users are free to decide for themselves
what data they want to disclose about themselves. But
this decision is often complex and burdensome, because
the consequences of disclosing personal information are
uncertain or even unknown. Although a number of
researchers have tried to analyze and facilitate such
information disclosure decisions, their research
results are fragmented, and they often do not hold up
well across studies. This article describes a unified
approach to privacy decision research that describes
the cognitive processes involved in users' ``privacy
calculus'' in terms of system-related perceptions and
experiences that act as mediating factors to
information disclosure. The approach is applied in an
online experiment with 493 participants using a mock-up
of a context-aware recommender system. Analyzing the
results with a structural linear model, we demonstrate
that personal privacy concerns and disclosure
justification messages affect the perception of and
experience with a system, which in turn drive
information disclosure decisions. Overall, disclosure
justification messages do not increase disclosure.
Although they are perceived to be valuable, they
decrease users' trust and satisfaction. Another result
is that manipulating the order of the requests
increases the disclosure of items requested early but
decreases the disclosure of items requested later.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Apostolopoulos:2014:IOL,
author = "Ilias Apostolopoulos and Navid Fallah and Eelke Folmer
and Kostas E. Bekris",
title = "Integrated online localization and navigation for
people with visual impairments using smart phones",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "21:1--21:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499669",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Indoor localization and navigation systems for
individuals with Visual Impairments (VIs) typically
rely upon extensive augmentation of the physical space,
significant computational resources, or heavy and
expensive sensors; thus, few systems have been
implemented on a large scale. This work describes a
system able to guide people with VIs through indoor
environments using inexpensive sensors, such as
accelerometers and compasses, which are available in
portable devices like smart phones. The method takes
advantage of feedback from the human user, who confirms
the presence of landmarks, something that users with
VIs already do when navigating in a building. The
system calculates the user's location in real time and
uses it to provide audio instructions on how to reach
the desired destination. Early experiments
suggested that the accuracy of the localization depends
on the type of directions and the availability of an
appropriate transition model for the user. A critical
parameter for the transition model is the user's step
length. Consequently, this work also investigates
different schemes for automatically computing the
user's step length and reducing the dependence of the
approach on the definition of an accurate transition
model. In this way, the direction provision method is
able to use the localization estimate and adapt to
failed executions of paths by the users. Experiments
are presented that evaluate the accuracy of the overall
integrated system, which is executed online on a smart
phone. Both people with VIs and blindfolded sighted
people participated in the experiments, which included
paths along multiple floors that required the use of
stairs and elevators.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Zamborlin:2014:FGI,
author = "Bruno Zamborlin and Frederic Bevilacqua and Marco
Gillies and Mark D'inverno",
title = "Fluid gesture interaction design: Applications of
continuous recognition for the design of modern
gestural interfaces",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "22:1--22:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2543921",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article presents Gesture Interaction DEsigner
(GIDE), an innovative application for gesture
recognition. Instead of recognizing gestures only after
they have been entirely completed, as happens in
classic gesture recognition systems, GIDE exploits the
full potential of gestural interaction by tracking
gestures continuously and synchronously, allowing users
to both control the target application moment to moment
and also receive immediate and synchronous feedback
about system recognition states. By this means, they
quickly learn how to interact with the system in order
to develop better performances. Furthermore, rather
than learning the predefined gestures of others, GIDE
allows users to design their own gestures, making
interaction more natural and also allowing the
applications to be tailored to users' specific needs.
We describe our system, which demonstrates these new
qualities that combine to provide fluid gesture
interaction design, through evaluations with a range of
performers and artists.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Young:2014:DET,
author = "James E. Young and Takeo Igarashi and Ehud Sharlin and
Daisuke Sakamoto and Jeffrey Allen",
title = "Design and evaluation techniques for authoring
interactive and stylistic behaviors",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "23:1--23:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499671",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present a series of projects for end-user authoring
of interactive robotic behaviors, with a particular
focus on the style of those behaviors: we call this
approach Style-by-Demonstration (SBD). We provide an
overview introduction of three different SBD platforms:
SBD for animated character interactive locomotion
paths, SBD for interactive robot locomotion paths, and
SBD for interactive robot dance. The primary
contribution of this article is a detailed
cross-project SBD analysis of the interaction designs
and evaluation approaches employed, with the goal of
providing general guidelines stemming from our
experiences, for both developing and evaluating SBD
systems. In addition, we provide the first full account
of our Puppet Master SBD algorithm, with an explanation
of how it evolved through the projects.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kumar:2014:TES,
author = "Rohit Kumar and Carolyn P. Ros{\'e}",
title = "Triggering effective social support for online
groups",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "24:1--24:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2499672",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Conversational agent technology is an emerging
paradigm for creating a social environment in online
groups that is conducive to effective teamwork. Prior
work has demonstrated advantages in terms of learning
gains and satisfaction scores when groups learning
together online have been supported by conversational
agents that employ Balesian social strategies. This
prior work raises two important questions that are
addressed in this article. The first question is one of
generality. Specifically, are the positive effects of
the designed support specific to learning contexts? Or
are they in evidence in other collaborative task
domains as well? We present a study conducted within a
collaborative decision-making task where we see that
the positive effects of the Balesian social strategies
extend to this new context. The second question is
whether it is possible to increase the effectiveness of
the Balesian social strategies by increasing the
context sensitivity with which the social strategies
are triggered. To this end, we present technical work
that increases the sensitivity of the triggering. Next,
we present a user study that demonstrates an
improvement in performance of the support agent with
the new, more sensitive triggering policy over the
baseline approach from prior work. The technical
contribution of this article is that we extend prior
work where such support agents were modeled using a
composition of conversational behaviors integrated
within an event-driven framework. Within the present
approach, conversation is orchestrated through
context-sensitive triggering of the composed behaviors.
The core effort involved in applying this approach
involves building a set of triggering policies that
achieve this orchestration in a time-sensitive and
coherent manner. In line with recent developments in
data-driven approaches for building dialog systems, we
present a novel technique for learning
behavior-specific triggering policies, deploying it as
part of our efforts to improve a socially capable
conversational tutor agent that supports collaborative
learning.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kritikos:2014:TMD,
author = "K. Kritikos and D. Plexousakis and F. Patern{\`o}",
title = "Task model-driven realization of interactive
application functionality through services",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "25:1--25:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2559979",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The Service-Oriented Computing (SOC) paradigm is
currently being adopted by many developers, as it
promises the construction of applications through reuse
of existing Web Services (WSs). However, current SOC
tools produce applications that interact with users in
a limited way. This limitation is overcome by
model-based Human-Computer Interaction (HCI) approaches
that support the development of applications whose
functionality is realized with WSs and whose User
Interface (UI) is adapted to the user's context.
Typically, such approaches do not consider various
functional issues, such as the applications' semantics
and their syntactic robustness in terms of the WSs
selected to implement their functionality and the
automation of the service discovery and selection
processes. To this end, we propose a model-driven
design method for interactive service-based
applications that is able to consider the functional
issues and their implications for the UI. This method
is realized by a semiautomatic environment that can be
integrated into current model-based HCI tools to
complete the development of interactive service
front-ends. The proposed method takes as input an HCI
task model, which includes the user's view of the
interactive system, and produces a concrete service
model that describes how existing services can be
combined to realize the application's functionality. To
achieve its goal, our method first transforms system
tasks into semantic service queries by mapping the task
objects onto domain ontology concepts; then it sends
each resulting query to a semantic service engine so as
to discover the corresponding services. In the end,
only one service from those associated with a system
task is selected, through the execution of a novel
service concretization algorithm that ensures message
compatibility between the selected services.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Rafailidis:2014:CBT,
author = "Dimitrios Rafailidis and Apostolos Axenopoulos and
Jonas Etzold and Stavroula Manolopoulou and Petros
Daras",
title = "Content-based tag propagation and tensor factorization
for personalized item recommendation based on social
tagging",
journal = j-TIIS,
volume = "3",
number = "4",
pages = "26:1--26:??",
month = jan,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2487164",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 13 06:46:49 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In this article, a novel method for personalized item
recommendation based on social tagging is presented.
The proposed approach comprises a content-based tag
propagation method to address the sparsity and ``cold
start'' problems, which often occur in social tagging
systems and decrease the quality of recommendations.
The proposed method exploits (a) the content of items
and (b) users' tag assignments through a relevance
feedback mechanism in order to automatically identify
the optimal number of content-based and conceptually
similar items. The relevance degrees between users,
tags, and conceptually similar items are calculated in
order to ensure accurate tag propagation and
consequently to address the issue of ``learning tag
relevance.'' Moreover, the ternary relation among
users, tags, and items is preserved by performing tag
propagation in the form of triplets based on users'
personal preferences and ``cold start'' degree. The
latent associations among users, tags, and items are
revealed based on a tensor factorization model in order
to build personalized item recommendations. In our
experiments with real-world social data, we show the
superiority of the proposed approach over other
state-of-the-art methods, since several problems in
social tagging systems are successfully tackled.
Finally, we present the recommendation methodology in
the multimodal engine of I-SEARCH, where users'
interaction capabilities are demonstrated.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
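The Rafailidis et al. abstract above centers on a tensor factorization over
(user, tag, item) triplets. The following is a minimal illustrative sketch of
that general idea: a rank-R CP-style factorization fit by stochastic gradient
descent on a handful of invented triplets. The data, dimensions, and learning
rates are hypothetical, and the published model (with tag propagation and
relevance feedback) is substantially richer than this.

import numpy as np

# Hypothetical toy data; a rank-R CP-style factorization of observed
# (user, tag, item) triplets, fit by stochastic gradient descent.
rng = np.random.default_rng(0)
n_users, n_tags, n_items, rank = 5, 6, 7, 3
triplets = [(0, 1, 2), (0, 1, 3), (1, 4, 2), (2, 0, 5), (3, 3, 6), (4, 2, 1)]

U = 0.1 * rng.standard_normal((n_users, rank))
T = 0.1 * rng.standard_normal((n_tags, rank))
V = 0.1 * rng.standard_normal((n_items, rank))

lr, reg = 0.05, 0.01
for epoch in range(200):
    for u, t, i in triplets:
        pred = np.sum(U[u] * T[t] * V[i])        # CP reconstruction of one cell
        err = 1.0 - pred                          # observed triplets carry value 1
        U[u] += lr * (err * T[t] * V[i] - reg * U[u])
        T[t] += lr * (err * U[u] * V[i] - reg * T[t])
        V[i] += lr * (err * U[u] * T[t] - reg * V[i])

# Rank all items for user 0 and tag 1 to produce a recommendation list.
scores = V.dot(U[0] * T[1])
print("top items for (user 0, tag 1):", np.argsort(scores)[::-1][:3])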
@Article{Callaway:2014:EMD,
author = "Charles Callaway and Oliviero Stock and Elyon
Dekoven",
title = "Experiments with Mobile Drama in an Instrumented
Museum for Inducing Conversation in Small Groups",
journal = j-TIIS,
volume = "4",
number = "1",
pages = "2:1--2:??",
month = apr,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2584250",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Apr 12 11:14:27 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Small groups can have a better museum visit when that
visit is both a social and an educational occasion. The
unmediated discussion that often ensues during a shared
cultural experience, especially when it is with a small
group whose members already know each other, has been
shown by ethnographers to be important for a more
enriching experience. We present DRAMATRIC, a mobile
presentation system that delivers hour-long dramas to
small groups of museum visitors. DRAMATRIC continuously
receives sensor data from the museum environment during
a museum visit and analyzes group behavior from that
data. On the basis of that analysis, DRAMATRIC delivers
a series of dynamically coordinated dramatic scenes
about exhibits that the group walks near, each designed
to stimulate group discussion. Each drama presentation
contains small, complementary differences in the
narrative content heard by the different members of the
group, leveraging the tension/release cycle of
narrative to naturally lead visitors to fill in missing
pieces in their own drama by interacting with their
fellow group members. Using four specific techniques to
produce these coordinated narrative variations, we
describe two experiments: one in a neutral, nonmobile
environment, and the other a controlled experiment with
a full-scale drama in an actual museum. The first
experiment tests the hypothesis that narrative
differences will lead to increased conversation
compared to hearing identical narratives, whereas the
second experiment tests whether switching from
presenting a drama using one technique to using another
technique for the subsequent drama will result in
increased conversation. The first experiment shows that
hearing coordinated narrative variations can in fact
lead to significantly increased conversation. The
second experiment also serves as a framework for future
studies that evaluate strategies for similar adaptive
systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Martens:2014:ISI,
author = "Jean-Bernard Martens",
title = "Interactive Statistics with {Illmo}",
journal = j-TIIS,
volume = "4",
number = "1",
pages = "4:1--4:??",
month = apr,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2509108",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Apr 12 11:14:27 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Progress in empirical research relies on adequate
statistical analysis and reporting. This article
proposes an alternative approach to statistical
modeling that is based on an old but mostly forgotten
idea, namely Thurstone modeling. Traditional
statistical methods assume that either the measured
data, in the case of parametric statistics, or the
rank-order transformed data, in the case of
nonparametric statistics, are samples from a specific
(usually Gaussian) distribution with unknown
parameters. Consequently, such methods should not be
applied when this assumption is not valid. Thurstone
modeling similarly assumes the existence of an
underlying process that obeys an a priori assumed
distribution with unknown parameters, but combines this
underlying process with a flexible response mechanism
that can be either continuous or discrete and either
linear or nonlinear. One important advantage of
Thurstone modeling is that traditional statistical
methods can still be applied on the underlying process,
irrespective of the nature of the measured data itself.
Another advantage is that Thurstone models can be
graphically represented, which helps to communicate
them to a broad audience. A new interactive statistical
package, Interactive Log Likelihood MOdeling (Illmo),
was specifically designed for estimating and rendering
Thurstone models and is intended to bring Thurstone
modeling within the reach of persons who are not
experts in statistics. Illmo is unique in the sense
that it provides not only extensive graphical
renderings of the data analysis results but also an
interface for navigating between different model
options. In this way, users can interactively explore
different models and decide on an adequate balance
between model complexity and agreement with the
experimental data. Hypothesis testing on model
parameters is also made intuitive and is supported by
both textual and graphical feedback. The flexibility
and ease of use of Illmo mean that it is also
potentially useful as a didactic tool for teaching
statistics.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
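The Martens abstract above builds on Thurstone modeling, in which observed
judgments are explained by an underlying Gaussian process over latent scale
values. As a rough orientation, the sketch below fits a classic Thurstone
Case V model to a small table of invented pairwise-preference counts by
maximum likelihood; Illmo itself covers a much wider family of models and
response mechanisms than this.

import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm

# wins[i, j] = how often stimulus i was preferred over stimulus j (toy counts).
wins = np.array([[0, 8, 9],
                 [2, 0, 7],
                 [1, 3, 0]], dtype=float)
n = wins.shape[0]

def neg_log_lik(free_mu):
    mu = np.concatenate([[0.0], free_mu])       # fix mu_0 = 0 for identifiability
    # Case V: P(i preferred over j) = Phi((mu_i - mu_j) / sqrt(2)).
    p = norm.cdf((mu[:, None] - mu[None, :]) / np.sqrt(2.0))
    off = ~np.eye(n, dtype=bool)
    return -np.sum(wins[off] * np.log(p[off]))

res = minimize(neg_log_lik, x0=np.zeros(n - 1), method="BFGS")
print("estimated scale values:", np.round(np.concatenate([[0.0], res.x]), 3))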
@Article{Riveiro:2014:ENM,
author = "Maria Riveiro",
title = "Evaluation of Normal Model Visualization for Anomaly
Detection in Maritime Traffic",
journal = j-TIIS,
volume = "4",
number = "1",
pages = "5:1--5:??",
month = apr,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2591511",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Apr 12 11:14:27 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Monitoring dynamic objects in surveillance
applications is normally a demanding activity for
operators, not only because of the complexity and high
dimensionality of the data but also because of other
factors like time constraints and uncertainty. Timely
detection of anomalous objects or situations that need
further investigation may reduce operators' cognitive
load. Surveillance applications may include anomaly
detection capabilities, but their use is not
widespread, as they usually generate a high number of
false alarms, they do not provide appropriate cognitive
support for operators, and their outcomes can be
difficult to comprehend and trust. Visual analytics can
bridge the gap between computational and human
approaches to detecting anomalous behavior in traffic
data, making this process more transparent. As a step
toward this goal of transparency, this article presents
an evaluation that assesses whether visualizations of
normal behavioral models of vessel traffic support two
of the main analytical tasks specified during our field
work in maritime control centers. The evaluation
combines quantitative and qualitative usability
assessments. The quantitative evaluation, which was
carried out with a proof-of-concept prototype, reveals
that participants who used the visualization of normal
behavioral models outperformed the group that did not
do so. The qualitative assessment shows that domain
experts have a positive attitude toward the provision
of automatic support and the visualization of normal
behavioral models, as these aids may reduce reaction
time and increase trust in and comprehensibility of the
system.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2014:EPM,
author = "Yingjie Victor Chen and Zhenyu Cheryl Qian and Robert
Woodbury and John Dill and Chris D. Shaw",
title = "Employing a Parametric Model for Analytic Provenance",
journal = j-TIIS,
volume = "4",
number = "1",
pages = "6:1--6:??",
month = apr,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2591510",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Apr 12 11:14:27 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We introduce a propagation-based parametric symbolic
model approach to supporting analytic provenance. This
approach combines a script language to capture and
encode the analytic process and a parametrically
controlled symbolic model to represent and reuse the
logic of the analysis process. Our approach first
appeared in a visual analytics system called CZSaw.
Using a script to capture the analyst's interactions at
a meaningful system action level allows the creation of
a parametrically controlled symbolic model in the form
of a Directed Acyclic Graph (DAG). Using the DAG allows
propagating changes. Graph nodes correspond to
variables in CZSaw scripts, which are results (data and
data visualizations) generated from user interactions.
The user interacts with variables representing entities
or relations to create the next step's results. Graph
edges represent dependency relationships among nodes.
Any change to a variable triggers the propagation
mechanism to update downstream dependent variables and
in turn updates data views to reflect the change. The
analyst can reuse parts of the analysis process by
assigning new values to a node in the graph. We
evaluated this symbolic model approach by solving three
IEEE VAST Challenge contest problems (from IEEE VAST
2008, 2009, and 2010). In each of these challenges, the
analyst first created a symbolic model to explore,
understand, analyze, and solve a particular subproblem
and then reused the model via its dependency graph
propagation mechanism to solve similar subproblems.
With the script and model, CZSaw supports the analytic
provenance by capturing, encoding, and reusing the
analysis process. The analyst can recall the
chronological states of the analysis process with the
CZSaw script and may interpret the underlying rationale
of the analysis with the symbolic model.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chan:2014:RCT,
author = "Yu-Hsuan Chan and Carlos D. Correa and Kwan-Liu Ma",
title = "{Regression Cube}: a Technique for Multidimensional
Visual Exploration and Interactive Pattern Finding",
journal = j-TIIS,
volume = "4",
number = "1",
pages = "7:1--7:??",
month = apr,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2590349",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:17:36 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Scatterplots are commonly used to visualize
multidimensional data; however, 2D projections of data
offer limited understanding of the high-dimensional
interactions between data points. We introduce an
interactive 3D extension of scatterplots called the
Regression Cube (RC), which augments a 3D scatterplot
with three facets on which the correlations between the
two variables are revealed by sensitivity lines and
sensitivity streamlines. The sensitivity visualization
of local regression on the 2D projections provides
insights about the shape of the data through its
orientation and continuity cues. We also introduce a
series of visual operations such as clustering,
brushing, and selection supported in RC. By iteratively
refining the selection of data points of interest, RC
is able to reveal salient local correlation patterns
that may otherwise remain hidden with a global
analysis. We have demonstrated our system with two
examples and a user-oriented evaluation, and we show
how RCs enable interactive visual exploration of
multidimensional datasets via a variety of
classification and information retrieval tasks. A video
demo of RC is available.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
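The Regression Cube abstract above relies on sensitivity lines derived from
local regression on 2D projections. The sketch below illustrates the
underlying computation in one dimension: a kernel-weighted linear fit around
a query point whose slope serves as the local sensitivity. The data and
bandwidth are invented for illustration and are not taken from the paper.

import numpy as np

rng = np.random.default_rng(1)
x = rng.uniform(0, 4, 300)
y = np.sin(x) + 0.1 * rng.standard_normal(300)

def local_slope(x0, x, y, bandwidth=0.3):
    # Kernel-weighted linear fit around x0; the slope is the local sensitivity.
    w = np.exp(-0.5 * ((x - x0) / bandwidth) ** 2)
    X = np.column_stack([np.ones_like(x), x])
    # Weighted least squares: solve (X^T W X) beta = X^T W y.
    beta = np.linalg.solve(X.T.dot(w[:, None] * X), X.T.dot(w * y))
    return beta[1]

for x0 in (0.5, 1.5, 3.0):
    print(f"sensitivity dy/dx near x={x0}: {local_slope(x0, x, y):+.2f}")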
@Article{Jawaheer:2014:MUP,
author = "Gawesh Jawaheer and Peter Weller and Patty Kostkova",
title = "Modeling User Preferences in Recommender Systems: a
Classification Framework for Explicit and Implicit User
Feedback",
journal = j-TIIS,
volume = "4",
number = "2",
pages = "8:1--8:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2512208",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:34 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recommender systems are firmly established as a
standard technology for assisting users with their
choices; however, little attention has been paid to the
application of the user model in recommender systems,
particularly the variability and noise that are an
intrinsic part of human behavior and activity. To
enable recommender systems to suggest items that are
useful to a particular user, it can be essential to
understand the user and his or her interactions with
the system. These interactions typically manifest
themselves as explicit and implicit user feedback that
provides the key indicators for modeling users'
preferences for items and essential information for
personalizing recommendations. In this article, we
propose a classification framework for the use of
explicit and implicit user feedback in recommender
systems based on a set of distinct properties that
include Cognitive Effort, User Model, Scale of
Measurement, and Domain Relevance. We develop a set of
comparison criteria for explicit and implicit user
feedback to emphasize the key properties. Using our
framework, we provide a classification of recommender
systems that have addressed questions about user
feedback, and we review state-of-the-art techniques to
improve such user feedback and thereby improve the
performance of the recommender system. Finally, we
formulate challenges for future research on improvement
of user feedback.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Fang:2014:CLM,
author = "Yi Fang and Ziad Al Bawab and Jean-Fran{\c{c}}ois
Crespo",
title = "Collaborative Language Models for Localized Query
Prediction",
journal = j-TIIS,
volume = "4",
number = "2",
pages = "9:1--9:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2622617",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:34 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Localized query prediction (LQP) is the task of
estimating web query trends for a specific location.
This problem subsumes many interesting personalized web
applications such as personalization for buzz query
detection, for query expansion, and for query
recommendation. These personalized applications can
greatly enhance user interaction with web search
engines by providing more customized information
discovered from user input (i.e., queries), but the LQP
task has rarely been investigated in the literature.
Although abundant work on estimating global web
search trends does exist, it often encounters the major
challenge of data sparsity when personalization comes
into play. In this article, we tackle the LQP task by
proposing a series of collaborative language models
(CLMs). CLMs alleviate the data sparsity issue by
collaboratively collecting queries and trend
information from the other locations. The traditional
statistical language models assume a fixed background
language model, which loses the taste of
personalization. In contrast, CLMs are personalized
language models with flexible background language
models customized to various locations. The most
sophisticated CLM enables the collaboration to adapt to
specific query topics, which further advances the
personalization level. An extensive set of experiments
have been conducted on a large-scale web query log to
demonstrate the effectiveness of the proposed models.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
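The collaborative language models described in the Fang et al. abstract above
customize a background language model to each location. A minimal, heavily
simplified sketch of the shared intuition is shown below: a location-specific
unigram query model interpolated with a background model pooled from the
other locations. The toy query logs and the fixed interpolation weight are
invented; the published CLMs learn the collaboration and adapt it to query
topics.

from collections import Counter

logs = {
    "paris":  ["louvre hours", "metro map", "louvre tickets"],
    "tokyo":  ["metro map", "sushi near me", "metro pass"],
    "austin": ["bbq near me", "live music tonight"],
}

def unigram_counts(queries):
    return Counter(w for q in queries for w in q.split())

def interpolated_prob(word, location, lam=0.7):
    # P(word | location) = lam * local MLE + (1 - lam) * background MLE.
    local = unigram_counts(logs[location])
    background = unigram_counts(q for loc, qs in logs.items()
                                if loc != location for q in qs)
    p_local = local[word] / max(sum(local.values()), 1)
    p_back = background[word] / max(sum(background.values()), 1)
    return lam * p_local + (1 - lam) * p_back

print(round(interpolated_prob("metro", "paris"), 3))   # boosted by other cities
print(round(interpolated_prob("bbq", "paris"), 3))     # only background support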
@Article{Castellano:2014:CSA,
author = "Ginevra Castellano and Iolanda Leite and Andr{\'e}
Pereira and Carlos Martinho and Ana Paiva and Peter W.
Mcowan",
title = "Context-Sensitive Affect Recognition for a Robotic
Game Companion",
journal = j-TIIS,
volume = "4",
number = "2",
pages = "10:1--10:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2622615",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:34 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Social perception abilities are among the most
important skills necessary for robots to engage humans
in natural forms of interaction. Affect-sensitive
robots are more likely to be able to establish and
maintain believable interactions over extended periods
of time. Nevertheless, the integration of affect
recognition frameworks in real-time human-robot
interaction scenarios is still underexplored. In this
article, we propose and evaluate a context-sensitive
affect recognition framework for a robotic game
companion for children. The robot can automatically
detect affective states experienced by children in an
interactive chess game scenario. The affect recognition
framework is based on the automatic extraction of task
features and social interaction-based features.
Vision-based indicators of the children's nonverbal
behaviour are merged with contextual features related
to the game and the interaction and given as input to
support vector machines to create a context-sensitive
multimodal system for affect recognition. The affect
recognition framework is fully integrated in an
architecture for adaptive human-robot interaction.
Experimental evaluation showed that children's affect
can be successfully predicted using a combination of
behavioural and contextual data related to the game and
the interaction with the robot. It was found that
contextual data alone can be used to successfully
predict a subset of affective dimensions, such as
interest toward the robot. Experiments also showed that
engagement with the robot can be predicted using
information about the user's valence, interest and
anticipatory behaviour. These results provide evidence
that social engagement can be modelled as a state
consisting of affect and attention components in the
context of the interaction.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Steichen:2014:IVT,
author = "Ben Steichen and Cristina Conati and Giuseppe
Carenini",
title = "Inferring Visualization Task Properties, User
Performance, and User Cognitive Abilities from Eye Gaze
Data",
journal = j-TIIS,
volume = "4",
number = "2",
pages = "11:1--11:??",
month = jul,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2633043",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:34 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Information visualization systems have traditionally
followed a one-size-fits-all model, typically ignoring
an individual user's needs, abilities, and preferences.
However, recent research has indicated that
visualization performance could be improved by adapting
aspects of the visualization to the individual user. To
this end, this article presents research aimed at
supporting the design of novel user-adaptive
visualization systems. In particular, we discuss
results on using information on user eye gaze patterns
while interacting with a given visualization to predict
properties of the user's visualization task; the user's
performance (in terms of predicted task completion
time); and the user's individual cognitive abilities,
such as perceptual speed, visual working memory, and
verbal working memory. We provide a detailed analysis
of different eye gaze feature sets, as well as
over-time accuracies. We show that these predictions
are significantly better than a baseline classifier
even during the early stages of visualization usage.
These findings are then discussed with a view to
designing visualization systems that can adapt to the
individual user in real time.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cuayahuitl:2014:ISI,
author = "Heriberto Cuay{\'a}huitl and Lutz Frommberger and Nina
Dethlefs and Antoine Raux and Mathew Marge and Hendrik
Zender",
title = "Introduction to the Special Issue on Machine Learning
for Multiple Modalities in Interactive Systems and
Robots",
journal = j-TIIS,
volume = "4",
number = "3",
pages = "12e:1--12e:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2670539",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 14 17:38:05 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This special issue highlights research articles that
apply machine learning to robots and other systems that
interact with users through more than one modality,
such as speech, gestures, and vision. For example, a
robot may coordinate its speech with its actions,
taking into account (audio-)visual feedback during
their execution. Machine learning provides interactive
systems with opportunities to improve performance not
only of individual components but also of the system as
a whole. However, machine learning methods that
encompass multiple modalities of an interactive system
are still relatively hard to find. The articles in this
special issue represent examples that contribute to
filling this gap.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12e",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ngo:2014:EIM,
author = "Hung Ngo and Matthew Luciw and Jawas Nagi and
Alexander Forster and J{\"u}rgen Schmidhuber and Ngo
Anh Vien",
title = "Efficient Interactive Multiclass Learning from Binary
Feedback",
journal = j-TIIS,
volume = "4",
number = "3",
pages = "12:1--12:??",
month = aug,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2629631",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:36 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We introduce a novel algorithm called upper
confidence-weighted learning (UCWL) for online multiclass
learning from binary feedback (e.g., feedback that
indicates whether the prediction was right or wrong).
UCWL combines the upper confidence bound (UCB)
framework with the soft confidence-weighted (SCW)
online learning scheme. In UCB, each instance is
classified using both score and uncertainty. For a
given instance in the sequence, the algorithm might
guess its class label primarily to reduce the class
uncertainty. This is a form of informed exploration,
which enables the performance to improve with lower
sample complexity compared to the case without
exploration. Combining UCB with SCW leads to the
ability to deal well with noisy and nonseparable data,
and state-of-the-art performance is achieved without
increasing the computational cost. A potential
application setting is human-robot interaction (HRI),
where the robot is learning to classify some set of
inputs while the human teaches it by providing only
binary feedback, or sometimes even the wrong answer
entirely. Experimental results in the HRI setting and
with two benchmark datasets from other settings show
that UCWL outperforms other state-of-the-art algorithms
in the online binary feedback setting, and surprisingly
even sometimes outperforms state-of-the-art algorithms
that get full feedback (e.g., the true class label),
whereas UCWL gets only binary feedback on the same data
sequence.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
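The UCWL algorithm in the Ngo et al. abstract above combines
upper-confidence-bound exploration with soft confidence-weighted updates. The
sketch below shows only the exploration side of that idea in a simpler form:
per-class linear scores with a LinUCB-style uncertainty bonus, updated from
binary right/wrong feedback on a synthetic stream. It is not the authors'
algorithm; the SCW machinery is replaced here by plain additive updates for
brevity.

import numpy as np

rng = np.random.default_rng(2)
n_classes, dim, alpha, lr = 3, 5, 0.5, 0.2

W = np.zeros((n_classes, dim))                         # per-class weight vectors
A = np.stack([np.eye(dim) for _ in range(n_classes)])  # per-class design matrices

def predict(x):
    # Pick the class with the highest score plus uncertainty bonus (exploration).
    bonus = np.array([np.sqrt(x.dot(np.linalg.solve(A[k], x)))
                      for k in range(n_classes)])
    return int(np.argmax(W.dot(x) + alpha * bonus))

# Synthetic stream: the true class is the argmax of a hidden weight matrix.
W_true = rng.standard_normal((n_classes, dim))
correct = 0
for t in range(500):
    x = rng.standard_normal(dim)
    y_true = int(np.argmax(W_true.dot(x)))
    y_hat = predict(x)
    right = y_hat == y_true                            # binary feedback only
    correct += right
    A[y_hat] += np.outer(x, x)                         # uncertainty shrinks with use
    W[y_hat] += lr * (1.0 if right else -1.0) * x      # reward or penalize the guess
print(f"online accuracy: {correct / 500:.2f}")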
@Article{Benotti:2014:INL,
author = "Luciana Benotti and Tessa Lau and Mart{\'\i}n
Villalba",
title = "Interpreting Natural Language Instructions Using
Language, Vision, and Behavior",
journal = j-TIIS,
volume = "4",
number = "3",
pages = "13:1--13:??",
month = aug,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2629632",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Sep 13 13:15:36 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We define the problem of automatic instruction
interpretation as follows. Given a natural language
instruction, can we automatically predict what an
instruction follower, such as a robot, should do in the
environment to follow that instruction? Previous
approaches to automatic instruction interpretation have
required either extensive domain-dependent rule writing
or extensive manually annotated corpora. This article
presents a novel approach that leverages a large amount
of unannotated, easy-to-collect data from humans
interacting in a game-like environment. Our approach
uses an automatic annotation phase based on artificial
intelligence planning, for which two different
annotation strategies are compared: one based on
behavioral information and the other based on
visibility information. The resulting annotations are
used as training data for different automatic
classifiers. This algorithm is based on the intuition
that the problem of interpreting a situated instruction
can be cast as a classification problem of choosing
among the actions that are possible in the situation.
Classification is done by combining language, vision,
and behavior information. Our empirical analysis shows
that machine learning classifiers achieve 77\% accuracy
on this task on available English corpora and 74\% on
similar German corpora. Finally, the inclusion of human
feedback in the interpretation process is shown to
boost performance to 92\% for the English corpus and
90\% for the German corpus.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Keizer:2014:MLS,
author = "Simon Keizer and Mary Ellen Foster and Zhuoran Wang
and Oliver Lemon",
title = "Machine Learning for Social Multiparty Human--Robot
Interaction",
journal = j-TIIS,
volume = "4",
number = "3",
pages = "14:1--14:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2600021",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 14 17:38:05 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We describe a variety of machine-learning techniques
that are being applied to social multiuser human--robot
interaction using a robot bartender in our scenario. We
first present a data-driven approach to social state
recognition based on supervised learning. We then
describe an approach to social skills execution, that
is, action selection for generating socially
appropriate robot behavior, which is based on
reinforcement learning, using a data-driven simulation
of multiple users to train execution policies for
social skills. Next, we describe how these components
for social state recognition and skills execution have
been integrated into an end-to-end robot bartender
system, and we discuss the results of a user
evaluation. Finally, we present an alternative
unsupervised learning framework that combines social
state recognition and social skills execution based on
hierarchical Dirichlet processes and an infinite POMDP
interaction manager. The models make use of data from
both human--human interactions collected in a number of
German bars and human--robot interactions recorded in
the evaluation of an initial version of the system.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cuayahuitl:2014:NHR,
author = "Heriberto Cuay{\'a}huitl and Ivana
Kruijff-Korbayov{\'a} and Nina Dethlefs",
title = "Nonstrict Hierarchical Reinforcement Learning for
Interactive Systems and Robots",
journal = j-TIIS,
volume = "4",
number = "3",
pages = "15:1--15:??",
month = oct,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2659003",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 14 17:38:05 MDT 2014",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Conversational systems and robots that use
reinforcement learning for policy optimization in large
domains often face the problem of limited scalability.
This problem has been addressed either by using
function approximation techniques that estimate the
approximate true value function of a policy or by using
a hierarchical decomposition of a learning task into
subtasks. We present a novel approach for dialogue
policy optimization that combines the benefits of both
hierarchical control and function approximation and
that allows flexible transitions between dialogue
subtasks to give human users more control over the
dialogue. To this end, each reinforcement learning
agent in the hierarchy is extended with a subtask
transition function and a dynamic state space to allow
flexible switching between subdialogues. In addition,
the subtask policies are represented with linear
function approximation in order to generalize the
decision making to situations unseen in training. Our
proposed approach is evaluated in an interactive
conversational robot that learns to play quiz games.
Experimental results, using simulation and real users,
provide evidence that our proposed approach can lead to
more flexible (natural) interactions than strict
hierarchical control and that it is preferred by human
users.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
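The Cuayahuitl et al. abstract above represents subtask policies with linear
function approximation so that decisions generalize to situations unseen in
training. The sketch below shows that representational choice in isolation:
plain Q-learning with one weight vector per action over hand-picked state
features, on an invented slot-filling toy task rather than the authors'
hierarchical quiz-game agents.

import numpy as np

rng = np.random.default_rng(3)
n_slots, n_actions = 3, 4            # actions: ask slot 0..2, or confirm-and-close
gamma, lr, eps = 0.95, 0.1, 0.1

def features(filled):
    return np.append(filled.astype(float), 1.0)        # slot flags plus a bias term

theta = np.zeros((n_actions, n_slots + 1))             # one weight vector per action

for episode in range(2000):
    filled = np.zeros(n_slots, dtype=bool)
    for turn in range(10):
        phi = features(filled)
        q = theta.dot(phi)
        a = rng.integers(n_actions) if rng.random() < eps else int(np.argmax(q))
        if a < n_slots:                                 # ask for a slot: small cost
            reward, done = -1.0, False
            filled[a] = True
        else:                                           # confirm and close the dialogue
            reward, done = (10.0 if filled.all() else -5.0), True
        target = reward + (0.0 if done else gamma * np.max(theta.dot(features(filled))))
        theta[a] += lr * (target - q[a]) * phi          # TD update on the taken action
        if done:
            break

print("greedy action once all slots are filled:",
      int(np.argmax(theta.dot(features(np.ones(n_slots, dtype=bool))))))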
@Article{Bulling:2015:ISI,
author = "Andreas Bulling and Ulf Blanke and Desney Tan and Jun
Rekimoto and Gregory Abowd",
title = "Introduction to the Special Issue on Activity
Recognition for Interaction",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "16e:1--16e:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2694858",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction describes the aims and
scope of the ACM Transactions on Interactive
Intelligent Systems special issue on Activity
Recognition for Interaction. It explains why activity
recognition is becoming crucial as part of the cycle of
interaction between users and computing systems, and it
shows how the five articles selected for this special
issue reflect this theme.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16e",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ye:2015:UUS,
author = "Juan Ye and Graeme Stevenson and Simon Dobson",
title = "{USMART}: an Unsupervised Semantic Mining Activity
Recognition Technique",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "16:1--16:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2662870",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recognising high-level human activities from low-level
sensor data is a crucial driver for pervasive systems
that wish to provide seamless and distraction-free
support for users engaged in normal activities.
Research in this area has grown alongside advances in
sensing and communications, and experiments have
yielded sensor traces coupled with ground truth
annotations about the underlying environmental
conditions and user actions. Traditional machine
learning has had some success in recognising human
activities; but the need for large volumes of annotated
data and the danger of overfitting to specific
conditions represent challenges in connection with the
building of models applicable to a wide range of users,
activities, and environments. We present USMART, a
novel unsupervised technique that combines data- and
knowledge-driven techniques. USMART uses a general
ontology model to represent domain knowledge that can
be reused across different environments and users, and
we augment a range of learning techniques with
ontological semantics to facilitate the unsupervised
discovery of patterns in how each user performs daily
activities. We evaluate our approach against four
real-world third-party datasets featuring different
user populations and sensor configurations, and we find
that USMART achieves up to 97.5\% accuracy in
recognising daily activities.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dim:2015:ADS,
author = "Eyal Dim and Tsvi Kuflik",
title = "Automatic Detection of Social Behavior of Museum
Visitor Pairs",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "17:1--17:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2662869",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In many cases, visitors come to a museum in small
groups. In such cases, the visitors' social context has
an impact on their museum visit experience. Knowing the
social context may allow a system to provide socially
aware services to the visitors. Evidence of the social
context can be gained from observing/monitoring the
visitors' social behavior. However, automatic
identification of a social context requires, on the one
hand, identifying typical social behavior patterns and,
on the other, using relevant sensors that measure
various signals and reason about them to detect the
visitors' social behavior. We present such typical
social behavior patterns of visitor pairs, identified
by observations, and then the instrumentation,
detection process, reasoning, and analysis of measured
signals that enable us to detect the visitors' social
behavior. Simple sensor data, such as proximity to
other visitors, proximity to museum points of interest,
and visitor orientation are used to detect social
synchronization, attention to the social companion, and
interest in museum exhibits. The presented approach may
allow future research to offer adaptive services to
museum visitors based on their social context to
support their group visit experience better.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Caramiaux:2015:AGR,
author = "Baptiste Caramiaux and Nicola Montecchio and Atau
Tanaka and Fr{\'e}d{\'e}ric Bevilacqua",
title = "Adaptive Gesture Recognition with Variation Estimation
for Interactive Systems",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "18:1--18:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2643204",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article presents a gesture recognition/adaptation
system for human--computer interaction applications
that goes beyond activity classification and that, as a
complement to gesture labeling, characterizes the
movement execution. We describe a template-based
recognition method that simultaneously aligns the input
gesture to the templates using a Sequential Monte Carlo
inference technique. Contrary to standard
template-based methods based on dynamic programming,
such as Dynamic Time Warping, the algorithm has an
adaptation process that tracks gesture variation in
real time. The method continuously updates, during
execution of the gesture, the estimated parameters and
recognition results, which offers key advantages for
continuous human--machine interaction. The technique is
evaluated in several different ways: Recognition and
early recognition are evaluated on 2D onscreen pen
gestures; adaptation is assessed on synthetic data; and
both early recognition and adaptation are evaluated in
a user study involving 3D free-space gestures. The
method is robust to noise, and successfully adapts to
parameter variation. Moreover, it performs recognition
as well as or better than nonadapting offline
template-based methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
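The gesture follower in the Caramiaux et al. abstract above uses Sequential
Monte Carlo inference to align an incoming gesture to a template while
estimating variation parameters. The sketch below is a bare-bones particle
filter over phase and speed for a synthetic 1D gesture; the published method
also tracks scale and other variations and handles multiple templates.

import numpy as np

rng = np.random.default_rng(4)
template = np.sin(np.linspace(0, np.pi, 100))     # stored 1D gesture template
observed = np.sin(np.linspace(0, np.pi, 140))     # same gesture, performed more slowly
observed = observed + 0.05 * rng.standard_normal(observed.size)

n_particles, obs_sigma = 300, 0.1
phase = np.zeros(n_particles)                     # position within the template
speed = rng.uniform(0.5, 1.5, n_particles)        # relative execution speed

for z in observed:
    # Propagate: advance each particle by its speed, with a little diffusion.
    speed = speed + 0.02 * rng.standard_normal(n_particles)
    phase = np.clip(phase + speed, 0, len(template) - 1)
    # Weight by how well the template value at each particle's phase matches z.
    pred = template[phase.astype(int)]
    w = np.exp(-0.5 * ((z - pred) / obs_sigma) ** 2)
    w = w / w.sum()
    # Resample particles in proportion to their weights.
    idx = rng.choice(n_particles, size=n_particles, p=w)
    phase, speed = phase[idx], speed[idx]

print(f"estimated relative speed: {speed.mean():.2f} (template/observation ~{100/140:.2f})")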
@Article{Cooney:2015:AIS,
author = "Martin Cooney and Shuichi Nishio and Hiroshi
Ishiguro",
title = "Affectionate Interaction with a Small Humanoid Robot
Capable of Recognizing Social Touch Behavior",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "19:1--19:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2685395",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Activity recognition, involving a capability to
recognize people's behavior and its underlying
significance, will play a crucial role in facilitating
the integration of interactive robotic artifacts into
everyday human environments. In particular, social
intelligence in recognizing affectionate behavior will
offer value by allowing companion robots to bond
meaningfully with interacting persons. The current
article addresses the issue of designing an
affectionate haptic interaction between a person and a
companion robot by exploring how a small humanoid robot
can behave to elicit affection while recognizing
touches. We report on an experiment conducted to gain
insight into how people perceive three fundamental
interactive strategies in which a robot is either
always highly affectionate, appropriately affectionate,
or superficially unaffectionate (emphasizing
positivity, contingency, and challenge, respectively).
Results provide insight into the structure of
affectionate interaction between humans and humanoid
robots, underlining the importance of an interaction
design expressing sincere liking, stability, and
variation, and suggest the usefulness of novel
modalities such as warmth and cold.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{DeCarolis:2015:ILD,
author = "Berardina {De Carolis} and Stefano Ferilli and
Domenico Redavid",
title = "Incremental Learning of Daily Routines as Workflows in
a {Smart} Home Environment",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "20:1--20:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2675063",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Smart home environments should proactively support
users in their activities, anticipating their needs
according to their preferences. Understanding what the
user is doing in the environment is important for
adapting the environment's behavior, as well as for
identifying situations that could be problematic for
the user. Enabling the environment to exploit models of
the user's most common behaviors is an important step
toward this objective. In particular, models of the
daily routines of a user can be exploited not only for
predicting his/her needs, but also for comparing the
actual situation at a given moment with the expected
one, in order to detect anomalies in his/her behavior.
While manually setting up process models in business
and factory environments may be cost-effective,
building models of the processes involved in people's
everyday life is infeasible. This fact fully justifies
the interest of the Ambient Intelligence community in
automatically learning such models from examples of
actual behavior. Incremental adaptation of the models
and the ability to express/learn complex conditions on
the involved tasks are also desirable. This article
describes how process mining can be used for learning
users' daily routines from a dataset of annotated
sensor data. The solution that we propose relies on a
First-Order Logic learning approach. Indeed,
First-Order Logic provides a single, comprehensive and
powerful framework for supporting all the previously
mentioned features. Our experiments, performed both on
a proprietary toy dataset and on publicly available
real-world ones, indicate that this approach is
efficient and effective for learning and modeling daily
routines in Smart Home Environments.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Gianni:2015:SRF,
author = "Mario Gianni and Geert-Jan M. Kruijff and Fiora
Pirri",
title = "A Stimulus-Response Framework for Robot Control",
journal = j-TIIS,
volume = "4",
number = "4",
pages = "21:1--21:??",
month = jan,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2677198",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 29 10:52:31 MST 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We propose in this article a new approach to robot
cognitive control based on a stimulus-response
framework that models both a robot's stimuli and the
robot's decision to switch tasks in response to or
inhibit the stimuli. In an autonomous system, we expect
a robot to be able to deal with the whole system of
stimuli and to use them to regulate its behavior in
real-world applications. The proposed framework
contributes to the state of the art of robot planning
and high-level control in that it provides a novel
perspective on the interaction between robot and
environment. Our approach is inspired by Gibson's
constructive view of the concept of a stimulus and by
the cognitive control paradigm of task switching. We
model the robot's response to a stimulus in three
stages. We start by defining the stimuli as perceptual
functions yielded by the active robot processes and
learned via an informed logistic regression. Then we
model the stimulus-response relationship by estimating
a score matrix that leads to the selection of a single
response task for each stimulus, basing the estimation
on low-rank matrix factorization. The decision about
switching takes into account both an interference cost
and a reconfiguration cost. The interference cost
weighs the effort of discontinuing the current robot
mental state to switch to a new state, whereas the
reconfiguration cost weighs the effort of activating
the response task. A choice is finally made based on
the payoff of switching. Because processes play such a
crucial role both in the stimulus model and in the
stimulus-response model, and because processes are
activated by actions, we address also the process
model, which is built on a theory of action. The
framework is validated by several experiments that
exploit a full implementation on an advanced robotic
platform and is compared with two known approaches to
replanning. Results demonstrate the practical value of
the system in terms of robot autonomy, flexibility, and
usability.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Taranta:2015:EBC,
author = "Eugene M. {Taranta II} and Thaddeus K. Simons and
Rahul Sukthankar and Joseph J. {Laviola, Jr.}",
title = "Exploring the Benefits of Context in {$3$D} Gesture
Recognition for Game-Based Virtual Environments",
journal = j-TIIS,
volume = "5",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2656345",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 26 05:43:35 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present a systematic exploration of how to utilize
video game context (e.g., player and environmental
state) to modify and augment existing 3D gesture
recognizers to improve accuracy for large gesture sets.
Specifically, our work develops and evaluates three
strategies for incorporating context into 3D gesture
recognizers. These strategies include modifying the
well-known Rubine linear classifier to handle
unsegmented input streams and per-frame retraining
using contextual information (CA-Linear); a GPU
implementation of dynamic time warping (DTW) that
reduces the overhead of traditional DTW by utilizing
context to evaluate only relevant time sequences inside
of a multithreaded kernel (CA-DTW); and a multiclass
SVM with per-class probability estimation that is
combined with a contextually based prior probability
distribution (CA-SVM). We evaluate each strategy using
a Kinect-based third-person perspective VE game
prototype that combines parkour-style navigation with
hand-to-hand combat. Using a simple gesture collection
application to collect a set of 57 gestures and the
game prototype that implements 37 of these gestures, we
conduct three experiments. In the first experiment, we
evaluate the effectiveness of several established
classifiers on our gesture set and demonstrate
state-of-the-art results using our proposed method. In
our second experiment, we generate 500 random scenarios
having between 5 and 19 of the 57 gestures in context.
We show that the contextually aware classifiers
CA-Linear, CA-DTW, and CA-SVM significantly outperform
their non-contextually aware counterparts by 37.74\%,
36.04\%, and 20.81\%, respectively. On the basis of the
results of the second experiment, we derive upper-bound
expectations for in-game performance for the three CA
classifiers: 96.61\%, 86.79\%, and 96.86\%,
respectively. Finally, our third experiment is an
in-game evaluation of the three CA classifiers with and
without context. Our results show that through the use
of context, we are able to achieve an average in-game
recognition accuracy of 89.67\% with CA-Linear compared
to 65.10\% without context, 79.04\% for CA-DTW compared
to 58.1\% without context, and 90.85\% with CA-SVM
compared to 75.2\% without context.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
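The CA-SVM strategy in the Taranta et al. abstract above combines per-class
probability estimates with a contextually derived prior. The sketch below
shows that combination step on synthetic data: an SVM with probability
estimates whose outputs are multiplied by an invented context prior and
renormalized. The gesture classes, features, and prior values are all
hypothetical, not those of the authors' game.

import numpy as np
from sklearn.svm import SVC

rng = np.random.default_rng(5)
n_classes, dim = 6, 10

# Toy training data: one Gaussian blob of feature vectors per gesture class.
centers = 3.0 * rng.standard_normal((n_classes, dim))
X = np.vstack([c + rng.standard_normal((40, dim)) for c in centers])
y = np.repeat(np.arange(n_classes), 40)
clf = SVC(probability=True).fit(X, y)

def contextual_predict(x, context_prior):
    # Posterior over gestures: classifier probabilities times the context prior.
    p = clf.predict_proba(x.reshape(1, -1))[0]
    post = p * context_prior
    return post / post.sum()

x_test = centers[2] + rng.standard_normal(dim)             # a noisy class-2 gesture
uniform = np.full(n_classes, 1.0 / n_classes)
in_combat = np.array([0.05, 0.05, 0.4, 0.4, 0.05, 0.05])   # invented combat-context prior

print("no context  :", int(np.argmax(contextual_predict(x_test, uniform))))
print("with context:", int(np.argmax(contextual_predict(x_test, in_combat))))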
@Article{Gil:2015:HTI,
author = "Yolanda Gil",
title = "Human Tutorial Instruction in the Raw",
journal = j-TIIS,
volume = "5",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2531920",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 26 05:43:35 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Humans learn procedures from one another through a
variety of methods, such as observing someone do the
task, practicing by themselves, reading manuals or
textbooks, or getting instruction from a teacher. Some
of these methods generate examples that require the
learner to generalize appropriately. When procedures
are complex, however, it becomes unmanageable to induce
the procedures from examples alone. An alternative and
very common method for teaching procedures is tutorial
instruction, where a teacher describes in general terms
what actions to perform and possibly includes
explanations of the rationale for the actions. This
article provides an overview of the challenges in using
human tutorial instruction for teaching procedures to
computers. First, procedures can be very complex and
can involve many different types of interrelated
information, including (1) situating the instruction in
the context of relevant objects and their properties,
(2) describing the steps involved, (3) specifying the
organization of the procedure in terms of relationships
among steps and substeps, and (4) conveying control
structures. Second, human tutorial instruction is
naturally plagued with omissions, oversights,
unintentional inconsistencies, errors, and simply poor
design. The article presents a survey of work from the
literature that highlights the nature of these
challenges and illustrates them with numerous examples
of instruction in many domains. Major research
challenges in this area are highlighted, including the
difficulty of the learning task when procedures are
complex, the need to overcome omissions and errors in
the instruction, the design of a natural user interface
to specify procedures, the management of the
interaction of a human with a learning system, and the
combination of tutorial instruction with other teaching
modalities.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Pejsa:2015:GAM,
author = "Tomislav Pejsa and Sean Andrist and Michael Gleicher
and Bilge Mutlu",
title = "Gaze and Attention Management for Embodied
Conversational Agents",
journal = j-TIIS,
volume = "5",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2724731",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 26 05:43:35 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "To facilitate natural interactions between humans and
embodied conversational agents (ECAs), we need to endow
the latter with the same nonverbal cues that humans use
to communicate. Gaze cues in particular are integral in
mechanisms for communication and management of
attention in social interactions, which can trigger
important social and cognitive processes, such as
establishment of affiliation between people or learning
new information. The fundamental building blocks of
gaze behaviors are gaze shifts: coordinated movements
of the eyes, head, and body toward objects and
information in the environment. In this article, we
present a novel computational model for gaze shift
synthesis for ECAs that supports parametric control
over coordinated eye, head, and upper body movements.
We employed the model in three studies with human
participants. In the first study, we validated the
model by showing that participants are able to
interpret the agent's gaze direction accurately. In the
second and third studies, we showed that by adjusting
the participation of the head and upper body in gaze
shifts, we can control the strength of the attention
signals conveyed, thereby strengthening or weakening
their social and cognitive effects. The second study
shows that manipulation of eye--head coordination in
gaze enables an agent to convey more information or
establish stronger affiliation with participants in a
teaching task, while the third study demonstrates how
manipulation of upper body coordination enables the
agent to communicate increased interest in objects in
the environment.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Deng:2015:ESA,
author = "James J. Deng and Clement H. C. Leung and Alfredo
Milani and Li Chen",
title = "Emotional States Associated with Music:
Classification, Prediction of Changes, and
Consideration in Recommendation",
journal = j-TIIS,
volume = "5",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2723575",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 26 05:43:35 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present several interrelated technical and
empirical contributions to the problem of emotion-based
music recommendation and show how they can be applied
in a possible usage scenario. The contributions are (1)
a new three-dimensional resonance-arousal-valence model
for the representation of emotion expressed in music,
together with methods for automatically classifying a
piece of music in terms of this model, using robust
regression methods applied to musical/acoustic
features; (2) methods for predicting a listener's
emotional state on the assumption that the emotional
state has been determined entirely by a sequence of
pieces of music recently listened to, using conditional
random fields and taking into account the decay of
emotion intensity over time; and (3) a method for
selecting a ranked list of pieces of music that match a
particular emotional state, using a minimization
iteration method. A series of experiments yield
information about the validity of our
operationalizations of these contributions. Throughout
the article, we refer to an illustrative usage scenario
in which all of these contributions can be exploited,
where it is assumed that (1) a listener's emotional
state is being determined entirely by the music that he
or she has been listening to and (2) the listener wants
to hear additional music that matches his or her
current emotional state. The contributions are intended
to be useful in a variety of other scenarios as well.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Mazilu:2015:WAG,
author = "Sinziana Mazilu and Ulf Blanke and Moran Dorfman and
Eran Gazit and Anat Mirelman and Jeffrey M. Hausdorff
and Gerhard Tr{\"o}ster",
title = "A Wearable Assistant for Gait Training for
{Parkinson}'s Disease with Freezing of Gait in
Out-of-the-Lab Environments",
journal = j-TIIS,
volume = "5",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2701431",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Mar 26 05:43:35 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "People with Parkinson's disease (PD) suffer from
declining mobility capabilities, which cause a
prevalent risk of falling. Commonly, short periods of
motor blocks occur during walking, known as freezing of
gait (FoG). To slow the progressive decline of motor
abilities, people with PD usually undertake stationary
motor-training exercises in the clinics or supervised
by physiotherapists. We present a wearable system for
the support of people with PD and FoG. The system is
designed for independent use. It enables motor training
and gait assistance at home and other unsupervised
environments. The system consists of three components.
First, FoG episodes are detected in real time using
wearable inertial sensors and a smartphone as the
processing unit. Second, a feedback mechanism triggers
a rhythmic auditory signal to the user to alleviate
freeze episodes in an assistive mode. Third, the
smartphone-based application features support for
training exercises. Moreover, the system allows
unobtrusive and long-term monitoring of the user's
clinical condition by transmitting sensing data and
statistics to a telemedicine service. We investigate
the at-home acceptance of the wearable system in a
study with nine PD subjects. Participants deployed and
used the system on their own, without any clinical
support, at their homes during three protocol sessions
in 1 week. Users' feedback suggests an overall positive
attitude toward adopting and using the system in their
daily life, indicating that the system supports them in
improving their gait. Further, in a data-driven
analysis with sensing data from five participants, we
study whether there is an observable effect on the gait
during use of the system. In three out of five
subjects, we observed a decrease in FoG duration
distributions over the protocol days during
gait-training exercises. Moreover, sensing data-driven
analysis shows a decrease in FoG duration and FoG
number in four out of five participants when they use
the system as a gait-assistive tool during normal daily
life activities at home.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Salah:2015:BIS,
author = "Albert Ali Salah and Hayley Hung and Oya Aran and
Hatice Gunes and Matthew Turk",
title = "Brief Introduction to the Special Issue on Behavior
Understanding for Arts and Entertainment",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "6:1--6:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2786762",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction describes the aims and
scope of the special issue of the ACM Transactions on
Interactive Intelligent Systems on Behavior
Understanding for Arts and Entertainment, which is
being published in issues 2 and 3 of volume 5 of the
journal. Here we offer a brief introduction to the use
of behavior analysis for interactive systems that
involve creativity in either the creator or the
consumer of a work of art. We then characterize each of
the five articles included in this first part of the
special issue, which span a wide range of
applications.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Grenader:2015:VIA,
author = "Emily Grenader and Danilo Gasques Rodrigues and
Fernando Nos and Nadir Weibel",
title = "The {VideoMob} Interactive Art Installation Connecting
Strangers through Inclusive Digital Crowds",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "7:1--7:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2768208",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "VideoMob is an interactive video platform and an
artwork that enables strangers visiting different
installation locations to interact across time and
space through a computer interface that detects their
presence, video-records their actions while
automatically removing the video background through
computer vision, and co-situates visitors as part of
the same digital environment. Through the combination
of individual user videos to form a digital crowd,
strangers are connected through the graphic display.
Our work is inspired by the way distant people can
interact with each other through technology and
influenced by artists working in the realm of
interactive art. We deployed VideoMob in a variety of
settings, locations, and contexts to observe hundreds
of visitors' reactions. By analyzing behavioral data
collected through depth cameras from our 1,068
recordings across eight venues, we studied how
participants behave when given the opportunity to
record their own video portrait into the artwork. We
report the specific activity performed in front of the
camera and the influences that existing crowds impose
on new participants. Our analysis informs the
integration of a series of possible novel interaction
paradigms based on real-time analysis of the visitors'
behavior through specific computer vision and machine
learning techniques that have the potential to increase
the engagement of the artwork's visitors and to impact
user experience.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sartori:2015:AAP,
author = "Andreza Sartori and Victoria Yanulevskaya and Almila
Akdag Salah and Jasper Uijlings and Elia Bruni and Nicu
Sebe",
title = "Affective Analysis of Professional and Amateur
Abstract Paintings Using Statistical Analysis and Art
Theory",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "8:1--8:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2768209",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "When artists express their feelings through the
artworks they create, it is believed that the resulting
works transform into objects with ``emotions'' capable
of conveying the artists' mood to the audience. There
is little to no dispute about this belief: Regardless
of the artwork, genre, time, and origin of creation,
people from different backgrounds are able to read the
emotional messages. This holds true even for the most
abstract paintings. Could this idea be applied to
machines as well? Can machines learn what makes a work
of art ``emotional''? In this work, we employ a
state-of-the-art recognition system to learn which
statistical patterns are associated with positive and
negative emotions on two different datasets that
comprise professional and amateur abstract artworks.
Moreover, we analyze and compare two different
annotation methods in order to establish the ground
truth of positive and negative emotions in abstract
art. Additionally, we use computer vision techniques to
quantify which parts of a painting evoke positive and
negative emotions. We also demonstrate how the
quantification of evidence for positive and negative
emotions can be used to predict which parts of a
painting people prefer to focus on. This method opens
new opportunities of research on why a specific
painting is perceived as emotional at global and local
scales.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sanchez-Cortes:2015:MVM,
author = "Dairazalia Sanchez-Cortes and Shiro Kumano and
Kazuhiro Otsuka and Daniel Gatica-Perez",
title = "In the Mood for Vlog: Multimodal Inference in
Conversational Social Video",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "9:1--9:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2641577",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The prevalent ``share what's on your mind'' paradigm
of social media can be examined from the perspective of
mood: short-term affective states revealed by the
shared data. This view takes on new relevance given the
emergence of conversational social video as a popular
genre among viewers looking for entertainment and among
video contributors as a channel for debate, expertise
sharing, and artistic expression. From the perspective
of human behavior understanding, in conversational
social video both verbal and nonverbal information is
conveyed by speakers and decoded by viewers. We present
a systematic study of classification and ranking of
mood impressions in social video, using vlogs from
YouTube. Our approach considers eleven natural mood
categories labeled through crowdsourcing by external
observers on a diverse set of conversational vlogs. We
extract a comprehensive number of nonverbal and verbal
behavioral cues from the audio and video channels to
characterize the mood of vloggers. Then we implement
and validate vlog classification and vlog ranking tasks
using supervised learning methods. Following a
reliability and correlation analysis of the mood
impression data, our study demonstrates that, while the
problem is challenging, several mood categories can be
inferred with promising performance. Furthermore,
multimodal features perform consistently better than
single-channel features. Finally, we show that
addressing mood as a ranking problem is a promising
practical direction for several of the mood categories
studied.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Vezzani:2015:GPS,
author = "Roberto Vezzani and Martino Lombardi and Augusto
Pieracci and Paolo Santinelli and Rita Cucchiara",
title = "A General-Purpose Sensing Floor Architecture for
Human-Environment Interaction",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "10:1--10:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2751566",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Smart environments are now designed as natural
interfaces to capture and understand human behavior
without a need for explicit human-computer interaction.
In this article, we present a general-purpose
architecture that acquires and understands human
behaviors through a sensing floor. The pressure field
generated by moving people is captured and analyzed.
Specific actions and events are then detected by a
low-level processing engine and sent to high-level
interfaces providing different functions. The proposed
architecture and sensors are modular, general-purpose,
cheap, and suitable for both small- and large-area
coverage. Some sample entertainment and virtual reality
applications that we developed to test the platform are
presented.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Baur:2015:CAA,
author = "Tobias Baur and Gregor Mehlmann and Ionut Damian and
Florian Lingenfelser and Johannes Wagner and Birgit
Lugrin and Elisabeth Andr{\'e} and Patrick Gebhard",
title = "Context-Aware Automated Analysis and Annotation of
Social Human--Agent Interactions",
journal = j-TIIS,
volume = "5",
number = "2",
pages = "11:1--11:??",
month = jul,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2764921",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Aug 7 09:18:56 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The outcome of interpersonal interactions depends not
only on the contents that we communicate verbally, but
also on nonverbal social signals. Because a lack of
social skills is a common problem for a significant
number of people, serious games and other training
environments have recently become the focus of
research. In this work, we present NovA (Nonverbal
behavior Analyzer), a system that analyzes and
facilitates the interpretation of social signals
automatically in a bidirectional interaction with a
conversational agent. It records data of interactions,
detects relevant social cues, and creates descriptive
statistics for the recorded data with respect to the
agent's behavior and the context of the situation. This
enhances the possibilities for researchers to
automatically label corpora of human--agent
interactions and to give users feedback on strengths
and weaknesses of their social behavior.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Salah:2015:BUA,
author = "Albert Ali Salah and Hayley Hung and Oya Aran and
Hatice Gunes and Matthew Turk",
title = "Behavior Understanding for Arts and Entertainment",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "12:1--12:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2817208",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This editorial introduction complements the shorter
introduction to the first part of the two-part special
issue on Behavior Understanding for Arts and
Entertainment. It offers a more expansive discussion of
the use of behavior analysis for interactive systems
that involve creativity, either for the producer or the
consumer of such a system. We first summarise the two
articles that appear in this second part of the special
issue. We then discuss general questions and challenges
in this domain that were suggested by the entire set of
seven articles of the special issue and by the comments
of the reviewers of these articles.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Alaoui:2015:IVM,
author = "Sarah Fdili Alaoui and Frederic Bevilacqua and
Christian Jacquemin",
title = "Interactive Visuals as Metaphors for Dance Movement
Qualities",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "13:1--13:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2738219",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The notion of ``movement qualities'' is central in
contemporary dance; it describes the manner in which a
movement is executed. Movement qualities convey
information revealing movement expressiveness; their
use has strong potential for movement-based interaction
with applications in arts, entertainment, education, or
rehabilitation. The purpose of our research is to
design and evaluate interactive reflexive visuals for
movement qualities. The theoretical basis for this
research is drawn from a collaboration with the members
of the international dance company Emio Greco|PC to
study their formalization of movement qualities. We
designed a pedagogical interactive installation called
Double Skin/Double Mind (DS/DM) for the analysis and
visualization of movement qualities through physical
model-based interactive renderings. In this article, we
first evaluate dancers' perception of the visuals as
metaphors for movement qualities. This evaluation shows
that, depending on the physical model parameterization,
the visuals are capable of generating dynamic behaviors
that the dancers associate with DS/DM movement
qualities. Moreover, we evaluate dance students' and
professionals' experience of the interactive visuals in
the context of a dance pedagogical workshop and a
professional dance training. The results of these
evaluations show that the dancers consider the
interactive visuals to be a reflexive system that
encourages them to perform, improves their experience,
and contributes to a better understanding of movement
qualities. Our findings support research on interactive
systems for real-time analysis and visualization of
movement qualities, which open new perspectives in
movement-based interaction design.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yang:2015:QSM,
author = "Yi-Hsuan Yang and Yuan-Ching Teng",
title = "Quantitative Study of Music Listening Behavior in a
{Smartphone} Context",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "14:1--14:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2738220",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Context-based services have attracted increasing
attention because of the prevalence of sensor-rich
mobile devices such as smartphones. The idea is to
recommend information that a user would be interested
in according to the user's surrounding context.
Although remarkable progress has been made to
contextualize music playback, relatively little
research has been made using a large collection of
real-life listening records collected in situ. In light
of this fact, we present in this article a quantitative
study of the personal, situational, and musical factors
of musical preference in a smartphone context, using a
new dataset comprising the listening records and
self-report context annotation of 48 participants
collected over 3 weeks via an Android app. Although the
number of participants is limited and the population is
biased towards students, the dataset is unique in that
it is collected in a daily context, with sensor data
and music listening profiles recorded at the same time.
We investigate 3 core research questions evaluating the
strength of a rich set of low-level and high-level
audio features for music usage auto-tagging (i.e.,
music preference in different user activities), the
strength of time-domain and frequency-domain sensor
features for user activity classification, and how user
factors such as personality traits are correlated with
the predictability of music usage and user activity,
using a closed set of 8 activity classes. We provide an
in-depth discussion of the main findings of this study
and their implications for the development of
context-based music services for smartphones.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Bott:2015:WRW,
author = "Jared N. Bott and Joseph J. {LaViola Jr.}",
title = "The {WOZ Recognizer}: a {Wizard of Oz} Sketch
Recognition System",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "15:1--15:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2743029",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Sketch recognition has the potential to be an
important input method for computers in the coming
years, particularly for STEM (science, technology,
engineering, and math) education. However, designing
and building an accurate and sophisticated sketch
recognition system is a time-consuming and daunting
task. Since sketch recognition mistakes are still
common, it is important to understand how users
perceive and tolerate recognition errors and other user
interface elements with these imperfect systems. In
order to solve this problem, we developed a Wizard of
Oz sketch recognition tool, the WOZ Recognizer, that
supports controlled recognition accuracy, multiple
recognition modes, and multiple sketching domains for
performing controlled experiments. We present the
design of the WOZ Recognizer and our process for
representing recognition domains using graphs and
symbol alphabets. In addition, we discuss how sketches
are altered, how to control the WOZ Recognizer, and how
users interact with it. Finally, we present an expert
user case study that examines the WOZ Recognizer's
usability.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Soto:2015:EVA,
author = "Axel J. Soto and Ryan Kiros and Vlado Keselj and
Evangelos Milios",
title = "Exploratory Visual Analysis and Interactive Pattern
Extraction from Semi-Structured Data",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "16:1--16:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2812115",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Semi-structured documents are a common type of data
containing free text in natural language (unstructured
data) as well as additional information about the
document, or meta-data, typically following a schema or
controlled vocabulary (structured data). Simultaneous
analysis of unstructured and structured data enables
the discovery of hidden relationships that cannot be
identified from either of these sources when analyzed
independently of each other. In this work, we present a
visual text analytics tool for semi-structured
documents (ViTA-SSD) that aims to support the user in
the exploration and finding of insightful patterns in a
visual and interactive manner in a semi-structured
collection of documents. It achieves this goal by
presenting to the user a set of coordinated
visualizations that allows the linking of the metadata
with interactively generated clusters of documents in
such a way that relevant patterns can be easily
spotted. The system contains two novel approaches in
its back end: a feature-learning method to learn a
compact representation of the corpus and a
fast-clustering approach that has been redesigned to
allow user supervision. These novel contributions make
it possible for the user to interact with a large and
dynamic document collection and to perform several text
analytical tasks more efficiently. Finally, we present
two use cases that illustrate the suitability of the
system for in-depth interactive exploration of
semi-structured document collections, two user studies,
and results of several evaluations of our text-mining
components.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Meignan:2015:RTI,
author = "David Meignan and Sigrid Knust and Jean-Marc Frayret
and Gilles Pesant and Nicolas Gaud",
title = "A Review and Taxonomy of Interactive Optimization
Methods in Operations Research",
journal = j-TIIS,
volume = "5",
number = "3",
pages = "17:1--17:??",
month = oct,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2808234",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Oct 17 18:18:51 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article presents a review and a classification of
interactive optimization methods. These interactive
methods are used for solving optimization problems. The
interaction with an end user or decision maker aims at
improving the efficiency of the optimization procedure,
enriching the optimization model, or informing the user
regarding the solutions proposed by the optimization
system. First, we present the challenges of using
optimization methods as a tool for supporting decision
making, and we justify the integration of the user in
the optimization process. This integration is generally
achieved via a dynamic interaction between the user and
the system. Next, the different classes of interactive
optimization approaches are presented. This detailed
review includes trial and error, interactive
reoptimization, interactive multiobjective
optimization, interactive evolutionary algorithms,
human-guided search, and other approaches that are less
well covered in the research literature. On the basis
of this review, we propose a classification that aims
to better describe and compare interaction mechanisms.
This classification offers two complementary views on
interactive optimization methods. The first perspective
focuses on the user's contribution to the optimization
process, and the second concerns the components of
interactive optimization systems. Finally, on the basis
of this review and classification, we identify some
open issues and potential perspectives for interactive
optimization methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wang:2016:ART,
author = "Weiyi Wang and Valentin Enescu and Hichem Sahli",
title = "Adaptive Real-Time Emotion Recognition from Body
Movements",
journal = j-TIIS,
volume = "5",
number = "4",
pages = "18:1--18:??",
month = jan,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2738221",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 7 16:06:24 MST 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We propose a real-time system that continuously
recognizes emotions from body movements. The combined
low-level 3D postural features and high-level kinematic
and geometrical features are fed to a Random Forests
classifier through summarization (statistical values)
or aggregation (bag of features). In order to improve
the generalization capability and the robustness of the
system, a novel semisupervised adaptive algorithm is
built on top of the conventional Random Forests
classifier. The MoCap UCLIC affective gesture database
(labeled with four emotions) was used to train the
Random Forests classifier, which led to an overall
recognition rate of 78\% using a 10-fold
cross-validation. Subsequently, the trained classifier
was used in a stream-based semisupervised Adaptive
Random Forests method for continuous unlabeled Kinect
data classification. The very low update cost of our
adaptive classifier makes it highly suitable for data
stream applications. Tests performed on the publicly
available emotion datasets (body gestures and facial
expressions) indicate that our new classifier
outperforms existing algorithms for data streams in
terms of accuracy and computational costs.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Harper:2016:MDH,
author = "F. Maxwell Harper and Joseph A. Konstan",
title = "The {MovieLens} Datasets: History and Context",
journal = j-TIIS,
volume = "5",
number = "4",
pages = "19:1--19:??",
month = jan,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2827872",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 7 16:06:24 MST 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The MovieLens datasets are widely used in education,
research, and industry. They are downloaded hundreds of
thousands of times each year, reflecting their use in
popular press programming books, traditional and online
courses, and software. These datasets are a product of
member activity in the MovieLens movie recommendation
system, an active research platform that has hosted
many experiments since its launch in 1997. This article
documents the history of MovieLens and the MovieLens
datasets. We include a discussion of lessons learned
from running a long-standing, live research platform
from the perspective of a research organization. We
document best practices and limitations of using the
MovieLens datasets in new research.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yordanova:2016:PSD,
author = "Kristina Yordanova and Thomas Kirste",
title = "A Process for Systematic Development of Symbolic
Models for Activity Recognition",
journal = j-TIIS,
volume = "5",
number = "4",
pages = "20:1--20:??",
month = jan,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2806893",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 7 16:06:24 MST 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Several emerging approaches to activity recognition
(AR) combine symbolic representation of user actions
with probabilistic elements for reasoning under
uncertainty. These approaches provide promising results
in terms of recognition performance, coping with the
uncertainty of observations, and model size explosion
when complex problems are modelled. But experience has
shown that it is not always intuitive to model even
seemingly simple problems. To date, there are no
guidelines for developing such models. To address this
problem, in this work we present a development process
for building symbolic models that is based on
experience acquired so far as well as on existing
engineering and data analysis workflows. The proposed
process is a first attempt at providing structured
guidelines and practices for designing, modelling, and
evaluating human behaviour in the form of symbolic
models for AR. As an illustration of the process, a
simple example from the office domain was developed.
The process was evaluated in a comparative study of an
intuitive process and the proposed process. The results
showed a significant improvement over the intuitive
process. Furthermore, the study participants reported
greater ease of use and perceived effectiveness when
following the proposed process. To evaluate the
applicability of the process to more complex AR
problems, it was applied to a problem from the kitchen
domain. The results showed that following the proposed
process yielded an average accuracy of 78\%. The
developed model outperformed state-of-the-art methods
applied to the same dataset in previous work, and it
performed comparably to a symbolic model developed by a
model expert without following the proposed development
process.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yamazaki:2016:ITN,
author = "Keiichi Yamazaki and Akiko Yamazaki and Keiko Ikeda
and Chen Liu and Mihoko Fukushima and Yoshinori
Kobayashi and Yoshinori Kuno",
title = "{``I'll Be There Next''}: a Multiplex Care Robot
System that Conveys Service Order Using Gaze Gestures",
journal = j-TIIS,
volume = "5",
number = "4",
pages = "21:1--21:??",
month = jan,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2844542",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 7 16:06:24 MST 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In this article, we discuss our findings from an
ethnographic study at an elderly care center where we
observed the utilization of two different functions of
human gaze to convey service order (i.e., ``who is
served first and who is served next''). In one case,
when an elderly person requested assistance, the gaze
of the care worker communicated that he/she would serve
that client next in turn. In the other case, the gaze
conveyed a request to the service seeker to wait until
the care worker finished attending the current client.
Each gaze function depended on the care worker's
current engagement and other behaviors. We sought to
integrate these findings into the development of a
robot that might function more effectively in multiple
human-robot party settings. We focused on the multiple
functions of gaze and bodily actions, implementing
those functions into our robot. We conducted three
experiments to gauge a combination of gestures and
gazes performed by our robot. This article demonstrates
that the employment of gaze is an important
consideration when developing robots that can interact
effectively in multiple human-robot party settings.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Nakano:2016:GRG,
author = "Yukiko I. Nakano and Takashi Yoshino and Misato
Yatsushiro and Yutaka Takase",
title = "Generating Robot Gaze on the Basis of Participation
Roles and Dominance Estimation in Multiparty
Interaction",
journal = j-TIIS,
volume = "5",
number = "4",
pages = "22:1--22:??",
month = jan,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2743028",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Thu Jan 7 16:06:24 MST 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Gaze is an important nonverbal feedback signal in
multiparty face-to-face conversations. It is well known
that gaze behaviors differ depending on participation
role: speaker, addressee, or side participant. In this
study, we focus on dominance as another factor that
affects gaze. First, we conducted an empirical study
and analyzed its results that showed how gaze behaviors
are affected by both dominance and participation roles.
Then, using speech and gaze information that was
statistically significant for distinguishing the more
dominant and less dominant person in an empirical
study, we established a regression-based model for
estimating conversational dominance. On the basis of
the model, we implemented a dominance estimation
mechanism that processes online speech and head
direction data. Then we applied our findings to
human-robot interaction. To design robot gaze
behaviors, we analyzed gaze transitions with respect to
participation roles and dominance and implemented
gaze-transition models as robot gaze behavior
generation rules. Finally, we evaluated a humanoid
robot that has dominance estimation functionality and
determines its gaze based on the gaze models, and we
found that dominant participants had a better
impression of less dominant robot gaze behaviors. This
suggests that a robot using our gaze models was
preferred to a robot that was simply looking at the
speaker. We have demonstrated the importance of
considering dominance in human-robot multiparty
interaction.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Nakano:2016:ISI,
author = "Yukiko I. Nakano and Roman Bednarik and Hung-Hsuan
Huang and Kristiina Jokinen",
title = "Introduction to the Special Issue on New Directions in
Eye Gaze for Interactive Intelligent Systems",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "1:1--1:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2893485",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Eye gaze has been used broadly in interactive
intelligent systems. The research area has grown in
recent years to cover emerging topics that go beyond
the traditional focus on interaction between a single
user and an interactive system. This special issue
presents five articles that explore new directions of
gaze-based interactive intelligent systems, ranging
from communication robots in dyadic and multiparty
conversations to a driving simulator that uses eye gaze
evidence to critique learners' behavior.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Xu:2016:SYS,
author = "Tian (Linger) Xu and Hui Zhang and Chen Yu",
title = "See You See Me: The Role of Eye Contact in Multimodal
Human-Robot Interaction",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "2:1--2:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2882970",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We focus on a fundamental looking behavior in
human-robot interactions: gazing at each other's face.
Eye contact and mutual gaze between two social partners
are critical in smooth human-human interactions.
Therefore, investigating at what moments and in what
ways a robot should look at a human user's face as a
response to the human's gaze behavior is an important
topic. Toward this goal, we developed a gaze-contingent
human-robot interaction system, which relied on
momentary gaze behaviors from a human user to control
an interacting robot in real time. Using this system,
we conducted an experiment in which human participants
interacted with the robot in a joint-attention task. In
the experiment, we systematically manipulated the
robot's gaze toward the human partner's face in real
time and then analyzed the human's gaze behavior as a
response to the robot's gaze behavior. We found that
more face looks from the robot led to more look-backs
(to the robot's face) from human participants, and
consequently, created more mutual gaze and eye contact
between the two. Moreover, participants demonstrated
more coordinated and synchronized multimodal behaviors
between speech and gaze when more eye contact was
successfully established and maintained.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wade:2016:GCA,
author = "Joshua Wade and Lian Zhang and Dayi Bian and Jing Fan
and Amy Swanson and Amy Weitlauf and Medha Sarkar and
Zachary Warren and Nilanjan Sarkar",
title = "A Gaze-Contingent Adaptive Virtual Reality Driving
Environment for Intervention in Individuals with Autism
Spectrum Disorders",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "3:1--3:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2892636",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In addition to social and behavioral deficits,
individuals with Autism Spectrum Disorder (ASD) often
struggle to develop the adaptive skills necessary to
achieve independence. Driving intervention in
individuals with ASD is a growing area of study, but it
is still widely under-researched. We present the
development and preliminary assessment of a
gaze-contingent adaptive virtual reality driving
simulator that uses real-time gaze information to adapt
the driving environment with the aim of providing a
more individualized method of driving intervention. We
conducted a small pilot study of 20 adolescents with
ASD using our system: 10 with the adaptive
gaze-contingent version of the system and 10 in a
purely performance-based version. Preliminary results
suggest that the novel intervention system may be
beneficial in teaching driving skills to individuals
with ASD.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ishii:2016:PWW,
author = "Ryo Ishii and Kazuhiro Otsuka and Shiro Kumano and
Junji Yamato",
title = "Prediction of Who Will Be the Next Speaker and When
Using Gaze Behavior in Multiparty Meetings",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "4:1--4:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2757284",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In multiparty meetings, participants need to predict
the end of the speaker's utterance and who will start
speaking next, as well as consider a strategy for good
timing to speak next. Gaze behavior plays an important
role in smooth turn-changing. This article proposes a
prediction model that features three processing steps
to predict (I) whether turn-changing or turn-keeping
will occur, (II) who will be the next speaker in
turn-changing, and (III) the timing of the start of the
next speaker's utterance. For the feature values of the
model, we focused on gaze transition patterns and the
timing structure of eye contact between a speaker and a
listener near the end of the speaker's utterance. Gaze
transition patterns provide information about the order
in which gaze behavior changes. The timing structure of
eye contact is defined as who looks at whom and who
looks away first, the speaker or listener, when eye
contact between the speaker and a listener occurs. We
collected corpus data of multiparty meetings, using the
data to demonstrate relationships between gaze
transition patterns and timing structure and situations
(I), (II), and (III). The results of our analyses
indicate that the gaze transition pattern of the
speaker and listener and the timing structure of eye
contact have a strong association with turn-changing,
the next speaker in turn-changing, and the start time
of the next utterance. On the basis of the results, we
constructed prediction models using the gaze transition
patterns and timing structure. The gaze transition
patterns were found to be useful in predicting
turn-changing, the next speaker in turn-changing, and
the start time of the next utterance. Contrary to
expectations, we did not find that the timing structure
is useful for predicting the next speaker and the start
time. This study opens up new possibilities for
predicting the next speaker and the timing of the next
utterance using gaze transition patterns in multiparty
meetings.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dardard:2016:ACL,
author = "Floriane Dardard and Giorgio Gnecco and Donald
Glowinski",
title = "Automatic Classification of Leading Interactions in a
String Quartet",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "5:1--5:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2818739",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The aim of the present work is to analyze
automatically the leading interactions between the
musicians of a string quartet, using machine-learning
techniques applied to nonverbal features of the
musicians' behavior, which are detected through the
help of a motion-capture system. We represent these
interactions by a graph of ``influence'' of the
musicians, which displays the relations ``is
following'' and ``is not following'' with weighted
directed arcs. The goal of the machine-learning problem
investigated is to assign weights to these arcs in an
optimal way. Since only a subset of the available
training examples are labeled, a semisupervised support
vector machine is used, which is based on a linear
kernel to limit its model complexity. Specific
potential applications within the field of
human-computer interaction are also discussed, such as
e-learning, networked music performance, and social
active listening.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Piana:2016:ABG,
author = "Stefano Piana and Alessandra Staglian{\`o} and
Francesca Odone and Antonio Camurri",
title = "Adaptive Body Gesture Representation for Automatic
Emotion Recognition",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "6:1--6:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2818740",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We present a computational model and a system for the
automated recognition of emotions starting from
full-body movement. Three-dimensional motion data of
full-body movements are obtained either from
professional optical motion-capture systems (Qualisys)
or from low-cost RGB-D sensors (Kinect and Kinect2). A
number of features are then automatically extracted at
different levels, from kinematics of a single joint to
more global expressive features inspired by psychology
and humanistic theories (e.g., contraction index,
fluidity, and impulsiveness). An abstraction layer
based on dictionary learning further processes these
movement features to increase the model generality and
to deal with intraclass variability, noise, and
incomplete information characterizing emotion
expression in human movement. The resulting feature
vector is the input for a classifier performing
real-time automatic emotion recognition based on linear
support vector machines. The recognition performance of
the proposed model is presented and discussed,
including the tradeoff between precision of the
tracking measures (we compare the Kinect RGB-D sensor
and the Qualisys motion-capture system) versus
dimension of the training dataset. The resulting model
and system have been successfully applied in the
development of serious games for helping autistic
children learn to recognize and express emotions by
means of their full-body movement.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hoque:2016:ITM,
author = "Enamul Hoque and Giuseppe Carenini",
title = "Interactive Topic Modeling for Exploring Asynchronous
Online Conversations: Design and Evaluation of
{ConVisIT}",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "7:1--7:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2854158",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Since the mid-2000s, there has been exponential growth
of asynchronous online conversations, thanks to the
rise of social media. Analyzing and gaining insights
from such conversations can be quite challenging for a
user, especially when the discussion becomes very long.
A promising solution to this problem is topic modeling,
since it may help the user to understand quickly what
was discussed in a long conversation and to explore the
comments of interest. However, the results of topic
modeling can be noisy, and they may not match the
user's current information needs. To address this
problem, we propose a novel topic modeling system for
asynchronous conversations that revises the model on
the fly on the basis of users' feedback. We then
integrate this system with interactive visualization
techniques to support the user in exploring long
conversations, as well as in revising the topic model
when the current results are not adequate to fulfill
the user's information needs. Finally, we report on an
evaluation with real users that compared the resulting
system with both a traditional interface and an
interactive visual interface that does not support
human-in-the-loop topic modeling. Both the quantitative
results and the subjective feedback from the
participants illustrate the potential benefits of our
interactive topic modeling approach for exploring
conversations, relative to its counterparts.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Jannach:2016:SDM,
author = "Dietmar Jannach and Michael Jugovac and Lukas Lerche",
title = "Supporting the Design of Machine Learning Workflows
with a Recommendation System",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "8:1--8:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2852082",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Machine learning and data analytics tasks in practice
require several consecutive processing steps.
RapidMiner is a widely used software tool for the
development and execution of such analytics workflows.
Unlike many other algorithm toolkits, it comprises a
visual editor that allows the user to design processes
on a conceptual level. This conceptual and visual
approach helps the user to abstract from the technical
details during the development phase and to retain a
focus on the core modeling task. The large set of
preimplemented data analysis and machine learning
operations available in the tool, as well as their
logical dependencies, can, however, be overwhelming in
particular for novice users. In this work, we present
an add-on to the RapidMiner framework that supports the
user during the modeling phase by recommending
additional operations to insert into the currently
developed machine learning workflow. First, we propose
different recommendation techniques and evaluate them
in an offline setting using a pool of several thousand
existing workflows. Second, we present the results of a
laboratory study, which show that our tool helps users
to significantly increase the efficiency of the
modeling process. Finally, we report on analyses using
data that were collected during the real-world
deployment of the plug-in component and compare the
results of the live deployment of the tool with the
results obtained through an offline analysis and a
replay simulation.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Malik:2016:HVH,
author = "Sana Malik and Ben Shneiderman and Fan Du and
Catherine Plaisant and Margret Bjarnadottir",
title = "High-Volume Hypothesis Testing: Systematic Exploration
of Event Sequence Comparisons",
journal = j-TIIS,
volume = "6",
number = "1",
pages = "9:1--9:??",
month = may,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2890478",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat May 21 08:06:01 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Cohort comparison studies have traditionally been
hypothesis driven and conducted in carefully controlled
environments (such as clinical trials). Given two
groups of event sequence data, researchers test a
single hypothesis (e.g., does the group taking
Medication A exhibit more deaths than the group taking
Medication B?). Recently, however, researchers have
been moving toward more exploratory methods of
retrospective analysis with existing data. In this
article, we begin by showing that the task of cohort
comparison is specific enough to support automatic
computation against a bounded set of potential
questions and objectives, a method that we refer to as
High-Volume Hypothesis Testing (HVHT). From this
starting point, we demonstrate that the diversity of
these objectives, both across and within different
domains, as well as the inherent complexities of
real-world datasets, still requires human involvement
to determine meaningful insights. We explore how
visualization and interaction better support the task
of exploratory data analysis and the understanding of
HVHT results (how significant they are, why they are
meaningful, and whether the entire dataset has been
exhaustively explored). Through interviews and case
studies with domain experts, we iteratively design and
implement visualization and interaction techniques in a
visual analytics tool, CoCo. As a result of our
evaluation, we propose six design guidelines for
enabling users to explore large result sets of HVHT
systematically and flexibly in order to glean
meaningful insights more quickly. Finally, we
illustrate the utility of this method with three case
studies in the medical domain.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Pan:2016:TLS,
author = "Weike Pan and Qiang Yang and Yuchao Duan and Zhong
Ming",
title = "Transfer Learning for Semisupervised Collaborative
Recommendation",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "10:1--10:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2835497",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Users' online behaviors such as ratings and
examination of items are recognized as one of the most
valuable sources of information for learning users'
preferences in order to make personalized
recommendations. But most previous works focus on
modeling only one type of users' behaviors such as
numerical ratings or browsing records, which are
referred to as explicit feedback and implicit feedback,
respectively. In this article, we study a
Semisupervised Collaborative Recommendation (SSCR)
problem with labeled feedback (for explicit feedback)
and unlabeled feedback (for implicit feedback), in
analogy to the well-known Semisupervised Learning (SSL)
setting with labeled instances and unlabeled instances.
SSCR is associated with two fundamental challenges,
that is, heterogeneity of two types of users' feedback
and uncertainty of the unlabeled feedback. As a
response, we design a novel Self-Transfer Learning
(sTL) algorithm to iteratively identify and integrate
likely positive unlabeled feedback, which is inspired
by the general forward/backward process in machine
learning. The merit of sTL is its ability to learn
users' preferences from heterogeneous behaviors in a
joint and selective manner. We conduct extensive
empirical studies of sTL and several very competitive
baselines on three large datasets. The experimental
results show that our sTL is significantly better than
the state-of-the-art methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Verbert:2016:AVU,
author = "Katrien Verbert and Denis Parra and Peter
Brusilovsky",
title = "Agents Vs. Users: Visual Recommendation of Research
Talks with Multiple Dimension of Relevance",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "11:1--11:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2946794",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Several approaches have been researched to help people
deal with the abundance of information. An important
feature pioneered by social tagging systems and later
used in other kinds of social systems is the ability to
explore different community relevance prospects by
examining items bookmarked by a specific user or items
associated by various users with a specific tag. A
ranked list of recommended items offered by a specific
recommender engine can be considered as another
relevance prospect. The problem that we address is that
existing personalized social systems do not allow their
users to explore and combine multiple relevance
prospects. Only one prospect can be explored at any
given time: a list of recommended items, a list of items
bookmarked by a specific user, or a list of items
marked with a specific tag. In this article, we explore
the notion of combining multiple relevance prospects as
a way to increase effectiveness and trust. We used a
visual approach to recommend articles at a conference
by explicitly presenting multiple dimensions of
relevance. Suggestions offered by different
recommendation techniques were embodied as recommender
agents to put them on the same ground as users and
tags. The results of two user studies performed at
academic conferences allowed us to obtain interesting
insights to enhance user interfaces of personalized
social systems. More specifically, effectiveness and
probability of item selection increase when users are
able to explore and interrelate prospects of item
relevance, that is, items bookmarked by users,
recommendations, and tags. Nevertheless, a
less-technical audience may require guidance to
understand the rationale of such intersections.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cafaro:2016:EIA,
author = "Angelo Cafaro and Brian Ravenet and Magalie Ochs and
Hannes H{\"o}gni Vilhj{\'a}lmsson and Catherine
Pelachaud",
title = "The Effects of Interpersonal Attitude of a Group of
Agents on User's Presence and Proxemics Behavior",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "12:1--12:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2914796",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In the everyday world people form small conversing
groups where social interaction takes place, and much
of the social behavior is expressed through managing
interpersonal space (i.e., proxemics) and group
formation, signaling their attention to others (i.e.,
through gaze behavior), and expressing certain
attitudes, for example, friendliness, by smiling,
getting close through increased engagement and
intimacy, and welcoming newcomers. Many real-time
interactive systems feature virtual anthropomorphic
characters in order to simulate conversing groups and
add plausibility and believability to the simulated
environments. However, only a few have dealt with
autonomous behavior generation, and in those cases, the
agents' exhibited behavior should be evaluated by users
in terms of appropriateness, believability, and
conveyed meaning (e.g., attitudes). In this article we
present an integrated intelligent interactive system
for generating believable nonverbal behavior exhibited
by virtual agents in small simulated group
conversations. The produced behavior supports group
formation management and the expression of
interpersonal attitudes (friendly vs. unfriendly) both
among the agents in the group (i.e., in-group attitude)
and towards an approaching user in an avatar-based
interaction (out-group attitude). A user study
investigating the effects of these attitudes on users'
social presence evaluation and proxemics behavior (with
their avatar) in a three-dimensional virtual city
environment is presented. We divided the study into two
trials according to the task assigned to users, that
is, joining a conversing group and reaching a target
destination behind the group. Results showed that the
out-group attitude had a major impact on social
presence evaluations in both trials, whereby friendly
groups were perceived as more socially rich. The user's
proxemics behavior depended on both out-group and
in-group attitudes expressed by the agents.
Implications of these results for the design and
implementation of similar intelligent interactive
systems for the autonomous generation of agents'
multimodal behavior are briefly discussed.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Taranta:2016:DPB,
author = "Eugene M. {Taranta II} and Andr{\'e}s N. Vargas and
Spencer P. Compton and Joseph J. {LaViola, Jr.}",
title = "A Dynamic Pen-Based Interface for Writing and Editing
Complex Mathematical Expressions With Math Boxes",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "13:1--13:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2946795",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Math boxes is a recently introduced pen-based user
interface for simplifying the task of hand writing
difficult mathematical expressions. Visible bounding
boxes around subexpressions are automatically generated
as the system detects relevant spatial relationships
between symbols including superscripts, subscripts, and
fractions. Subexpressions contained in a math box can
then be extended by adding new terms directly into their
given bounds. When new characters are accepted, box
boundaries are dynamically resized and neighboring
terms are translated to make room for the larger box.
Feedback on structural recognition is given via the
boxes themselves. In this work, we extend the math
boxes interface to include support for subexpression
modifications via a new set of pen-based interactions.
Specifically, techniques to expand and rearrange terms
in a given expression are introduced. To evaluate the
usefulness of our proposed methods, we first conducted
a user study in which participants wrote a variety of
equations ranging in complexity from a simple
polynomial to the more difficult expected value of the
logistic distribution. The math boxes interface is
compared against the commonly used offset typeset
(small) method, where recognized expressions are
typeset in a system font near the user's unmodified
ink. In this initial study, we find that the fluidness
of the offset method is preferred for simple
expressions but that, as difficulty increases, our math
boxes method is overwhelmingly preferred. We then
conducted a second user study that focused only on
modifying various mathematical expressions. In general,
participants worked faster with the math boxes
interface, and most new techniques were well received.
On the basis of the two user studies, we discuss the
implications of the math boxes interface and identify
areas where improvements are possible.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Yang:2016:SUS,
author = "Yi Yang and Shimei Pan and Jie Lu and Mercan Topkara
and Yangqiu Song",
title = "The Stability and Usability of Statistical Topic
Models",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "14:1--14:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2954002",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Statistical topic models have become a useful and
ubiquitous tool for analyzing large text corpora. One
common application of statistical topic models is to
support topic-centric navigation and exploration of
document collections. Existing work on topic modeling
focuses on the inference of model parameters so the
resulting model fits the input data. Since the exact
inference is intractable, statistical inference
methods, such as Gibbs Sampling, are commonly used to
solve the problem. However, most of the existing work
ignores an important aspect that is closely related to
the end user experience: topic model stability. When
the model is either re-trained with the same input data
or updated with new documents, the topic previously
assigned to a document may change under the new model,
which may result in a disruption of end users' mental
maps about the relations between documents and topics,
thus undermining the usability of the applications. In
this article, we propose a novel user-directed
non-disruptive topic model update method that balances
the tradeoff between finding the model that fits the
data and maintaining the stability of the model from
end users' perspective. It employs a novel constrained
LDA algorithm to incorporate pairwise document
constraints, which are converted from user feedback
about topics, to achieve topic model stability.
Evaluation results demonstrate the advantages of our
approach over previous methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kveton:2016:MIC,
author = "Branislav Kveton and Shlomo Berkovsky",
title = "Minimal Interaction Content Discovery in Recommender
Systems",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "15:1--15:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2845090",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Many prior works in recommender systems focus on
improving the accuracy of item rating predictions. In
comparison, the areas of recommendation interfaces and
user-recommender interaction remain underexplored. In
this work, we look into the interaction of users with
the recommendation list, aiming to devise a method that
simplifies content discovery and minimizes the cost of
reaching an item of interest. We quantify this cost by
the number of user interactions (clicks and scrolls)
with the recommendation list. To this end, we propose
generalized linear search (GLS), an adaptive
combination of the established linear and generalized
search (GS) approaches. GLS leverages the advantages of
these two approaches, and we prove formally that it
performs at least as well as GS. We also conduct a
thorough experimental evaluation of GLS and compare it
to several baselines and heuristic approaches in both
an offline and live evaluation. The results of the
evaluation show that GLS consistently outperforms the
baseline approaches and is also preferred by users. In
summary, GLS offers an efficient and easy-to-use means
for content discovery in recommender systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Zhang:2016:BTE,
author = "Cheng Zhang and Anhong Guo and Dingtian Zhang and Yang
Li and Caleb Southern and Rosa I. Arriaga and Gregory
D. Abowd",
title = "Beyond the Touchscreen: an Exploration of Extending
Interactions on Commodity {Smartphones}",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "16:1--16:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2954003",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Most smartphones today have a rich set of sensors that
could be used to infer input (e.g., accelerometer,
gyroscope, microphone); however, the primary mode of
interaction is still limited to the front-facing
touchscreen and several physical buttons on the case.
To investigate the potential opportunities for
interactions supported by built-in sensors, we present
the implementation and evaluation of BeyondTouch, a
family of interactions to extend and enrich the input
experience of a smartphone. Using only existing sensing
capabilities on a commodity smartphone, we offer the
user a wide variety of additional inputs on the case
and the surface adjacent to the smartphone. Although
most of these interactions are implemented with machine
learning methods, compact and robust rule-based
detection methods can also be applied for recognizing
some interactions by analyzing physical characteristics
of tapping events on the phone. This article is an
extended version of Zhang et al. [2015], which solely
covered gestures implemented by machine learning
methods. We extended our previous work by adding
gestures implemented with rule-based methods, which
work well with different users across devices without
collecting any training data. We outline the
implementation of both machine learning and rule-based
methods for these interaction techniques and
demonstrate empirical evidence of their effectiveness
and usability. We also discuss the practicality of
BeyondTouch for a variety of application scenarios and
compare the two different implementation methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Bosch:2016:UVA,
author = "Nigel Bosch and Sidney K. D'mello and Jaclyn Ocumpaugh
and Ryan S. Baker and Valerie Shute",
title = "Using Video to Automatically Detect Learner Affect in
Computer-Enabled Classrooms",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "17:1--17:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2946837",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Affect detection is a key component in intelligent
educational interfaces that respond to students'
affective states. We use computer vision and
machine-learning techniques to detect students' affect
from facial expressions (primary channel) and gross
body movements (secondary channel) during interactions
with an educational physics game. We collected data in
the real-world environment of a school computer lab
with up to 30 students simultaneously playing the game
while moving around, gesturing, and talking to each
other. The results were cross-validated at the student
level to ensure generalization to new students.
Classification accuracies, quantified as area under the
receiver operating characteristic curve (AUC), were
above chance (AUC of 0.5) for all the affective states
observed, namely, boredom (AUC = .610), confusion (AUC
= .649), delight (AUC = .867), engagement (AUC = .679),
frustration (AUC = .631), and for off-task behavior
(AUC = .816). Furthermore, the detectors showed
temporal generalizability in that there was less than a
2\% decrease in accuracy when tested on data collected
from different times of the day and from different
days. There was also some evidence of generalizability
across ethnicity (as perceived by human coders) and
gender, although with a higher degree of variability
attributable to differences in affect base rates across
subpopulations. In summary, our results demonstrate the
feasibility of generalizable video-based detectors of
naturalistic affect in a real-world setting, suggesting
that the time is ripe for affect-sensitive
interventions in educational games and other
intelligent interfaces.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Tanaka:2016:TSC,
author = "Hiroki Tanaka and Sakti Sakriani and Graham Neubig and
Tomoki Toda and Hideki Negoro and Hidemi Iwasaka and
Satoshi Nakamura",
title = "Teaching Social Communication Skills Through
Human-Agent Interaction",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "18:1--18:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2937757",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "There are a large number of computer-based systems
that aim to train and improve social skills. However,
most of these do not resemble the training regimens
used by human instructors. In this article, we propose
a computer-based training system that follows the
procedure of social skills training (SST), a
well-established method to decrease human anxiety and
discomfort in social interaction, and acquire social
skills. We attempt to automate the process of SST by
developing a dialogue system named the automated social
skills trainer, which teaches social communication
skills through human-agent interaction. The system
includes a virtual avatar that recognizes user speech
and language information and gives feedback to users.
Its design is based on conventional SST performed by
human participants, including defining target skills,
modeling, role-play, feedback, reinforcement, and
homework. We performed a series of three experiments
investigating (1) the advantages of using
computer-based training systems compared to human-human
interaction (HHI) by subjectively evaluating
nervousness, ease of talking, and ability to talk well;
(2) the relationship between speech language features
and human social skills; and (3) the effect of
computer-based training using our proposed system.
Results of our first experiment show that interaction
with an avatar decreases nervousness and increases the
user's subjective impression of his or her ability to
talk well compared to interaction with an unfamiliar
person. The experimental evaluation measuring the
relationship between social skill and speech and
language features shows that these features have a
relationship with social skills. Finally, experiments
measuring the effect of performing SST with the
proposed application show that participants
significantly improve their skill, as assessed by
separate evaluators, by using the system for 50
minutes. A user survey also shows that the users
thought our system is useful and easy to use, and that
interaction with the avatar felt similar to HHI.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Mahmoud:2016:AAN,
author = "Marwa Mahmoud and Tadas Baltrusaitis and Peter
Robinson",
title = "Automatic Analysis of Naturalistic Hand-Over-Face
Gestures",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "19:1--19:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2946796",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "One of the main factors that limit the accuracy of
facial analysis systems is hand occlusion. As the face
becomes occluded, facial features are lost, corrupted,
or erroneously detected. Hand-over-face occlusions are
considered not only very common but also very
challenging to handle. However, there is empirical
evidence that some of these hand-over-face gestures
serve as cues for recognition of cognitive mental
states. In this article, we present an analysis of
automatic detection and classification of
hand-over-face gestures. We detect hand-over-face
occlusions and classify hand-over-face gesture
descriptors in videos of natural expressions using
multi-modal fusion of different state-of-the-art
spatial and spatio-temporal features. We show
experimentally that we can successfully detect face
occlusions with an accuracy of 83\%. We also
demonstrate that we can classify gesture descriptors
(hand shape, hand action, and facial region occluded)
significantly better than a na{\"\i}ve baseline. Our
detailed quantitative analysis sheds some light on the
challenges of automatic classification of
hand-over-face gestures in natural expressions.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ishii:2016:URP,
author = "Ryo Ishii and Kazuhiro Otsuka and Shiro Kumano and
Junji Yamato",
title = "Using Respiration to Predict Who Will Speak Next and
When in Multiparty Meetings",
journal = j-TIIS,
volume = "6",
number = "2",
pages = "20:1--20:??",
month = aug,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2946838",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:13 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Techniques that use nonverbal behaviors to predict
turn-changing situations (such as, in multiparty
meetings, who the next speaker will be and when the
next utterance will occur) have been receiving a lot of
attention in recent research. To build a model for
predicting these behaviors we conducted a research
study to determine whether respiration could be
effectively used as a basis for the prediction. Results
of analyses of utterance and respiration data collected
from participants in multiparty meetings reveal that
the speaker takes a breath more quickly and deeply
after the end of an utterance in turn-keeping than in
turn-changing. They also indicate that the listener who
will be the next speaker takes a bigger breath more
quickly and deeply in turn-changing than the other
listeners. On the basis of these results, we
constructed and evaluated models for predicting the
next speaker and the time of the next utterance in
multiparty meetings. The results of the evaluation
suggest that the characteristics of the speaker's
inhalation right after an utterance unit (the points in
time at which the inhalation starts and ends after the
end of the utterance unit and the amplitude, slope, and
duration of the inhalation phase) are effective for
predicting the next speaker in multiparty meetings.
They further suggest that the characteristics of
listeners' inhalation (the points in time at which the
inhalation starts and ends after the end of the
utterance unit and the minimum and maximum inspiration,
amplitude, and slope of the inhalation phase) are
effective for predicting the next speaker. The start
time and end time of the next speaker's inhalation are
also useful for predicting the time of the next
utterance in turn-changing.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Prendinger:2016:IBT,
author = "Helmut Prendinger and Nahum Alvarez and Antonio
Sanchez-Ruiz and Marc Cavazza and Jo{\~a}o Catarino and
Jo{\~a}o Oliveira and Rui Prada and Shuji Fujimoto and
Mika Shigematsu",
title = "Intelligent Biohazard Training Based on Real-Time Task
Recognition",
journal = j-TIIS,
volume = "6",
number = "3",
pages = "21:1--21:??",
month = oct,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2883617",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:14 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Virtual environments offer an ideal setting to develop
intelligent training applications. Yet, their ability
to support complex procedures depends on the
appropriate integration of knowledge-based techniques
and natural interaction. In this article, we describe
the implementation of an intelligent rehearsal system
for biohazard laboratory procedures, based on the
real-time instantiation of task models from the
trainee's actions. A virtual biohazard laboratory has
been recreated using the Unity3D engine, in which users
interact with laboratory objects using keyboard/mouse
input or hand gestures through a Kinect device.
Realistic behavior for objects is supported by the
implementation of a relevant subset of common sense and
physics knowledge. User interaction with objects leads
to the recognition of specific actions, which are used
to progressively instantiate a task-based
representation of biohazard procedures. The dynamics of
this instantiation process supports trainee evaluation
as well as real-time assistance. This system is
designed primarily as a rehearsal system providing
real-time advice and supporting user performance
evaluation. We provide detailed examples illustrating
error detection and recovery, and results from on-site
testing with students from the Faculty of Medical
Sciences at Kyushu University. In the study, we
investigate the usability aspect by comparing
interaction with mouse and Kinect devices and the
effect of real-time task recognition on recovery time
after user mistakes.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sappelli:2016:AIA,
author = "Maya Sappelli and Suzan Verberne and Wessel Kraaij",
title = "Adapting the Interactive Activation Model for Context
Recognition and Identification",
journal = j-TIIS,
volume = "6",
number = "3",
pages = "22:1--22:??",
month = oct,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2873067",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:14 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In this article, we propose and implement a new model
for context recognition and identification. Our work is
motivated by the importance of ``working in context''
for knowledge workers to stay focused and productive. A
computer application that can identify the current
context in which the knowledge worker is working can
(among other things) provide the worker with contextual
support, for example, by suggesting relevant
information sources, or give an overview of how he or
she spent his or her time during the day. We present a
descriptive model for the context of a knowledge
worker. This model describes the contextual elements in
the work environment of the knowledge worker and how
these elements relate to each other. This model is
operationalized in an algorithm, the contextual
interactive activation model (CIA), which is based on
the interactive activation model by Rumelhart and
McClelland. It consists of a layered connected network
through which activation flows. We have tested CIA in a
context identification setting. In this case, the data
that we use as input is low-level computer interaction
logging data. We found that topical information and
entities were the most relevant types of information
for context identification. Overall, the proposed CIA
model is more effective than traditional supervised
methods in identifying the active context from sparse
input data, with less labelled training data.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Aslan:2016:DEM,
author = "Ilhan Aslan and Andreas Uhl and Alexander
Meschtscherjakov and Manfred Tscheligi",
title = "Design and Exploration of Mid-Air Authentication
Gestures",
journal = j-TIIS,
volume = "6",
number = "3",
pages = "23:1--23:??",
month = oct,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2832919",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:14 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Authentication based on touchless mid-air gestures
would benefit a multitude of ubiquitous computing
applications, especially those that are used in clean
environments (e.g., medical environments or clean
rooms). In order to explore the potential of mid-air
gestures for novel authentication approaches, we
performed a series of studies and design experiments.
First, we collected data from more than 200 users
during a 3-day science event organized within a
shopping mall. These data were used to investigate
capabilities of the Leap Motion sensor, to observe
interaction in the wild, and to formulate an initial
design problem. The design problem, as well as the
design of mid-air gestures for authentication purposes,
were iterated in subsequent design activities. In a
final study with 13 participants, we evaluated two
mid-air gestures for authentication purposes in
different situations, including different body
positions. Our results highlight a need for different
mid-air gestures for differing situations and carefully
chosen constraints for mid-air gestures. We conclude by
proposing an exemplary system, which aims to provide
tool-support for designers and engineers, allowing them
to explore authentication gestures in the original
context of use and thus support them with the design of
contextual mid-air authentication gestures.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{El-Glaly:2016:RWY,
author = "Yasmine N. El-Glaly and Francis Quek",
title = "Read What You Touch with Intelligent Audio System for
Non-Visual Interaction",
journal = j-TIIS,
volume = "6",
number = "3",
pages = "24:1--24:??",
month = oct,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2822908",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:14 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Slate-type devices allow Individuals with Blindness or
Severe Visual Impairment (IBSVI) to read in place with
the touch of their fingertip by audio-rendering the
words they touch. Such technologies are helpful for
spatial cognition while reading. However, users have to
move their fingers slowly, or they may lose their place
on screen. Also, IBSVI may wander between lines without
realizing they did. We addressed these two interaction
problems by introducing a dynamic speech-touch
interaction model and an intelligent reading support
system. With this model, the speed of the speech will
dynamically change with the user's finger speed. The
proposed model is composed of (1) an Audio Dynamics
Model and (2) an Off-line Speech Synthesis Technique.
The intelligent reading support system predicts the
direction of reading, corrects the reading word if the
user drifts, and notifies the user via a sonic gutter
to keep him or her from straying off the reading line. We
tested the new audio dynamics model, the sonic gutter,
and the reading support model in two user studies. The
participants' feedback helped us fine-tune the
parameters of the two models. A decomposition study was
conducted to evaluate the main components of the
system. The results showed that both intelligent
reading support and tactile feedback are required to
achieve the best performance in terms of efficiency and
effectiveness. Finally, we ran an evaluation study
where the reading support system is compared to other
VoiceOver technologies. The results favored the
reading support system with its
audio dynamics and intelligent reading support
components.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Park:2016:MAP,
author = "Sunghyun Park and Han Suk Shim and Moitreya Chatterjee
and Kenji Sagae and Louis-Philippe Morency",
title = "Multimodal Analysis and Prediction of Persuasiveness
in Online Social Multimedia",
journal = j-TIIS,
volume = "6",
number = "3",
pages = "25:1--25:??",
month = oct,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897739",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Tue Oct 18 11:51:14 MDT 2016",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Our lives are heavily influenced by persuasive
communication, and it is essential in almost any type
of social interaction from business negotiation to
conversation with our friends and family. With the
rapid growth of social multimedia websites, it is
becoming ever more important and useful to understand
persuasiveness in the context of social multimedia
content online. In this article, we introduce a newly
created multimedia corpus of 1,000 movie review videos
with subjective annotations of persuasiveness and
related high-level characteristics or attributes (e.g.,
confidence). This dataset will be made freely available
to the research community. We designed our experiments
around the following five main research hypotheses.
First, we study if computational descriptors derived
from verbal and nonverbal behavior can be predictive of
persuasiveness. We further explore combining
descriptors from multiple communication modalities
(acoustic, verbal, para-verbal, and visual) for
predicting persuasiveness and compare with using a
single modality alone. Second, we investigate how
certain high-level attributes, such as credibility or
expertise, are related to persuasiveness and how the
information can be used in modeling and predicting
persuasiveness. Third, we investigate differences when
speakers are expressing a positive or negative opinion
and if the opinion polarity has any influence in the
persuasiveness prediction. Fourth, we further study if
gender has any influence in the prediction performance.
Last, we test if it is possible to make comparable
predictions of persuasiveness by only looking at thin
slices (i.e., shorter time windows) of a speaker's
behavior.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Tintarev:2016:ISI,
author = "Nava Tintarev and John O'donovan and Alexander
Felfernig",
title = "Introduction to the Special Issue on Human Interaction
with Artificial Advice Givers",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "26:1--26:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3014432",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Many interactive systems in today's world can be
viewed as providing advice to their users. Commercial
examples include recommender systems, satellite
navigation systems, intelligent personal assistants on
smartphones, and automated checkout systems in
supermarkets. We will call these systems that support
people in making choices and decisions artificial
advice givers (AAGs): They propose and evaluate options
while involving their human users in the
decision-making process. This special issue addresses
the challenge of improving the interaction between
artificial and human agents. It answers the question of
how an agent of each type (human and artificial) can
influence and understand the reasoning, working models,
and conclusions of the other agent by means of novel
forms of interaction. To address this challenge, the
articles in the special issue are organized around
three themes: (a) human factors to consider when
designing interactions with AAGs (e.g., over- and
under-reliance, overestimation of the system's
capabilities), (b) methods for supporting interaction
with AAGs (e.g., natural language, visualization, and
argumentation), and (c) considerations for evaluating
AAGs (both criteria and methodology for applying
them).",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sutherland:2016:EAE,
author = "Steven C. Sutherland and Casper Harteveld and Michael
E. Young",
title = "Effects of the Advisor and Environment on Requesting
and Complying With Automated Advice",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "27:1--27:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2905370",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Given the rapid technological advances in our society
and the increase in artificial and automated advisors
with whom we interact on a daily basis, it is becoming
increasingly necessary to understand how users interact
with and why they choose to request and follow advice
from these types of advisors. More specifically, it is
necessary to understand errors in advice utilization.
In the present study, we propose a methodological
framework for studying interactions between users and
automated or other artificial advisors. Specifically,
we propose the use of virtual environments and the tarp
technique for stimulus sampling, ensuring sufficient
sampling of important extreme values and the stimulus
space between those extremes. We use this proposed
framework to identify the impact of several factors on
when and how advice is used. Additionally, because
these interactions take place in different
environments, we explore the impact of where the
interaction takes place on the decision to interact. We
varied the cost of advice, the reliability of the
advisor, and the predictability of the environment to
better understand the impact of these factors on the
overutilization of suboptimal advisors and
underutilization of optimal advisors. We found that
less predictable environments, more reliable advisors,
and lower costs for advice led to overutilization,
whereas more predictable environments and less reliable
advisors led to underutilization. Moreover, once advice
was received, users took longer to make a final
decision, suggesting less confidence and trust in the
advisor when the reliability of the advisor was lower,
the environment was less predictable, and the advice
was not consistent with the environmental cues. These
results contribute to a more complete understanding of
advice utilization and trust in advisors.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Knijnenburg:2016:ICI,
author = "Bart P. Knijnenburg and Martijn C. Willemsen",
title = "Inferring Capabilities of Intelligent Agents from
Their External Traits",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "28:1--28:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2963106",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "We investigate the usability of humanlike agent-based
interfaces for interactive advice-giving systems. In an
experiment with a travel advisory system, we manipulate
the ``humanlikeness'' of the agent interface. We
demonstrate that users of the more humanlike agents try
to exploit capabilities that were not signaled by the
system. This severely reduces the usability of systems
that look human but lack humanlike
capabilities (overestimation effect). We explain this
effect by showing that users of humanlike agents form
anthropomorphic beliefs (a user's ``mental model'')
about the system: They act humanlike towards the system
and try to exploit typical humanlike capabilities they
believe the system possesses. Furthermore, we
demonstrate that the mental model users form of an
agent-based system is inherently integrated (as opposed
to the compositional mental model they form of
conventional interfaces): Cues provided by the system
do not instill user responses in a one-to-one manner
but are instead integrated into a single mental
model.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Clark:2016:MAA,
author = "Leigh Clark and Abdulmalik Ofemile and Svenja Adolphs
and Tom Rodden",
title = "A Multimodal Approach to Assessing User Experiences
with Agent Helpers",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "29:1--29:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2983926",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The study of agent helpers using linguistic strategies
such as vague language and politeness has often come
across obstacles. One of these is the quality of the
agent's voice and its lack of appropriate fit for using
these strategies. The first approach of this article
compares human vs. synthesised voices in agents using
vague language. This approach analyses the 60,000-word
text corpus of participant interviews to investigate
the differences in user attitudes towards the agents,
their voices and their use of vague language. It
discovers that while vague language is still met with
resistance in agent instructors,
using a human voice yields more positive results than
the synthesised alternatives. The second approach in
this article discusses the development of a novel
multimodal corpus of video and text data to create
multiple analyses of human-agent interaction in
agent-instructed assembly tasks. The second approach
analyses user spontaneous facial actions and gestures
during their interaction in the tasks. It found that
agents are able to elicit these facial actions and
gestures and posits that further analysis of this
nonverbal feedback may help to create a more adaptive
agent. Finally, the approaches used in this article
suggest these can contribute to furthering the
understanding of what it means to interact with
software agents.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Rosenfeld:2016:PAD,
author = "Ariel Rosenfeld and Sarit Kraus",
title = "Providing Arguments in Discussions on the Basis of the
Prediction of Human Argumentative Behavior",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "30:1--30:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2983925",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Argumentative discussion is a highly demanding task.
In order to help people in such discussions, this
article provides an innovative methodology for
developing agents that can support people in
argumentative discussions by proposing possible
arguments. By gathering and analyzing human
argumentative behavior from more than 1000 human study
participants, we show that the prediction of human
argumentative behavior using Machine Learning (ML) is
possible and useful in designing argument provision
agents. This article first demonstrates that ML
techniques can achieve up to 76\% accuracy when
predicting people's top three argument choices given a
partial discussion. We further show that
well-established Argumentation Theory is not a good
predictor of people's choice of arguments. Then, we
present 9 argument provision agents, which we
empirically evaluate using hundreds of human study
participants. We show that the Predictive and
Relevance-Based Heuristic agent (PRH), which uses ML
prediction with a heuristic that estimates the
relevance of possible arguments to the current state of
the discussion, results in significantly higher levels
of satisfaction among study participants compared with
the other evaluated agents. These other agents propose
arguments based on Argumentation Theory; propose
predicted arguments without the heuristics or with only
the heuristics; or use Transfer Learning methods. Our
findings also show that people use the PRH agent's
proposed arguments significantly more often than those
proposed by the other agents.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Mutlu:2016:VRP,
author = "Belgin Mutlu and Eduardo Veas and Christoph Trattner",
title = "{VizRec}: Recommending Personalized Visualizations",
journal = j-TIIS,
volume = "6",
number = "4",
pages = "31:1--31:??",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2983923",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Visualizations have a distinctive advantage when
dealing with the information overload problem: Because
they are grounded in basic visual cognition, many
people understand them. However, creating proper
visualizations requires specific expertise of the
domain and underlying data. Our quest in this article
is to study methods to suggest appropriate
visualizations autonomously. To be appropriate, a
visualization has to follow known guidelines to find
and distinguish patterns visually and encode data
therein. A visualization tells a story of the
underlying data; yet, to be appropriate, it has to
clearly represent those aspects of the data the viewer
is interested in. Which aspects of a visualization are
important to the viewer? Can we capture and use those
aspects to recommend visualizations? This article
investigates strategies to recommend visualizations
considering different aspects of user preferences. A
multi-dimensional scale is used to estimate aspects of
visualization quality for collaborative filtering.
Alternatively, tag vectors describing visualizations
are used to recommend potentially interesting
visualizations based on content. Finally, a hybrid
approach combines information on what a visualization
is about (tags) and how good it is (ratings). We
present the design principles behind VizRec, our visual
recommender. We describe its architecture, the data
acquisition approach with a crowd sourced study, and
the analysis of strategies for visualization
recommendation.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "31",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Paudel:2017:UAD,
author = "Bibek Paudel and Fabian Christoffel and Chris Newell
and Abraham Bernstein",
title = "Updatable, Accurate, Diverse, and Scalable
Recommendations for Interactive Applications",
journal = j-TIIS,
volume = "7",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2955101",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recommender systems form the backbone of many
interactive systems. They incorporate user feedback to
personalize the user experience typically via
personalized recommendation lists. As users interact
with a system, an increasing amount of data about a
user's preferences becomes available, which can be
leveraged for improving the systems' performance.
Incorporating these new data into the underlying
recommendation model is, however, not always
straightforward. Many models used by recommender
systems are computationally expensive and, therefore,
have to perform offline computations to compile the
recommendation lists. For interactive applications, it
is desirable to be able to update the computed values
as soon as new user interaction data is available:
updating recommendations in interactive time using new
feedback data leads to better accuracy and increases
the attraction of the system to the users.
Additionally, there is a growing consensus that
accuracy alone is not enough and user satisfaction is
also dependent on diverse recommendations. In this
work, we tackle this problem of updating personalized
recommendation lists for interactive applications in
order to provide both accurate and diverse
recommendations. To that end, we explore algorithms
that exploit random walks as a sampling technique to
obtain diverse recommendations without compromising on
efficiency and accuracy. Specifically, we present a
novel graph vertex ranking recommendation algorithm
called RP$^3_\beta $ that reranks items based on
three-hop random walk transition probabilities. We show
empirically that RP$^3_\beta $ provides accurate
recommendations with high long-tail item frequency at
the top of the recommendation list. We also present
approximate versions of RP$^3_\beta $ and the two most
accurate previously published vertex ranking algorithms
based on random walk transition probabilities and show
that these approximations converge with an increasing
number of samples. To obtain interactively updatable
recommendations, we additionally show how our algorithm
can be extended for online updates at interactive
speeds. The underlying random walk sampling technique
makes it possible to perform the updates without having
to recompute the values for the entire dataset. In an
empirical evaluation with three real-world datasets, we
show that RP$^3_\beta $ provides highly accurate and
diverse recommendations that can easily be updated with
newly gathered information at interactive speeds ($\ll$
100 ms).",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kaminskas:2017:DSN,
author = "Marius Kaminskas and Derek Bridge",
title = "Diversity, Serendipity, Novelty, and Coverage: a
Survey and Empirical Analysis of Beyond-Accuracy
Objectives in Recommender Systems",
journal = j-TIIS,
volume = "7",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2926720",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "What makes a good recommendation or good list of
recommendations? Research into recommender systems has
traditionally focused on accuracy, in particular how
closely the recommender's predicted ratings are to the
users' true ratings. However, it has been recognized
that other recommendation qualities-such as whether the
list of recommendations is diverse and whether it
contains novel items-may have a significant impact on
the overall quality of a recommender system.
Consequently, in recent years, the focus of recommender
systems research has shifted to include a wider range
of ``beyond accuracy'' objectives. In this article, we
present a survey of the most discussed beyond-accuracy
objectives in recommender systems research: diversity,
serendipity, novelty, and coverage. We review the
definitions of these objectives and corresponding
metrics found in the literature. We also review works
that propose optimization strategies for these
beyond-accuracy objectives. Since the majority of works
focus on one specific objective, we find that it is not
clear how the different objectives relate to each
other. Hence, we conduct a set of offline experiments
aimed at comparing the performance of different
optimization approaches with a view to seeing how they
affect objectives other than the ones they are
optimizing. We use a set of state-of-the-art
recommendation algorithms optimized for recall along
with a number of reranking strategies for optimizing
the diversity, novelty, and serendipity of the
generated recommendations. For each reranking strategy,
we measure the effects on the other beyond-accuracy
objectives and demonstrate important insights into the
correlations between the discussed objectives. For
instance, we find that rating-based diversity is
positively correlated with novelty, and we demonstrate
the positive influence of novelty on recommendation
coverage.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
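For concreteness, here is a small sketch of two beyond-accuracy measures of
the kind the survey above discusses: intra-list diversity (average pairwise
dissimilarity of a recommendation list) and catalog coverage. These are
standard textbook formulations, not necessarily the exact definitions used in
the article.

import numpy as np

def intra_list_diversity(rec_items, item_vectors):
    # Average pairwise cosine dissimilarity of the recommended items.
    V = np.array([item_vectors[i] for i in rec_items], dtype=float)
    V = V / np.linalg.norm(V, axis=1, keepdims=True)
    sims = V.dot(V.T)
    n = len(rec_items)
    off_diag = sims.sum() - np.trace(sims)
    return 1.0 - off_diag / (n * (n - 1))

def catalog_coverage(all_rec_lists, n_items):
    # Fraction of the catalog appearing in at least one recommendation list.
    recommended = set(i for lst in all_rec_lists for i in lst)
    return len(recommended) / n_items

item_vectors = {0: [1, 0], 1: [0.9, 0.1], 2: [0, 1]}
print(intra_list_diversity([0, 1, 2], item_vectors))
print(catalog_coverage([[0, 1], [1, 2]], n_items=5))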
@Article{Zhang:2017:EEI,
author = "Ting Zhang and Yu-Ting Li and Juan P. Wachs",
title = "The Effect of Embodied Interaction in Visual-Spatial
Navigation",
journal = j-TIIS,
volume = "7",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2953887",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article aims to assess the effect of embodied
interaction on attention during the process of solving
spatio-visual navigation problems. It presents a method
that links operator's physical interaction, feedback,
and attention. Attention is inferred through networks
called Bayesian Attentional Networks (BANs). BANs are
structures that describe the cause-effect relationship
between attention and physical action. Then, a utility
function is used to determine the best combination of
interaction modalities and feedback. Experiments
involving five physical interaction modalities
(vision-based gesture interaction, glove-based gesture
interaction, speech, feet, and body stance) and two
feedback modalities (visual and sound) are described.
The main findings are: (i) physical expressions have an
effect on the quality of the solutions to spatial
navigation problems; (ii) the combination of feet
gestures with visual feedback provides the best task
performance.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
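The abstract above says a utility function is used to pick the best
combination of interaction modality and feedback. A minimal sketch of that
selection step follows; the utility values are invented placeholders (the
article's learned utility is not given in the abstract), chosen only so that
the feet-plus-visual pairing reported as best comes out on top.

from itertools import product

modalities = ["vision gesture", "glove gesture", "speech", "feet", "body stance"]
feedback = ["visual", "sound"]

def utility(modality, fb):
    # Placeholder scores standing in for the (unknown) learned utility.
    scores = {("feet", "visual"): 0.9, ("speech", "sound"): 0.7}
    return scores.get((modality, fb), 0.5)

# Enumerate all modality/feedback pairings and keep the highest-utility one.
best = max(product(modalities, feedback), key=lambda pair: utility(*pair))
print("Best combination:", best)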
@Article{Ochs:2017:UPB,
author = "Magalie Ochs and Catherine Pelachaud and Gary
Mckeown",
title = "A User Perception--Based Approach to Create Smiling
Embodied Conversational Agents",
journal = j-TIIS,
volume = "7",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2925993",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In order to improve the social capabilities of
embodied conversational agents, we propose a
computational model to enable agents to automatically
select and display appropriate smiling behavior during
human--machine interaction. A smile may convey
different communicative intentions depending on subtle
characteristics of the facial expression and contextual
cues. To construct such a model, as a first step, we
explore the morphological and dynamic characteristics
of different types of smiles (polite, amused, and
embarrassed smiles) that an embodied conversational
agent may display. The resulting lexicon of smiles is
based on a corpus of virtual agents' smiles directly
created by users and analyzed through a
machine-learning technique. Moreover, during an
interaction, a smiling expression impacts on the
observer's perception of the interpersonal stance of
the speaker. As a second step, we propose a
probabilistic model to automatically compute the user's
potential perception of the embodied conversational
agent's social stance depending on its smiling behavior
and on its physical appearance. This model, based on a
corpus of users' perceptions of smiling and nonsmiling
virtual agents, enables a virtual agent to determine
the appropriate smiling behavior to adopt given the
interpersonal stance it wants to express. An experiment
using real human--virtual agent interaction provided
some validation of the proposed model.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ramirez-Amaro:2017:AVG,
author = "Karinne Ramirez-Amaro and Humera Noor Minhas and
Michael Zehetleitner and Michael Beetz and Gordon
Cheng",
title = "Added Value of Gaze-Exploiting Semantic Representation
to Allow Robots Inferring Human Behaviors",
journal = j-TIIS,
volume = "7",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2939381",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Mar 25 07:51:07 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Neuroscience studies have shown that incorporating the
gaze view with a third-person perspective greatly
influences the correct inference of human behaviors.
Given the importance of both first- and third-person
observations for the recognition of human behaviors, we
propose a method that incorporates these observations
in a technical system to enhance the recognition of
human behaviors, improving on third-person-only
observations and yielding a more robust human activity
recognition system. First, we present the extension of
our proposed semantic reasoning method by including
gaze data and external observations as inputs to
segment and infer human behaviors in complex real-world
scenarios. Then, from the obtained results we
demonstrate that the combination of gaze and external
input sources greatly enhances the recognition of human
behaviors. Our findings have been applied to a humanoid
robot to segment and recognize the observed human
activities online with better accuracy when using both
input sources; for example, the activity recognition
increases from 77\% to 82\% in our proposed
pancake-making dataset. To demonstrate the completeness
of our system, we have evaluated our approach on another
dataset with a setup similar to the one proposed in
this work, that is, the CMU-MMAC dataset. In this case,
we improved the recognition of the activities for the
egg scrambling scenario from 54\% to 86\% by combining
the external views with the gaze information, thus
showing the benefit of incorporating gaze information
to infer human behaviors across different datasets.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cena:2017:ISI,
author = "F. Cena and C. Gena and G. J. Houben and M.
Strohmaier",
title = "Introduction to the {Special Issue on Big Personal
Data in Interactive Intelligent Systems}",
journal = j-TIIS,
volume = "7",
number = "2",
pages = "6:1--6:??",
month = jul,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3101102",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Sep 8 08:41:25 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This brief introduction begins with an overview of the
types of research that are relevant to the special
issue on Big Personal Data in Interactive Intelligent
Systems. The overarching question is: How can big
personal data be collected, analyzed, and exploited so
as to provide new or improved forms of interaction with
intelligent systems, and what new issues have to be
taken into account? The three articles accepted for the
special issue are then characterized in terms of the
concepts of this overview.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Cassavia:2017:DUB,
author = "Nunziato Cassavia and Elio Masciari and Chiara Pulice
and Domenico Sacc{\`a}",
title = "Discovering User Behavioral Features to Enhance
Information Search on Big Data",
journal = j-TIIS,
volume = "7",
number = "2",
pages = "7:1--7:??",
month = jul,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2856059",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Sep 8 08:41:25 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Due to the emerging Big Data paradigm, driven by the
increasing availability of intelligent services easily
accessible by a large number of users (e.g., social
networks), traditional data management techniques are
inadequate in many real-life scenarios. In particular,
the availability of huge amounts of data pertaining to
user social interactions, user preferences, and
opinions calls for advanced analysis strategies to
understand potentially interesting social dynamics.
Furthermore, heterogeneity and high speed of
user-generated data require suitable data storage and
management tools to be designed from scratch. This
article presents a framework tailored for analyzing
user interactions with intelligent systems while
seeking some domain-specific information (e.g.,
choosing a good restaurant in a visited area). The
framework enhances a user's quest for information by
exploiting previous knowledge about their social
environment, the extent of influence the users are
potentially subject to, and the influence they may
exert on other users. User influence spread across the
network is dynamically computed as well to improve user
search strategy by providing specific suggestions,
represented as tailored faceted features. Such features
are the result of data exchange activity (called data
posting) that enriches information sources with
additional background information and knowledge derived
from experiences and behavioral properties of domain
experts and users. The approach is tested in an
important application scenario, tourist recommendation,
but it can be profitably exploited in
several other contexts, for example, viral marketing
and food education.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kim:2017:MML,
author = "Seungjun Kim and Dan Tasse and Anind K. Dey",
title = "Making Machine-Learning Applications for Time-Series
Sensor Data Graphical and Interactive",
journal = j-TIIS,
volume = "7",
number = "2",
pages = "8:1--8:??",
month = jul,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2983924",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Sep 8 08:41:25 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The recent profusion of sensors has given consumers
and researchers the ability to collect significant
amounts of data. However, understanding sensor data can
be a challenge, because it is voluminous,
multi-sourced, and unintelligible. Nonetheless,
intelligent systems, such as activity recognition,
require pattern analysis of sensor data streams to
produce compelling results; machine learning (ML)
applications enable this type of analysis. However, the
number of ML experts able to proficiently classify
sensor data is limited, and there remains a lack of
interactive, usable tools to help intermediate users
perform this type of analysis. To learn which features
these tools must support, we conducted interviews with
intermediate users of ML and conducted two probe-based
studies with a prototype ML and visual analytics
system, Gimlets. Our system implements ML applications
for sensor-based time-series data as a novel
domain-specific prototype that integrates interactive
visual analytic features into the ML pipeline. We
identify future directions for usable ML systems based
on sensor data that will enable intermediate users to
build systems that have previously been prohibitively
difficult to create.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Zanzotto:2017:YLT,
author = "Fabio Massimo Zanzotto and Lorenzo Ferrone",
title = "Have You Lost the Thread? {Discovering} Ongoing
Conversations in Scattered Dialog Blocks",
journal = j-TIIS,
volume = "7",
number = "2",
pages = "9:1--9:??",
month = jul,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2885501",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Fri Sep 8 08:41:25 MDT 2017",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Finding threads in textual dialogs is emerging as a
need to better organize stored knowledge. We capture
this need by introducing the novel task of discovering
ongoing conversations in scattered dialog blocks. Our
aim in this article is twofold. First, we propose a
publicly available testbed for the task by circumventing
the otherwise insurmountable problem of the privacy of
Big Personal Data: we show that personal dialogs can be
surrogated with theatrical plays. Second, we propose a
suite of computationally light learning models that can
use syntactic and semantic features. With this suite,
we show that models for this challenging task should
include features capturing shifts in language use and,
possibly, modeling underlying scripts.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Jugovac:2017:IRO,
author = "Michael Jugovac and Dietmar Jannach",
title = "Interacting with Recommenders---Overview and Research
Directions",
journal = j-TIIS,
volume = "7",
number = "3",
pages = "10:1--10:??",
month = oct,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3001837",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Automated recommendations have become a ubiquitous
part of today's online user experience. These systems
point us to additional items to purchase in online
shops, they make suggestions to us on movies to watch,
or they recommend people for us to connect with on social
websites. In many of today's applications, however, the
only way for users to interact with the system is to
inspect the recommended items. Often, no mechanisms are
implemented for users to give the system feedback on
the recommendations or to explicitly specify
preferences, which can limit the potential overall
value of the system for its users. Academic research in
recommender systems is largely focused on algorithmic
approaches for item selection and ranking. Nonetheless,
over the years a variety of proposals were made on how
to design more interactive recommenders. This work
provides a comprehensive overview of the existing
literature on user interaction aspects in recommender
systems. We cover existing approaches for preference
elicitation and result presentation, as well as
proposals that consider recommendation as an
interactive process. Throughout the work, we
furthermore discuss examples of real-world systems and
outline possible directions for future works.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Paiva:2017:EVA,
author = "Ana Paiva and Iolanda Leite and Hana Boukricha and
Ipke Wachsmuth",
title = "Empathy in Virtual Agents and Robots: a Survey",
journal = j-TIIS,
volume = "7",
number = "3",
pages = "11:1--11:??",
month = oct,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2912150",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article surveys the area of computational
empathy, analysing different ways by which artificial
agents can simulate and trigger empathy in their
interactions with humans. Empathic agents can be seen
as agents that have the capacity to place themselves
into the position of a user's or another agent's
emotional situation and respond appropriately. We also
survey artificial agents that, by their design and
behaviour, can lead users to respond emotionally as if
they were experiencing the agent's situation. In the
course of this survey, we present the research
conducted to date on empathic agents in light of the
principles and mechanisms of empathy found in humans.
We end by discussing some of the main challenges that
this exciting area will be facing in the future.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Nourbakhsh:2017:DUC,
author = "Nargess Nourbakhsh and Fang Chen and Yang Wang and
Rafael A. Calvo",
title = "Detecting Users' Cognitive Load by Galvanic Skin
Response with Affective Interference",
journal = j-TIIS,
volume = "7",
number = "3",
pages = "12:1--12:??",
month = oct,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2960413",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Experiencing high cognitive load during complex and
demanding tasks results in performance reduction,
stress, and errors. However, these could be prevented
by a system capable of constantly monitoring users'
cognitive load fluctuations and adjusting its
interactions accordingly. Physiological data and
behaviors have been found to be suitable measures of
cognitive load and are now available in many consumer
devices. An advantage of these measures over subjective
and performance-based methods is that they are captured
in real time and implicitly while the user interacts
with the system, which makes them suitable for
real-world applications. On the other hand, emotion
interference can change physiological responses and
make accurate cognitive load measurement more
challenging. In this work, we have studied six galvanic
skin response (GSR) features in detection of four
cognitive load levels with the interference of
emotions. The data was derived from two arithmetic
experiments and emotions were induced by displaying
pleasant and unpleasant pictures in the background. Two
types of classifiers were applied to detect cognitive
load levels. Results from both studies indicate that
the features explored can detect four and two cognitive
load levels with high accuracy even under emotional
changes. More specifically, rise duration and
accumulative GSR are the common best features in all
situations, having the highest accuracy especially in
the presence of emotions.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
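The abstract above singles out rise duration and accumulative GSR as the best
features. A small sketch under assumed definitions follows: accumulative GSR
as the area under the signal over a task window, and rise duration as the
total time the signal is increasing. The article's exact feature definitions
may differ.

import numpy as np

def gsr_features(signal, sampling_rate_hz):
    signal = np.asarray(signal, dtype=float)
    dt = 1.0 / sampling_rate_hz
    accumulative = np.trapz(signal, dx=dt)     # area under the curve
    rising = np.diff(signal) > 0
    rise_duration = rising.sum() * dt          # seconds spent rising
    return {"accumulative_gsr": accumulative, "rise_duration_s": rise_duration}

# Toy signal: 4 Hz sampling, 3 seconds of readings.
sig = [0.1, 0.1, 0.2, 0.4, 0.5, 0.5, 0.4, 0.4, 0.6, 0.7, 0.7, 0.6]
print(gsr_features(sig, sampling_rate_hz=4))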
@Article{Duncan:2017:ESC,
author = "Brittany A. Duncan and Robin R. Murphy",
title = "Effects of Speed, Cyclicity, and Dimensionality on
Distancing, Time, and Preference in Human--Aerial
Vehicle Interactions",
journal = j-TIIS,
volume = "7",
number = "3",
pages = "13:1--13:??",
month = oct,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/2983927",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article will present a simulation-based approach
to testing multiple variables in the behavior of a
small Unmanned Aerial Vehicle (sUAV), inspired by
insect and animal motions, to understand how these
variables impact time of interaction, preference for
interaction, and distancing in Human-Robot Interaction
(HRI). Previous work has focused on communicating
directionality of flight, intentionality of the robot,
and perception of motion in sUAVs, while interactions
involving direct distancing from these vehicles have
been limited to a single study (likely due to safety
concerns). This study takes place in a Cave Automatic
Virtual Environment (CAVE) to maintain a sense of scale
and immersion with the users, while also allowing for
safe interaction. Additionally, the two-alternative
forced-choice method is employed as a unique
methodology to the study of collocated HRI in order to
both study the impact of these variables on preference
and allow participants to choose whether or not to
interact with a specific robot. This article will be of
interest to end-users of sUAV technologies to encourage
appropriate distancing based on their application,
practitioners in HRI to understand the use of this new
methodology, and human-aerial vehicle researchers to
understand the perception of these vehicles by 64 naive
users. Results suggest that low speed (by 0.27m, $ p <
0.02$) and high cyclicity (by 0.28m, $ p < 0.01$)
expressions can be used to increase distancing; that
low speed (by 4.4s, $ p < 0.01$) and three-dimensional
(by 2.6s, $ p < 0.01$) expressions can be used to
decrease time of interaction; and low speed (by 10.4\%,
$ p < 0.01$) expressions are less preferred for
passability in human-aerial vehicle interactions.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kucher:2017:ALV,
author = "Kostiantyn Kucher and Carita Paradis and Magnus
Sahlgren and Andreas Kerren",
title = "Active Learning and Visual Analytics for Stance
Classification with {ALVA}",
journal = j-TIIS,
volume = "7",
number = "3",
pages = "14:1--14:??",
month = oct,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3132169",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The automatic detection and classification of stance
(e.g., certainty or agreement) in text data using
natural language processing and machine-learning
methods creates an opportunity to gain insight into the
speakers' attitudes toward their own and other people's
utterances. However, identifying stance in text
presents many challenges related to training data
collection and classifier training. To facilitate the
entire process of training a stance classifier, we
propose a visual analytics approach, called ALVA, for
text data annotation and visualization. ALVA's
interplay with the stance classifier follows an active
learning strategy to select suitable candidate
utterances for manual annotation. Our approach supports
annotation process management and provides the
annotators with a clean user interface for labeling
utterances with multiple stance categories. ALVA also
contains a visualization method to help analysts of the
annotation and training process gain a better
understanding of the categories used by the annotators.
The visualization uses a novel visual representation,
called CatCombos, which groups individual annotation
items by the combination of stance categories.
Additionally, our system makes available a visualization
of a vector space model that is itself based on
utterances. ALVA is already being used by our domain
experts in linguistics and computational linguistics to
improve the understanding of stance phenomena and to
build a stance classifier for applications such as
social media monitoring.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
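The ALVA abstract above describes an active-learning loop that selects
candidate utterances for manual annotation. The sketch below shows one common
query strategy (uncertainty sampling) purely as an illustration; the article's
actual classifier and selection strategy are not specified in the abstract.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np

labeled = ["i am certain this works", "maybe it could help"]
labels = ["certainty", "uncertainty"]
unlabeled = ["this definitely holds", "perhaps we should check", "it might be fine"]

vec = TfidfVectorizer()
X_lab = vec.fit_transform(labeled)
X_unl = vec.transform(unlabeled)

clf = LogisticRegression().fit(X_lab, labels)
probs = clf.predict_proba(X_unl)
confidence = probs.max(axis=1)
# Ask annotators about the utterances the classifier is least confident about.
query_order = np.argsort(confidence)
print([unlabeled[i] for i in query_order])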
@Article{Masai:2017:EFE,
author = "Katsutoshi Masai and Kai Kunze and Yuta Sugiura and
Masa Ogata and Masahiko Inami and Maki Sugimoto",
title = "Evaluation of Facial Expression Recognition by a Smart
Eyewear for Facial Direction Changes, Repeatability,
and Positional Drift",
journal = j-TIIS,
volume = "7",
number = "4",
pages = "15:1--15:??",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3012941",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article presents a novel smart eyewear that
recognizes the wearer's facial expressions in daily
scenarios. Our device uses embedded photo-reflective
sensors and machine learning to recognize the wearer's
facial expressions. Our approach focuses on skin
deformations around the eyes that occur when the wearer
changes his or her facial expressions. With small
photo-reflective sensors, we measure the distances
between the skin surface on the face and the 17 sensors
embedded in the eyewear frame. A Support Vector Machine
(SVM) algorithm is then applied to the information
collected by the sensors. The sensors can cover various
facial muscle movements. In addition, they are small
and light enough to be integrated into daily-use
glasses. Our evaluation of the device shows its
robustness to noise from the wearer's facial direction
changes and slight changes in the glasses' position, as
well as the reliability of the device's recognition
capacity. The main contributions
of our work are as follows: (1) We evaluated the
recognition accuracy in daily scenes, showing 92.8\%
accuracy regardless of facial direction and
removal/remount. Our device can recognize facial
expressions with 78.1\% accuracy for repeatability and
87.7\% accuracy in the case of positional drift. (2) We
designed and implemented the device by taking usability
and social acceptability into account. The device looks
like a conventional eyewear so that users can wear it
anytime, anywhere. (3) Initial field trials in a daily
life setting were undertaken to test the usability of
the device. Our work is one of the first attempts to
recognize and evaluate a variety of facial expressions
with an unobtrusive wearable device.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
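The classification step described in the abstract above (an SVM over vectors
of readings from the 17 embedded photo-reflective sensors) can be sketched as
follows. The data here is synthetic and the kernel and parameters are
assumptions, not the authors' settings.

import numpy as np
from sklearn.svm import SVC

rng = np.random.default_rng(0)
n_per_class, n_sensors = 20, 17
# Synthetic skin-distance readings for two expression classes.
neutral = rng.normal(0.0, 0.1, size=(n_per_class, n_sensors))
smile = rng.normal(0.5, 0.1, size=(n_per_class, n_sensors))

X = np.vstack([neutral, smile])
y = ["neutral"] * n_per_class + ["smile"] * n_per_class

clf = SVC(kernel="rbf").fit(X, y)
print(clf.predict(rng.normal(0.5, 0.1, size=(1, n_sensors))))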
@Article{Yan:2017:EAR,
author = "Shuo Yan and Gangyi Ding and Hongsong Li and Ningxiao
Sun and Zheng Guan and Yufeng Wu and Longfei Zhang and
Tianyu Huang",
title = "Exploring Audience Response in Performing Arts with a
Brain-Adaptive Digital Performance System",
journal = j-TIIS,
volume = "7",
number = "4",
pages = "16:1--16:??",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3009974",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Audience response is an important indicator of the
quality of performing arts. Psychophysiological
measurements enable researchers to perceive and
understand audience response by collecting their
bio-signals during a live performance. However, how the
audience responds and how the performance is affected by
these responses are key questions that are hard to
address. To tackle this issue, we designed a
brain-computer interactive system called Brain-Adaptive
Digital Performance (BADP) for the measurement and
analysis of audience engagement levels through an
interactive three-dimensional virtual theater. The BADP
system monitors audience engagement in real time using
electroencephalography (EEG) measurements and tries to
improve it by applying content-related performing cues
when the engagement level decreases. In this article,
we generate an EEG-based engagement level and build
thresholds to determine the moments of disengagement
and re-engagement. In the experiment, we simulated two
types of theatre performance, using the BADP system to
provide participants with a high-fidelity virtual
environment. We also created content-related performing
cues for each performance under three different
conditions. The results of these evaluations show that
our algorithm can accurately detect the engagement
status and that the performing cues have a positive
impact on regaining audience engagement across different
performance types. Our findings open new perspectives
in audience-based theatre performance design.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Gotz:2017:ACM,
author = "David Gotz and Shun Sun and Nan Cao and Rita Kundu and
Anne-Marie Meyer",
title = "Adaptive Contextualization Methods for Combating
Selection Bias during High-Dimensional Visualization",
journal = j-TIIS,
volume = "7",
number = "4",
pages = "17:1--17:??",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3009973",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Large and high-dimensional real-world datasets are
being gathered across a wide range of application
disciplines to enable data-driven decision making.
Interactive data visualization can play a critical role
in allowing domain experts to select and analyze data
from these large collections. However, there is a
critical mismatch between the very large number of
dimensions in complex real-world datasets and the much
smaller number of dimensions that can be concurrently
visualized using modern techniques. This gap in
dimensionality can result in high levels of selection
bias that go unnoticed by users. The bias can in turn
threaten the very validity of any subsequent insights.
This article describes Adaptive Contextualization (AC),
a novel approach to interactive visual data selection
that is specifically designed to combat the invisible
introduction of selection bias. The AC approach (1)
monitors and models a user's visual data selection
activity, (2) computes metrics over that model to
quantify the amount of selection bias after each step,
(3) visualizes the metric results, and (4) provides
interactive tools that help users assess and avoid
bias-related problems. This article expands on an
earlier article presented at ACM IUI 2016 [16] by
providing a more detailed review of the AC methodology
and additional evaluation results.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
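The Adaptive Contextualization abstract above mentions computing metrics that
quantify selection bias after each selection step. As an illustration only,
the sketch below compares an attribute's distribution in the selected subset
against the full dataset using total variation distance; the article defines
its own metrics, so the distance used here is an assumption.

from collections import Counter

def distribution(values):
    counts = Counter(values)
    total = sum(counts.values())
    return {k: v / total for k, v in counts.items()}

def selection_bias(full_values, selected_values):
    # Total variation distance between the two attribute distributions.
    p, q = distribution(full_values), distribution(selected_values)
    keys = set(p) | set(q)
    return 0.5 * sum(abs(p.get(k, 0) - q.get(k, 0)) for k in keys)

full = ["A"] * 50 + ["B"] * 30 + ["C"] * 20
selected = ["A"] * 18 + ["B"] * 2   # user drilled into mostly "A" records
print(round(selection_bias(full, selected), 3))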
@Article{DiSciascio:2017:SES,
author = "Cecilia {Di Sciascio} and Vedran Sabol and Eduardo
Veas",
title = "Supporting Exploratory Search with a Visual
User-Driven Approach",
journal = j-TIIS,
volume = "7",
number = "4",
pages = "18:1--18:??",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3009976",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Whenever users engage in gathering and organizing new
information, searching and browsing activities emerge
at the core of the exploration process. As the process
unfolds and new knowledge is acquired, interest drifts
occur inevitably and need to be accounted for. Despite
the advances in retrieval and recommender algorithms,
real-world interfaces have remained largely unchanged:
results are delivered in a relevance-ranked list.
However, it quickly becomes cumbersome to reorganize
resources along new interests, as any new search brings
new results. We introduce an interactive user-driven
tool that aims at supporting users in understanding,
refining, and reorganizing documents on the fly as
information needs evolve. Decisions regarding visual
and interactive design aspects are tightly grounded on
a conceptual model for exploratory search. In other
words, the different views in the user interface
address stages of awareness, exploration, and
explanation unfolding along the discovery process,
supported by a set of text-mining methods. A formal
evaluation showed that gathering items relevant to a
particular topic of interest with our tool incurs a
lower cognitive load compared to a traditional ranked
list. A second study reports on usage patterns and
usability of the various interaction techniques within
a free, unsupervised setting.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Davis:2017:QCC,
author = "N. Davis and C. Hsiao and K. Y. Singh and B. Lin and
B. Magerko",
title = "Quantifying Collaboration with a Co-Creative Drawing
Agent",
journal = j-TIIS,
volume = "7",
number = "4",
pages = "19:1--19:??",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3009981",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Jan 22 17:18:51 MST 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article describes a new technique for quantifying
creative collaboration and applies it to the user study
evaluation of a co-creative drawing agent. We present a
cognitive framework called creative sense-making that
provides a new method to visualize and quantify the
interaction dynamics of creative collaboration, for
example, the rhythm of interaction, style of turn
taking, and the manner in which participants are
mutually making sense of a situation. The creative
sense-making framework includes a qualitative coding
technique, interaction coding software, an analysis
method, and the cognitive theory behind these
applications. This framework and analysis method are
applied to empirical studies of the Drawing Apprentice
collaborative sketching system to compare human
collaboration with a co-creative AI agent vs. a Wizard
of Oz setup. The analysis demonstrates how the proposed
technique can be used to analyze interaction data using
continuous functions (e.g., integrations and moving
averages) to measure and evaluate how collaborations
unfold through time.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
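The abstract above mentions analyzing coded interaction data with continuous
functions such as moving averages to see how a collaboration unfolds over
time. A minimal sketch of that summary step follows; the per-turn scores are
hypothetical, since the creative sense-making coding scheme assigns its own
values.

import numpy as np

def moving_average(scores, window=3):
    # Smooth a sequence of per-turn codes into a continuous trend.
    scores = np.asarray(scores, dtype=float)
    kernel = np.ones(window) / window
    return np.convolve(scores, kernel, mode="valid")

turn_scores = [1, 2, 2, 3, 1, 0, 2, 3, 3]   # hypothetical coded turns over time
print(moving_average(turn_scores, window=3))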
@Article{Lin:2018:GES,
author = "Yu-Ru Lin and Nan Cao",
title = "Guest Editorial: Special Issue on Interactive Visual
Analysis of Human and Crowd Behaviors",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3178569",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The analysis of human behaviors has impacted many
social and commercial domains. How could interactive
visual analytic systems be used to further provide
behavioral insights? This editorial introduction
features emerging research trends related to this
question. The four articles accepted for this special
issue represent recent progress: they identify research
challenges arising from analysis of human and crowd
behaviors, and present novel methods in visual analysis
to address those challenges and help make behavioral
data more useful.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Polack:2018:CIM,
author = "Peter J. {Polack Jr.} and Shang-Tse Chen and Minsuk
Kahng and Kaya {De Barbaro} and Rahul Basole and
Moushumi Sharmin and Duen Horng Chau",
title = "Chronodes: Interactive Multifocus Exploration of Event
Sequences",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152888",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The advent of mobile health (mHealth) technologies
challenges the capabilities of current visualizations,
interactive tools, and algorithms. We present
Chronodes, an interactive system that unifies data
mining and human-centric visualization techniques to
support explorative analysis of longitudinal mHealth
data. Chronodes extracts and visualizes frequent event
sequences that reveal chronological patterns across
multiple participant timelines of mHealth data. It then
combines novel interaction and visualization techniques
to enable multifocus event sequence analysis, which
allows health researchers to interactively define,
explore, and compare groups of participant behaviors
using event sequence combinations. Through summarizing
insights gained from a pilot study with 20 behavioral
and biomedical health experts, we discuss Chronodes's
efficacy and potential impact in the mHealth domain.
Ultimately, we outline important open challenges in
mHealth, and offer recommendations and design
guidelines for future research.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Fu:2018:VVA,
author = "Siwei Fu and Yong Wang and Yi Yang and Qingqing Bi and
Fangzhou Guo and Huamin Qu",
title = "{VisForum}: a Visual Analysis System for Exploring
User Groups in Online Forums",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3162075",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "User grouping in asynchronous online forums is a
common phenomenon nowadays. People with similar
backgrounds or shared interests like to get together in
group discussions. As tens of thousands of archived
conversational posts accumulate, challenges emerge for
forum administrators and analysts to effectively
explore user groups in large-volume threads and gain
meaningful insights into the hierarchical discussions.
Identifying and comparing groups in discussion threads
is nontrivial, since the number of users and posts
increases with time and noise may hamper the detection
of user groups. Researchers in data mining fields have
proposed a large body of algorithms to explore user
grouping. However, the mining results are not intuitive
to understand, and it is difficult for users to explore
the details. To address these issues, we present VisForum,
a visual analytic system allowing people to
interactively explore user groups in a forum. We work
closely with two educators who have released courses in
Massive Open Online Courses (MOOC) platforms to compile
a list of design goals to guide our design. Then, we
design and implement a multi-coordinated interface as
well as several novel glyphs, i.e., group glyph, user
glyph, and set glyph, with different granularities.
Accordingly, we propose the group Detecting \& Sorting
Algorithm to reduce noise in a collection of posts, and
employ the concept of ``forum-index'' for users to
identify high-impact forum members. Two case studies
using real-world datasets demonstrate the usefulness of
the system and the effectiveness of novel glyph
designs. Furthermore, we conduct an in-lab user study
to evaluate the usability of VisForum.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Steptoe:2018:VAF,
author = "Michael Steptoe and Robert Kr{\"u}ger and Rolando
Garcia and Xing Liang and Ross Maciejewski",
title = "A Visual Analytics Framework for Exploring Theme Park
Dynamics",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3162076",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "In 2015, the top 10 largest amusement park
corporations saw a combined annual attendance of over
400 million visitors. Some of the most popular theme
parks in the world average 44,000 visitors per day.
These visitors ride
attractions, shop for souvenirs, and dine at local
establishments; however, a critical component of their
visit is the overall park experience. This experience
depends on the wait time for rides, the crowd flow in
the park, and various other factors linked to the crowd
dynamics and human behavior. As such, better insight
into visitor behavior can help theme parks devise
competitive strategies for improved customer
experience. Research into the use of attractions,
facilities, and exhibits can be studied, and as
behavior profiles emerge, park operators can also
identify anomalous behaviors of visitors which can
improve safety and operations. In this article, we
present a visual analytics framework for analyzing
crowd dynamics in theme parks. Our proposed framework
is designed to support behavioral analysis by
summarizing patterns and detecting anomalies. We
provide methodologies to link visitor movement data,
communication data, and park infrastructure data. This
combination of data sources enables a semantic analysis
of who, what, when, and where, enabling analysts to
explore visitor-visitor interactions and
visitor-infrastructure interactions. Analysts can
identify behaviors at the macro level through semantic
trajectory clustering views for group behavior
dynamics, as well as at the micro level using
trajectory traces and a novel visitor network analysis
view. We demonstrate the efficacy of our framework
through two case studies of simulated theme park
visitors.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wang:2018:VRI,
author = "Yong Wang and Conglei Shi and Liangyue Li and Hanghang
Tong and Huamin Qu",
title = "Visualizing Research Impact through Citation Data",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3132744",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Research impact plays a critical role in evaluating
the research quality and influence of a scholar, a
journal, or a conference. Many researchers have
attempted to quantify research impact by introducing
different types of metrics based on citation data, such
as $h$-index, citation count, and impact factor. These
metrics are widely used in the academic community.
However, quantitative metrics are highly aggregated in
most cases and sometimes biased, which probably results
in the loss of impact details that are important for
comprehensively understanding research impact. For
example, which research area does a researcher have
great research impact on? How does the research impact
change over time? How do collaborators affect the
research impact of an individual? Simple quantitative
metrics can hardly help answer such kinds of questions,
since more detailed exploration of the
citation data is needed. Previous work on visualizing
citation data usually only shows limited aspects of
research impact and may suffer from other problems
including visual clutter and scalability issues. To
fill this gap, we propose an interactive visualization
tool, ImpactVis, for better exploration of research
impact through citation data. Case studies and in-depth
expert interviews are conducted to demonstrate the
effectiveness of ImpactVis.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Nourashrafeddin:2018:VAI,
author = "Seyednaser Nourashrafeddin and Ehsan Sherkat and
Rosane Minghim and Evangelos E. Milios",
title = "A Visual Approach for Interactive Keyterm-Based
Clustering",
journal = j-TIIS,
volume = "8",
number = "1",
pages = "6:1--6:??",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181669",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "The keyterm-based approach is arguably intuitive for
users to direct text-clustering processes and adapt
results to various applications in text analysis. Its
way of markedly influencing the results, for instance,
by expressing important terms in relevance order,
requires little knowledge of the algorithm and has
predictable effect, speeding up the task. This article
first presents a text-clustering algorithm that can
easily be extended into an interactive algorithm. We
evaluate its performance against state-of-the-art
clustering algorithms in unsupervised mode. Next, we
propose three interactive versions of the algorithm
based on keyterm labeling, document labeling, and
hybrid labeling. We then demonstrate that keyterm
labeling is more effective than document labeling in
text clustering. Finally, we propose a visual approach
to support the keyterm-based version of the algorithm.
Visualizations are provided for the whole collection as
well as for detailed views of document and cluster
relationships. We show the effectiveness and
flexibility of our framework, Vis-Kt, by presenting
typical clustering cases on real text document
collections. A user study is also reported that reveals
overwhelmingly positive acceptance toward keyterm-based
clustering.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Fiebrink:2018:ISI,
author = "Rebecca Fiebrink and Marco Gillies",
title = "Introduction to the Special Issue on Human-Centered
Machine Learning",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "7:1--7:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3205942",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Machine learning is one of the most important and
successful techniques in contemporary computer science.
Although it can be applied to myriad problems of human
interest, research in machine learning is often framed
in an impersonal way, as merely algorithms being
applied to model data. However, this viewpoint hides
considerable human work of tuning the algorithms,
gathering the data, deciding what should be modeled in
the first place, and using the outcomes of machine
learning in the real world. Examining machine learning
from a human-centered perspective includes explicitly
recognizing human work, as well as reframing machine
learning workflows based on situated human working
practices, and exploring the co-adaptation of humans
and intelligent systems. A human-centered understanding
of machine learning in human contexts can lead not only
to more usable machine learning tools, but to new ways
of understanding what machine learning is good for and
how to make it more useful. This special issue brings
together nine articles that present different ways to
frame machine learning in a human context. They
represent very different application areas (from
medicine to audio) and methodologies (including machine
learning methods, human-computer interaction methods,
and hybrids), but they all explore the human contexts
in which machine learning is used. This introduction
summarizes the articles in this issue and draws out
some common themes.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dudley:2018:RUI,
author = "John J. Dudley and Per Ola Kristensson",
title = "A Review of User Interface Design for Interactive
Machine Learning",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "8:1--8:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3185517",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Interactive Machine Learning (IML) seeks to complement
human perception and intelligence by tightly
integrating these strengths with the computational
power and speed of computers. The interactive process
is designed to involve input from the user but does not
require the background knowledge or experience that
might be necessary to work with more traditional
machine learning techniques. Under the IML process,
non-experts can apply their domain knowledge and
insight over otherwise unwieldy datasets to find
patterns of interest or develop complex data-driven
applications. This process is co-adaptive in nature and
relies on careful management of the interaction between
human and machine. User interface design is fundamental
to the success of this approach, yet there is a lack of
consolidated principles on how such an interface should
be implemented. This article presents a detailed review
and characterisation of Interactive Machine Learning
from an interactive systems perspective. We propose and
describe a structural and behavioural model of a
generalised IML system and identify solution principles
for building effective interfaces for IML. Where
possible, these emergent solution principles are
contextualised by reference to the broader
human-computer interaction literature. Finally, we
identify strands of user interface research key to
unlocking more efficient and productive non-expert
interactive machine learning applications.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2018:UML,
author = "Nan-Chen Chen and Margaret Drouhard and Rafal
Kocielnik and Jina Suh and Cecilia R. Aragon",
title = "Using Machine Learning to Support Qualitative Coding
in Social Science: Shifting the Focus to Ambiguity",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "9:1--9:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3185515",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Machine learning (ML) has become increasingly
influential to human society, yet the primary
advancements and applications of ML are driven by
research in only a few computational disciplines. Even
applications that affect or analyze human behaviors and
social structures are often developed with limited
input from experts outside of computational fields.
Social scientists, experts trained to examine and
explain the complexity of human behavior and
interactions in the world, have considerable expertise
to contribute to the development of ML applications for
human-generated data, and their analytic practices
could benefit from more human-centered ML methods.
Although a few researchers have highlighted some gaps
between ML and social sciences [51, 57, 70], most
discussions only focus on quantitative methods. Yet
many social science disciplines rely heavily on
qualitative methods to distill patterns that are
challenging to discover through quantitative data. One
common analysis method for qualitative data is
qualitative coding. In this article, we highlight three
challenges of applying ML to qualitative coding.
Additionally, we utilize our experience of designing a
visual analytics tool for collaborative qualitative
coding to demonstrate the potential in using ML to
support qualitative coding by shifting the focus to
identifying ambiguity. We illustrate dimensions of
ambiguity and discuss the relationship between
disagreement and ambiguity. Finally, we propose three
research directions to ground ML applications for
social science as part of the progression toward
human-centered machine learning.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Smith:2018:PUC,
author = "Jim Smith and Phil Legg and Milos Matovic and
Kristofer Kinsey",
title = "Predicting User Confidence During Visual Decision
Making",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "10:1--10:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3185524",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "People are not infallible, consistent ``oracles'':
their confidence in decision-making may vary
significantly between tasks and over time. We have
previously reported the benefits of using an interface
and algorithms that explicitly captured and exploited
users' confidence: error rates were reduced by up to
50\% for an industrial multi-class learning problem;
and the number of interactions required in a
design-optimisation context was reduced by 33\%. Having
access to users' confidence judgements could
significantly benefit intelligent interactive systems
in industry, in areas such as intelligent tutoring
systems and in health care. There are many reasons for
wanting to capture information about confidence
implicitly. Some are ergonomic, but others are more
``social'', such as wishing to understand (and possibly
take account of) users' cognitive state without
interrupting them. We investigate the hypothesis that
users' confidence can be accurately predicted from
measurements of their behaviour. Eye-tracking systems
were used to capture users' gaze patterns as they
undertook a series of visual decision tasks, after each
of which they reported their confidence on a 5-point
Likert scale. Subsequently, predictive models were
built using ``conventional'' machine learning
approaches for numerical summary features derived from
users' behaviour. We also investigate the extent to
which the deep learning paradigm can reduce the need to
design features specific to each application by
creating ``gaze maps''-visual representations of the
trajectories and durations of users' gaze fixations-and
then training deep convolutional networks on these
images. Treating the prediction of user confidence as a
two-class problem (confident/not confident), we
attained classification accuracy of 88\% for the
scenario of new users on known tasks, and 87\% for
known users on new tasks. Considering the confidence as
an ordinal variable, we produced regression models with
a mean absolute error of $\approx 0.7$ in both cases.
Capturing just a simple subset of non-task-specific
numerical features gave slightly worse, but still quite
high accuracy (e.g., MAE $\approx 1.0$). Results obtained
with gaze maps and convolutional networks are
competitive, despite not having access to longer-term
information about users and tasks, which was vital for
the ``summary'' feature sets. This suggests that the
gaze-map-based approach forms a viable, transferable
alternative to handcrafting features for each different
application. These results provide significant evidence
to confirm our hypothesis, and offer a way of
substantially improving many interactive artificial
intelligence applications via the addition of cheap
non-intrusive hardware and computationally cheap
prediction algorithms.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Dumitrache:2018:CGT,
author = "Anca Dumitrache and Lora Aroyo and Chris Welty",
title = "Crowdsourcing Ground Truth for Medical Relation
Extraction",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "11:1--11:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152889",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Cognitive computing systems require human labeled data
for evaluation and often for training. The standard
practice used in gathering this data minimizes
disagreement between annotators, and we have found this
results in data that fails to account for the ambiguity
inherent in language. We have proposed the CrowdTruth
method for collecting ground truth through
crowdsourcing, which reconsiders the role of people in
machine learning based on the observation that
disagreement between annotators provides a useful
signal for phenomena such as ambiguity in the text. We
report on using this method to build an annotated data
set for medical relation extraction for the cause and
treat relations, and how this data performed in a
supervised training experiment. We demonstrate that by
modeling ambiguity, labeled data gathered from crowd
workers can (1) reach the level of quality of domain
experts for this task while reducing the cost, and (2)
provide better training data at scale than distant
supervision. We further propose and validate new
weighted measures for precision, recall, and F-measure,
which account for ambiguity in both human and machine
performance on this task.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Morrison:2018:VUS,
author = "Cecily Morrison and Kit Huckvale and Bob Corish and
Richard Banks and Martin Grayson and Jonas Dorn and
Abigail Sellen and S{\^a}n Lindley",
title = "Visualizing Ubiquitously Sensed Measures of Motor
Ability in Multiple Sclerosis: Reflections on
Communicating Machine Learning in Practice",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "12:1--12:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181670",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Sophisticated ubiquitous sensing systems are being
used to measure motor ability in clinical settings.
Intended to augment clinical decision-making, the
interpretability of the machine-learning measurements
underneath becomes critical to their use. We explore
how visualization can support the interpretability of
machine-learning measures through the case of Assess
MS, a system to support the clinical assessment of
Multiple Sclerosis. A substantial design challenge is
to make visible the algorithm's decision-making process
in a way that allows clinicians to integrate the
algorithm's result into their own decision process. To
this end, we present a series of design iterations that
probe the challenges in supporting interpretability in
a real-world system. The key contribution of this
article is to illustrate that simply making visible the
algorithmic decision-making process is not helpful in
supporting clinicians in their own decision-making
process. It disregards that people and algorithms make
decisions in different ways. Instead, we propose that
visualisation can provide context to algorithmic
decision-making, rendering observable a range of
internal workings of the algorithm from data quality
issues to the web of relationships generated in the
machine-learning process.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Kim:2018:HLS,
author = "Bongjun Kim and Bryan Pardo",
title = "A Human-in-the-Loop System for Sound Event Detection
and Annotation",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "13:1--13:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3214366",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Labeling of audio events is essential for many tasks.
However, finding sound events and labeling them within
a long audio file is tedious and time-consuming. In
cases where there is very little labeled data (e.g., a
single labeled example), it is often not feasible to
train an automatic labeler because many techniques
(e.g., deep learning) require a large number of
human-labeled training examples. Also, fully automated
labeling may not show sufficient agreement with human
labeling for many uses. To solve this issue, we present
a human-in-the-loop sound labeling system that helps a
user quickly label target sound events in a long audio file.
It lets a user reduce the time required to label a long
audio file (e.g., 20 hours) containing target sounds
that are sparsely distributed throughout the recording
(10\% or less of the audio contains the target) when
there are too few labeled examples (e.g., one) to train
a state-of-the-art machine audio labeling system. To
evaluate the effectiveness of our tool, we performed a
human-subject study. The results show that it helped
participants label target sound events twice as fast as
labeling them manually. In addition to measuring the
overall performance of the proposed system, we also
measure interaction overhead and machine accuracy,
which are two key factors that determine the overall
performance. The analysis shows that an ideal interface
that does not have interaction overhead at all could
speed labeling by as much as a factor of four.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Zhang:2018:ERC,
author = "Amy X. Zhang and Jilin Chen and Wei Chai and Jinjun Xu
and Lichan Hong and Ed Chi",
title = "Evaluation and Refinement of Clustered Search Results
with the Crowd",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "14:1--14:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3158226",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "When searching on the web or in an app, results are
often returned as lists of hundreds to thousands of
items, making it difficult for users to understand or
navigate the space of results. Research has
demonstrated that using clustering to partition search
results into coherent, topical clusters can aid in both
exploration and discovery. Yet clusters generated by an
algorithm for this purpose are often of poor quality
and do not satisfy users. To achieve acceptable
clustered search results, experts must manually
evaluate and refine the clustered results for each
search query, a process that does not scale to large
numbers of search queries. In this article, we
investigate using crowd-based human evaluation to
inspect, evaluate, and improve clusters to create
high-quality clustered search results at scale. We
introduce a workflow that begins by using a collection
of well-known clustering algorithms to produce a set of
clustered search results for a given query. Then, we
use crowd workers to holistically assess the quality of
each clustered search result to find the best one.
Finally, the workflow has the crowd spot and fix
problems in the best result to produce a final output.
We evaluate this workflow on 120 top search queries
from the Google Play Store, some of which have clustered
search results as a result of evaluations and
refinements by experts. Our evaluations demonstrate
that the workflow is effective at reproducing the
evaluation of expert judges and also improves clusters
in a way that agrees with experts and crowds alike.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Self:2018:OLP,
author = "Jessica Zeitz Self and Michelle Dowling and John
Wenskovitch and Ian Crandell and Ming Wang and Leanna
House and Scotland Leman and Chris North",
title = "Observation-Level and Parametric Interaction for
High-Dimensional Data Analysis",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "15:1--15:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3158230",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Exploring high-dimensional data is challenging.
Dimension reduction algorithms, such as weighted
multidimensional scaling, support data exploration by
projecting datasets to two dimensions for
visualization. These projections can be explored
through parametric interaction, tweaking underlying
parameterizations, and observation-level interaction,
directly interacting with the points within the
projection. In this article, we present the results of
a controlled usability study determining the
differences, advantages, and drawbacks among parametric
interaction, observation-level interaction, and their
combination. The study assesses the effects of both
interaction techniques on domain-specific
high-dimensional data analyses performed by non-experts
in statistical algorithms. This study is performed using Andromeda, a
tool that enables both parametric and observation-level
interaction to provide in-depth data exploration. The
results indicate that the two forms of interaction
serve different, but complementary, purposes in gaining
insight through steerable dimension reduction
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Francoise:2018:MSM,
author = "Jules Fran{\c{c}}oise and Fr{\'e}d{\'e}ric
Bevilacqua",
title = "Motion-Sound Mapping through Interaction: an Approach
to User-Centered Design of Auditory Feedback Using
Machine Learning",
journal = j-TIIS,
volume = "8",
number = "2",
pages = "16:1--16:??",
month = jul,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3211826",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:40 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Technologies for sensing movement are expanding toward
everyday use in virtual reality, gaming, and artistic
practices. In this context, there is a need for
methodologies to help designers and users create
meaningful movement experiences. This article discusses
a user-centered approach for the design of interactive
auditory feedback using interactive machine learning.
We discuss Mapping through Interaction, a method for
crafting sonic interactions from corporeal
demonstrations of embodied associations between motion
and sound. It uses an interactive machine learning
approach to build the mapping from user demonstrations,
emphasizing an iterative design process that integrates
acted and interactive experiences of the relationships
between movement and sound. We examine Gaussian Mixture
Regression and Hidden Markov Regression for continuous
movement recognition and real-time sound parameter
generation. We illustrate and evaluate this approach
through an application in which novice users can create
interactive sound feedback based on coproduced gestures
and vocalizations. Results indicate that Gaussian
Mixture Regression and Hidden Markov Regression can
efficiently learn complex motion-sound mappings from
few examples.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sidner:2018:CNT,
author = "Candace L. Sidner and Timothy Bickmore and Bahador
Nooraie and Charles Rich and Lazlo Ring and Mahni
Shayganfar and Laura Vardoulakis",
title = "Creating New Technologies for Companionable Agents to
Support Isolated Older Adults",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "17:1--17:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213050",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "This article reports on the development of
capabilities for (on-screen) virtual agents and robots
to support isolated older adults in their homes. A
real-time architecture was developed to use a virtual
agent or a robot interchangeably to interact via dialog
and gesture with a human user. Users could interact
with either agent on 12 different activities, some of
which included on-screen games, and forms to complete.
The article reports on a pre-study that guided the
choice of interaction activities. A month-long study
with 44 adults between the ages of 55 and 91 assessed
differences in the use of the robot and virtual
agent.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Oviatt:2018:DHS,
author = "S. Oviatt and K. Hang and J. Zhou and K. Yu and F.
Chen",
title = "Dynamic Handwriting Signal Features Predict Domain
Expertise",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "18:1--18:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213309",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "As commercial pen-centric systems proliferate, they
create a parallel need for analytic techniques based on
dynamic writing. Within educational applications,
recent empirical research has shown that signal-level
features of students' writing, such as stroke distance,
pressure and duration, are adapted to conserve total
energy expenditure as they consolidate expertise in a
domain. The present research examined how accurately
three different machine-learning algorithms could
automatically classify users' domain expertise based on
signal features of their writing, without any content
analysis. Compared with an unguided machine-learning
classification accuracy of 71\%, hybrid methods using
empirical-statistical guidance correctly classified
79--92\% of students by their domain expertise level. In
addition to improved accuracy, the hybrid approach
contributed a causal understanding of prediction
success and generalization to new data. These novel
findings open up opportunities to design new automated
learning analytic systems and student-adaptive
educational technologies for the rapidly expanding
sector of commercial pen systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hammond:2018:JAA,
author = "Tracy Hammond and Shalini Priya Ashok Kumar and
Matthew Runyon and Josh Cherian and Blake Williford and
Swarna Keshavabhotla and Stephanie Valentine and Wayne
Li and Julie Linsey",
title = "It's Not Just about Accuracy: Metrics That Matter When
Modeling Expert Sketching Ability",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "19:1--19:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181673",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Design sketching is an important skill for designers,
engineers, and creative professionals, as it allows
them to express their ideas and concepts in a visual
medium. Being a critical and versatile skill for many
different disciplines, courses on design sketching are
often taught in universities. Courses today
predominately rely on pen and paper; however, this
traditional pedagogy is limited by the availability of
human instructors, who can provide personalized
feedback. Using a stylus-based intelligent tutoring
system called SketchTivity, we aim to eventually mimic
the feedback given by an instructor and assess
student-drawn sketches to give students insight into
areas for improvement. To provide effective feedback to
users, it is important to identify what aspects of
their sketches they should work on to improve their
sketching ability. After consulting with several domain
experts in sketching, we came up with several classes
of features that could potentially differentiate expert
and novice sketches. Because improvement on one metric,
such as speed, may result in a decrease in another
metric, such as accuracy, the creation of a single
score may not mean much to the user. We attempted to
create a single internal score that represents overall
drawing skill so that the system can track improvement
over time and found that this score correlates highly
with expert rankings. We gathered over 2,000 sketches
from 20 novices and four experts for analysis. We
identified key metrics for quality assessment that were
shown to significantly correlate with the quality of
expert sketches and provide insight into providing
intelligent user feedback in the future.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Koskela:2018:PIR,
author = "Markus Koskela and Petri Luukkonen and Tuukka Ruotsalo
and Mats Sj{\"O}berg and Patrik Flor{\'e}en",
title = "Proactive Information Retrieval by Capturing Search
Intent from Primary Task Context",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "20:1--20:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3150975",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "A significant fraction of information searches are
motivated by the user's primary task. An ideal search
engine would be able to use information captured from
the primary task to proactively retrieve useful
information. Previous work has shown that many
information retrieval activities depend on the primary
task in which the retrieved information is to be used,
but fairly little research has focused on methods
that automatically learn the informational intents from
the primary task context. We study how the implicit
primary task context can be used to model the user's
search intent and to proactively retrieve relevant and
useful information. Data comprising logs from a user
study, in which users are writing an essay, demonstrate
that users' search intents can be captured from the
task and relevant and useful information can be
proactively retrieved. Data from simulations with
several datasets of different complexity show that the
proposed approach of using primary task context
generalizes to a variety of data. Our findings have
implications for the design of proactive search systems
that can infer users' search intent implicitly by
monitoring users' primary task activities.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Narzt:2018:ECA,
author = "Wolfgang Narzt and Otto Weichselbaum and Gustav
Pomberger and Markus Hofmarcher and Michael Strauss and
Peter Holzkorn and Roland Haring and Monika Sturm",
title = "Estimating Collective Attention toward a Public
Display",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "21:1--21:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3230715",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Enticing groups of passers-by to focused interaction
with a public display requires the display system to
take appropriate action that depends on how much
attention the group is already paying to the display.
In the design of such a system, we might want to
present the content so that it indicates that a part of
the group that is looking head-on at the display has
already been registered and is addressed individually,
whereas it simultaneously emits a strong audio signal
that makes the inattentive rest of the group turn
toward it. The challenge here is to define and delimit
adequate mixed attention states for groups of people,
allowing for classifying collective attention based on
inhomogeneous variants of individual attention, i.e.,
where some group members might be highly attentive,
others even interacting with the public display, and
some unperceptive. In this article, we present a model
for estimating collective human attention toward a
public display and investigate technical methods for
practical implementation that employs measurement of
physical expressive features of people appearing within
the display's field of view (i.e., the basis for
deriving a person's attention). We delineate strengths
and weaknesses and prove the potentials of our model by
experimentally exerting influence on the attention of
groups of passers-by in a public gaming scenario.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hossain:2018:ASM,
author = "H. M. Sajjad Hossain and Sreenivasan R. Ramamurthy and
Md Abdullah {Al Hafiz Khan} and Nirmalya Roy",
title = "An Active Sleep Monitoring Framework Using Wearables",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "22:1--22:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3185516",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Sleep is the most important aspect of healthy and
active living. The right amount of sleep at the right
time helps an individual to protect his or her
physical, mental, and cognitive health and maintain his
or her quality of life. The most durative of the
Activities of Daily Living (ADL), sleep has a major
synergic influence on a person's functional, behavioral,
and cognitive health. A deep understanding of sleep
behavior and its relationship with its physiological
signals, and contexts (such as eye or body movements),
is necessary to design and develop a robust intelligent
sleep monitoring system. In this article, we propose an
intelligent algorithm to detect the microscopic states
of sleep that fundamentally constitute the components
of good and bad sleeping behaviors and thus help shape
the formative assessment of sleep quality. Our initial
analysis includes the investigation of several
classification techniques to identify and correlate the
relationship of microscopic sleep states with overall
sleep behavior. Subsequently, we also propose an online
algorithm based on change point detection to process
and classify the microscopic sleep states. We also
develop a lightweight version of the proposed algorithm
for real-time sleep monitoring, recognition, and
assessment at scale. For a larger deployment of our
proposed model across a community of individuals, we
propose an active-learning-based methodology to reduce
the effort of ground-truth data collection and
labeling. Finally, we evaluate the performance of our
proposed algorithms on real data traces and demonstrate
the efficacy of our models for detecting and assessing
the fine-grained sleep states beyond an individual.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Park:2018:MFU,
author = "Souneil Park and Joan Serr{\`a} and Enrique Frias
Martinez and Nuria Oliver",
title = "{MobInsight}: a Framework Using Semantic Neighborhood
Features for Localized Interpretations of Urban
Mobility",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "23:1--23:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3158433",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Collective urban mobility embodies the residents'
local insights on the city. Mobility practices of the
residents are produced from their spatial choices,
which involve various considerations such as the
atmosphere of destinations, distance, past experiences,
and preferences. The advances in mobile computing and
the rise of geo-social platforms have provided the
means for capturing the mobility practices; however,
interpreting the residents' insights is challenging due
to the scale and complexity of an urban environment and
its unique context. In this article, we present
MobInsight, a framework for making localized
interpretations of urban mobility that reflect various
aspects of the urbanism. MobInsight extracts a rich set
of neighborhood features through holistic semantic
aggregation, and models the mobility between all-pairs
of neighborhoods. We evaluate MobInsight with the
mobility data of Barcelona and demonstrate diverse
localized and semantically rich interpretations.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Carreno-Medrano:2018:PVG,
author = "Pamela Carreno-Medrano and Sylvie Gibet and
Pierre-Fran{\c{C}}ois Marteau",
title = "Perceptual Validation for the Generation of Expressive
Movements from End-Effector Trajectories",
journal = j-TIIS,
volume = "8",
number = "3",
pages = "24:1--24:??",
month = aug,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3150976",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Endowing animated virtual characters with emotionally
expressive behaviors is paramount to improving the
quality of the interactions between humans and virtual
characters. Full-body motion, in particular, with its
subtle kinematic variations, represents an effective
way of conveying emotionally expressive content.
However, before synthesizing expressive full-body
movements, it is necessary to identify and understand
what qualities of human motion are salient to the
perception of emotions and how these qualities can be
exploited to generate novel and equally expressive
full-body movements. Based on previous studies, we
argue that it is possible to perceive and generate
expressive full-body movements from a limited set of
joint trajectories, including end-effector trajectories
and additional constraints such as pelvis and elbow
trajectories. Hence, these selected trajectories define
a significant and reduced motion space, which is
adequate for the characterization of the expressive
qualities of human motion and that is both suitable for
the analysis and generation of emotionally expressive
full-body movements. The purpose and main contribution
of this work is the methodological framework we defined
and used to assess the validity and applicability of
the selected trajectories for the perception and
generation of expressive full-body movements. This
framework consists of the creation of a motion capture
database of expressive theatrical movements, the
development of a motion synthesis system based on
trajectories re-played or re-sampled and inverse
kinematics, and two perceptual studies.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Knott:2018:ATI,
author = "Benjamin A. Knott and Jonathan Gratch and Angelo
Cangelosi and James Caverlee",
title = "{{\booktitle{ACM Transactions on Interactive
Intelligent Systems (TiiS)}}} Special Issue on Trust
and Influence in Intelligent Human-Machine
Interaction",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "25:1--25:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3281451",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3281451",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wagner:2018:MHR,
author = "Alan R. Wagner and Paul Robinette and Ayanna Howard",
title = "Modeling the Human-Robot Trust Phenomenon: a
Conceptual Framework based on Risk",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "26:1--26:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152890",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3152890",
abstract = "This article presents a conceptual framework for
human-robot trust which uses computational
representations inspired by game theory to represent a
definition of trust, derived from social psychology.
This conceptual framework generates several testable
hypotheses related to human-robot trust. This article
examines these hypotheses and a series of experiments
we have conducted which both provide support for and
also conflict with our framework for trust. We also
discuss the methodological challenges associated with
investigating trust. The article concludes with a
description of the important areas for future research
on the topic of human-robot trust.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Akash:2018:CMS,
author = "Kumar Akash and Wan-Lin Hu and Neera Jain and Tahira
Reid",
title = "A Classification Model for Sensing Human Trust in
Machines Using {EEG} and {GSR}",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "27:1--27:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3132743",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3132743",
abstract = "Today, intelligent machines interact and collaborate
with humans in a way that demands a greater level of
trust between human and machine. A first step toward
building intelligent machines that are capable of
building and maintaining trust with humans is the
design of a sensor that will enable machines to
estimate human trust level in real time. In this
article, two approaches for developing classifier-based
empirical trust-sensor models are presented that
specifically use electroencephalography and galvanic
skin response measurements. Human subject data
collected from 45 participants is used for feature
extraction, feature selection, classifier training, and
model validation. The first approach considers a
general set of psychophysiological features across all
participants as the input variables and trains a
classifier-based model for each participant, resulting
in a trust-sensor model based on the general feature
set (i.e., a ``general trust-sensor model''). The
second approach considers a customized feature set for
each individual and trains a classifier-based model
using that feature set, resulting in improved mean
accuracy but at the expense of an increase in training
time. This work represents the first use of real-time
psychophysiological measurements for the development of
a human trust sensor. Implications of the work, in the
context of trust management algorithm design for
intelligent machines, are also discussed.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Holbrook:2018:CVI,
author = "Colin Holbrook",
title = "Cues of Violent Intergroup Conflict Diminish
Perceptions of Robotic Personhood",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "28:1--28:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181674",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3181674",
abstract = "Convergent lines of evidence indicate that
anthropomorphic robots are represented using
neurocognitive mechanisms typically employed in social
reasoning about other people. Relatedly, a growing
literature documents that contexts of threat can
exacerbate coalitional biases in social perceptions.
Integrating these research programs, the present
studies test whether cues of violent intergroup
conflict modulate perceptions of the intelligence,
emotional experience, or overall personhood of robots.
In Studies 1 and 2, participants evaluated a large,
bipedal all-terrain robot; in Study 3, participants
evaluated a small, social robot with humanlike facial
and vocal characteristics. Across all studies, cues of
violent conflict caused significant decreases in
perceived robotic personhood, and these shifts were
mediated by parallel reductions in emotional connection
with the robot (with no significant effects of threat
on attributions of intelligence/skill). In addition, in
Study 2, participants in the conflict condition
estimated the large bipedal robot to be less effective
in military combat, and this difference was mediated by
the reduction in perceived robotic personhood. These
results are discussed as they motivate future
investigation into the links among threat, coalitional
bias and human-robot interaction.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chien:2018:ECT,
author = "Shih-Yi Chien and Michael Lewis and Katia Sycara and
Jyi-Shane Liu and Asiye Kumru",
title = "The Effect of Culture on Trust in Automation:
Reliability and Workload",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "29:1--29:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3230736",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3230736",
abstract = "Trust in automation has become a topic of intensive
study since the late 1990s and is of increasing
importance with the advent of intelligent interacting
systems. While the earliest trust experiments involved
human interventions to correct failures/errors in
automated control systems, a majority of subsequent
studies have investigated information acquisition and
analysis decision aiding tasks such as target detection
for which automation reliability is more easily
manipulated. Despite the high level of international
dependence on automation in industry, almost all
current studies have employed Western samples primarily
from the U.S. The present study addresses these gaps by
running a large sample experiment in three (U.S.,
Taiwan, and Turkey) diverse cultures using a ``trust
sensitive task'' consisting of both automated control
and target detection subtasks. This article presents
results for the target detection subtask for which
reliability and task load were manipulated. The current
experiments allow us to determine whether reported
effects are universal or specific to Western culture,
vary in baseline or magnitude, or differ across
cultures. Results generally confirm consistent effects
of manipulations across the three cultures as well as
cultural differences in initial trust and variation in
effects of manipulations consistent with 10 cultural
hypotheses based on Hofstede's Cultural Dimensions and
Leung and Cohen's theory of Cultural Syndromes. These
results provide critical implications and insights for
correct trust calibration and to enhance human trust in
intelligent automation systems across cultures.
Additionally, our results would be useful in designing
intelligent systems for users of different cultures.
Our article presents the following contributions:
First, to the best of our knowledge, this is the first
set of studies that deal with cultural factors across
all the cultural syndromes identified in the literature
by comparing trust in the Honor, Face, and Dignity
cultures. Second, this is the first set of studies that
uses a validated cross-cultural trust measure for
measuring trust in automation. Third, our experiments
are the first to study the dynamics of trust across
cultures.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Baker:2018:TUT,
author = "Anthony L. Baker and Elizabeth K. Phillips and Daniel
Ullman and Joseph R. Keebler",
title = "Toward an Understanding of Trust Repair in Human-Robot
Interaction: Current Research and Future Directions",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "30:1--30:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181671",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3181671",
abstract = "Gone are the days of robots solely operating in
isolation, without direct interaction with people.
Rather, robots are increasingly being deployed in
environments and roles that require complex social
interaction with humans. The implementation of
human-robot teams continues to increase as technology
develops in tandem with the state of human-robot
interaction (HRI) research. Trust, a major component of
human interaction, is an important facet of HRI.
However, the ideas of trust repair and trust violations
are understudied in the HRI literature. Trust repair is
the activity of rebuilding trust after one party breaks
the trust of another. These trust breaks are referred
to as trust violations. Just as with humans, trust
violations with robots are inevitable; as a result, a
clear understanding of the process of HRI trust repair
must be developed in order to ensure that a human-robot
team can continue to perform well after a trust
violation. Previous research on human-automation trust
and human-human trust can serve as starting places for
exploring trust repair in HRI. Although existing models
of human-automation and human-human trust are helpful,
they do not account for some of the complexities of
building and maintaining trust in unique relationships
between humans and robots. The purpose of this article
is to provide a foundation for exploring human-robot
trust repair by drawing upon prior work in the
human-robot, human-automation, and human-human trust
literature, concluding with recommendations for
advancing this body of work.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Wang:2018:TBM,
author = "Yue Wang and Laura R. Humphrey and Zhanrui Liao and
Huanfei Zheng",
title = "Trust-Based Multi-Robot Symbolic Motion Planning with
a Human-in-the-Loop",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "31:1--31:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213013",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3213013",
abstract = "Symbolic motion planning for robots is the process of
specifying and planning robot tasks in a discrete
space, then carrying them out in a continuous space in
a manner that preserves the discrete-level task
specifications. Despite progress in symbolic motion
planning, many challenges remain, including addressing
scalability for multi-robot systems and improving
solutions by incorporating human intelligence. In this
article, distributed symbolic motion planning for
multi-robot systems is developed to address
scalability. More specifically, compositional reasoning
approaches are developed to decompose the global
planning problem, and atomic propositions for
observation, communication, and control are proposed to
address inter-robot collision avoidance. To improve
solution quality and adaptability, a hypothetical
dynamic, quantitative, and probabilistic human-to-robot
trust model is developed to aid this decomposition.
Furthermore, a trust-based real-time switching
framework is proposed to switch between autonomous and
manual motion planning for tradeoffs between task
safety and efficiency. Deadlock- and livelock-free
algorithms are designed to guarantee reachability of
goals with a human-in-the-loop. A set of nontrivial
multi-robot simulations with direct human inputs and
trust evaluation is provided, demonstrating the
successful implementation of the trust-based
multi-robot symbolic motion planning methods.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "31",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Afergan:2018:DR,
author = "Daniel Afergan",
title = "Distinguished Reviewers",
journal = j-TIIS,
volume = "8",
number = "4",
pages = "32:1--32:??",
month = nov,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3283374",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3283374",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "32",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Niewiadomski:2019:AMQ,
author = "Radoslaw Niewiadomski and Ksenia Kolykhalova and
Stefano Piana and Paolo Alborno and Gualtiero Volpe and
Antonio Camurri",
title = "Analysis of Movement Quality in Full-Body Physical
Activities",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "1:1--1:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3132369",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3132369",
abstract = "Full-body human movement is characterized by
fine-grain expressive qualities that humans are easily
capable of exhibiting and recognizing in others'
movement. In sports (e.g., martial arts) and performing
arts (e.g., dance), the same sequence of movements can
be performed in a wide range of ways characterized by
different qualities, often in terms of subtle (spatial
and temporal) perturbations of the movement. Even a
non-expert observer can distinguish between a top-level
and average performance by a dancer or martial artist.
The difference is not in the performed movements--the
same in both cases--but in the ``quality'' of their
performance. In this article, we present a
computational framework aimed at an automated
approximate measure of movement quality in full-body
physical activities. Starting from motion capture data,
the framework computes low-level (e.g., a limb
velocity) and high-level (e.g., synchronization between
different limbs) movement features. Then, this vector
of features is integrated to compute a value aimed at
providing a quantitative assessment of movement quality
approximating the evaluation that an external expert
observer would give of the same sequence of movements.
Next, a system representing a concrete implementation
of the framework is proposed. Karate is adopted as a
testbed. We selected two different katas (i.e.,
detailed choreographies of movements in karate)
characterized by different overall attitudes and
expressions (aggressiveness, meditation), and we asked
seven athletes, having various levels of experience and
age, to perform them. Motion capture data were
collected from the performances and were analyzed with
the system. The results of the automated analysis were
compared with the scores given by 14 karate experts who
rated the same performances. Results show that the
movement-quality scores computed by the system and the
ratings given by the human observers are highly
correlated (Pearson's correlations $r = 0.84$, $p = 0.001$
and $r = 0.75$, $p = 0.005$).",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Ramachandran:2019:TER,
author = "Aditi Ramachandran and Chien-Ming Huang and Brian
Scassellati",
title = "Toward Effective Robot--Child Tutoring: Internal
Motivation, Behavioral Intervention, and Learning
Outcomes",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "2:1--2:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213768",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3213768",
abstract = "Personalized learning environments have the potential
to improve learning outcomes for children in a variety
of educational domains, as they can tailor instruction
based on the unique learning needs of individuals.
Robot tutoring systems can further engage users by
leveraging their potential for embodied social
interaction and take into account crucial aspects of a
learner, such as a student's motivation in learning. In
this article, we demonstrate that motivation in young
learners corresponds to observable behaviors when
interacting with a robot tutoring system, which, in
turn, impact learning outcomes. We first detail a user
study involving children interacting one on one with a
robot tutoring system over multiple sessions. Based on
empirical data, we show that academic motivation
stemming from one's own values or goals as assessed by
the Academic Self-Regulation Questionnaire (SRQ-A)
correlates to observed suboptimal help-seeking behavior
during the initial tutoring session. We then show how
an interactive robot that responds intelligently to
these observed behaviors in subsequent tutoring
sessions can positively impact both student behavior
and learning outcomes over time. These results provide
empirical evidence for the link between internal
motivation, observable behavior, and learning outcomes
in the context of robot--child tutoring. We also
identified an additional suboptimal behavioral feature
within our tutoring environment and demonstrated its
relationship to internal factors of motivation,
suggesting further opportunities to design robot
intervention to enhance learning. We provide insights
on the design of robot tutoring systems aimed to
deliver effective behavioral intervention during
learning interactions for children and present a
discussion on the broader challenges currently faced by
robot--child tutoring systems.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Marge:2019:MDR,
author = "Matthew Marge and Alexander I. Rudnicky",
title = "Miscommunication Detection and Recovery in Situated
Human--Robot Dialogue",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "3:1--3:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3237189",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3237189",
abstract = "Even without speech recognition errors, robots may
face difficulties interpreting natural-language
instructions. We present a method for robustly handling
miscommunication between people and robots in
task-oriented spoken dialogue. This capability is
implemented in TeamTalk, a conversational interface to
robots that supports detection and recovery from the
situated grounding problems of referential ambiguity
and impossible actions. We introduce a representation
that detects these problems and a nearest-neighbor
learning algorithm that selects recovery strategies for
a virtual robot. When the robot encounters a grounding
problem, it looks back on its interaction history to
consider how it resolved similar situations. The
learning method is trained initially on crowdsourced
data but is then supplemented by interactions from a
longitudinal user study in which six participants
performed navigation tasks with the robot. We compare
results collected using a general model to
user-specific models and find that user-specific models
perform best on measures of dialogue efficiency, while
the general model yields the highest agreement with
human judges. Our overall contribution is a novel
approach to detecting and recovering from
miscommunication in dialogue by including situated
context, namely, information from a robot's path
planner and surroundings.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Guo:2019:VEA,
author = "Fangzhou Guo and Tianlong Gu and Wei Chen and Feiran
Wu and Qi Wang and Lei Shi and Huamin Qu",
title = "Visual Exploration of Air Quality Data with a
Time-correlation-partitioning Tree Based on Information
Theory",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "4:1--4:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3182187",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3182187",
abstract = "Discovering the correlations among variables of air
quality data is challenging, because the correlation
time series are long-lasting, multi-faceted, and
information-sparse. In this article, we propose a novel
visual representation, called
Time-correlation-partitioning (TCP) tree, that
compactly characterizes correlations of multiple air
quality variables and their evolutions. A TCP tree is
generated by partitioning the information-theoretic
correlation time series into pieces with respect to the
variable hierarchy and temporal variations, and
reorganizing these pieces into a hierarchically nested
structure. The visual exploration of a TCP tree
provides a sparse data traversal of the correlation
variations and a situation-aware analysis of
correlations among variables. This can help
meteorologists understand the correlations among air
quality variables better. We demonstrate the efficiency
of our approach in a real-world air quality
investigation scenario.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Krokos:2019:EDL,
author = "Eric Krokos and Hsueh-Chen Cheng and Jessica Chang and
Bohdan Nebesh and Celeste Lyn Paul and Kirsten Whitley
and Amitabh Varshney",
title = "Enhancing Deep Learning with Visual Interactions",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "5:1--5:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3150977",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Deep learning has emerged as a powerful tool for
feature-driven labeling of datasets. However, for it to
be effective, it requires a large and finely labeled
training dataset. Precisely labeling a large training
dataset is expensive, time-consuming, and error prone.
In this article, we present a visually driven
deep-learning approach that starts with a coarsely
labeled training dataset and iteratively refines the
labeling through intuitive interactions that leverage
the latent structures of the dataset. Our approach can
be used to (a) alleviate the burden of intensive manual
labeling that captures the fine nuances in a
high-dimensional dataset by simple visual interactions,
(b) replace a complicated (and therefore difficult to
design) labeling algorithm by a simpler (but coarse)
labeling algorithm supplemented by user interaction to
refine the labeling, or (c) use low-dimensional
features (such as the RGB colors) for coarse labeling
and turn to higher-dimensional latent structures that
are progressively revealed by deep learning, for fine
labeling. We validate our approach through use cases on
three high-dimensional datasets and a user study.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Koh:2019:DHG,
author = "Jung In Koh and Josh Cherian and Paul Taele and Tracy
Hammond",
title = "Developing a Hand Gesture Recognition System for
Mapping Symbolic Hand Gestures to Analogous Emojis in
Computer-Mediated Communication",
journal = j-TIIS,
volume = "9",
number = "1",
pages = "6:1--6:??",
month = feb,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3297277",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Mon Mar 4 08:29:41 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
abstract = "Recent trends in computer-mediated communication (CMC)
have not only led to expanded instant messaging through
the use of images and videos but have also expanded
traditional text messaging with richer content in the
form of visual communication markers (VCMs) such as
emoticons, emojis, and stickers. VCMs could prevent the
potential loss of subtle emotional content in CMC that
is otherwise delivered by nonverbal cues conveying
affective and emotional information. However, as the
number of VCMs grows in the selection set, the problem
of VCM entry needs to be addressed. Furthermore,
conventional means of accessing VCMs continue to rely
on input entry methods that are not directly and
intimately tied to expressive nonverbal cues. In this
work, we aim to address this issue by facilitating the
use of an alternative form of VCM entry: hand gestures.
To that end, we propose a user-defined hand gesture set
that is highly representative of a number of VCMs and a
two-stage hand gesture recognition system
(trajectory-based, shape-based) that can identify these
user-defined hand gestures with an accuracy of 82\%. By
developing such a system, we aim to allow people using
low-bandwidth forms of CMCs to still enjoy their
convenient and discreet properties while also allowing
them to experience more of the intimacy and
expressiveness of higher-bandwidth online
communication.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2019:SIH,
author = "Fang Chen and Carlos Duarte and Wai-Tat Fu",
title = "Special Issue on Highlights of {ACM Intelligent User
Interface (IUI) 2017}",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "7:1--7:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3301292",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3301292",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Pham:2019:AMA,
author = "Phuong Pham and Jingtao Wang",
title = "{AttentiveVideo}: a Multimodal Approach to Quantify
Emotional Responses to Mobile Advertisements",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "8:1--8:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3232233",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3232233",
abstract = "Understanding a target audience's emotional responses
to a video advertisement is crucial to evaluate the
advertisement's effectiveness. However, traditional
methods for collecting such information are slow,
expensive, and coarse grained. We propose
AttentiveVideo, a scalable intelligent mobile interface
with corresponding inference algorithms to monitor and
quantify the effects of mobile video advertising in
real time. Without requiring additional sensors,
AttentiveVideo employs a combination of implicit
photoplethysmography (PPG) sensing and facial
expression analysis (FEA) to detect the attention,
engagement, and sentiment of viewers as they watch
video advertisements on unmodified smartphones. In a
24-participant study, AttentiveVideo achieved good
accuracy on a wide range of emotional measures (the
best average accuracy = 82.6\% across nine measures).
While feature fusion alone did not improve prediction
accuracy with a single model, it significantly improved
the accuracy when working together with model fusion.
We also found that the PPG sensing channel and the FEA
technique have different strengths in data availability,
detection latency, accuracy, and usage environment.
These findings show the potential for both low-cost
collection and deep understanding of emotional
responses to mobile video advertisements.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Mihoub:2019:WSS,
author = "Alaeddine Mihoub and Gr{\'e}goire Lefebvre",
title = "Wearables and Social Signal Processing for Smarter
Public Presentations",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "9:1--9:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3234507",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3234507",
abstract = "Social Signal Processing techniques have given the
opportunity to analyze in-depth human behavior in
social face-to-face interactions. With recent
advancements, it is henceforth possible to use these
techniques to augment social interactions, especially
human behavior in oral presentations. The goal of this
study is to train a computational model able to provide
a relevant feedback to a public speaker concerning
his/her coverbal communication. Hence, the role of this
model is to augment the social intelligence of the
orator and then the relevance of his/her presentation.
To this end, we present an original interaction setting
in which the speaker is equipped with only wearable
devices. Several coverbal modalities have been
extracted and automatically annotated, namely speech
volume, intonation, speech rate, eye gaze, hand
gestures, and body movements. In this article, which is
an extension of our previous article published in
IUI'17, we compare our Dynamic Bayesian Network design
to classical J48/Multi-Layer Perceptron/Support Vector
Machine classifiers, propose a subjective evaluation of
presenter skills with a discussion in regard to our
automatic evaluation, and add a complementary study
on using the DBSCAN versus the k-means algorithm in the
design process of our Dynamic Bayesian Network.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Zhou:2019:TVA,
author = "Michelle X. Zhou and Gloria Mark and Jingyi Li and
Huahai Yang",
title = "Trusting Virtual Agents: The Effect of Personality",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "10:1--10:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3232077",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3232077",
abstract = "We present artificially intelligent (AI) agents that act
as interviewers to engage with a user in a text-based
conversation and automatically infer the user's
personality traits. We investigate how the personality
of an AI interviewer and the inferred personality of a
user influences the user's trust in the AI interviewer
from two perspectives: the user's willingness to
confide in and listen to an AI interviewer. We have
developed two AI interviewers with distinct
personalities and deployed them in a series of
real-world events. We present findings from four such
deployments involving 1,280 users, including 606 actual
job applicants. Notably, users are more willing to
confide in and listen to an AI interviewer with a
serious, assertive personality in a high-stakes job
interview. Moreover, users' personality traits,
inferred from their chat text, along with interview
context, influence their perception of and their
willingness to confide in and listen to an AI
interviewer. Finally, we discuss the design
implications of our work on building
hyper-personalized, intelligent agents.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Santos:2019:PPT,
author = "Carlos Pereira Santos and Kevin Hutchinson and
Vassilis-Javed Khan and Panos Markopoulos",
title = "Profiling Personality Traits with Games",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "11:1--11:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3230738",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3230738",
abstract = "Trying to understand a player's characteristics with
regard to a computer game is a major line of research
known as player modeling. The purpose of player
modeling is typically the adaptation of the game
itself. We present two studies that extend player
modeling into player profiling by trying to identify
abstract personality traits, such as the need for
cognition and self-esteem, through a player's in-game
behavior. We present evidence that game mechanics that
can be broadly adopted by several game genres, such as
hints and a player's self-evaluation at the end of a
level, correlate with the aforementioned personality
traits. We conclude by presenting future directions for
research regarding this topic, discuss the direct
applications for the games industry, and explore how
games can be developed as profiling tools with
applications to other contexts.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sen:2019:TUS,
author = "Shilad Sen and Anja Beth Swoap and Qisheng Li and Ilse
Dippenaar and Monica Ngo and Sarah Pujol and Rebecca
Gold and Brooke Boatman and Brent Hecht and Bret
Jackson",
title = "Toward Universal Spatialization Through
{Wikipedia}-Based Semantic Enhancement",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "12:1--12:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213769",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3213769",
abstract = "This article introduces Cartograph, a visualization
system that harnesses the vast world knowledge encoded
within Wikipedia to create thematic maps of almost any
data. Cartograph extends previous systems that
visualize non-spatial data using geographic approaches.
Although these systems required data with an existing
semantic structure, Cartograph unlocks spatial
visualization for a much larger variety of datasets by
enhancing input datasets with semantic information
extracted from Wikipedia. Cartograph's map embeddings
use neural networks trained on Wikipedia article
content and user navigation behavior. Using these
embeddings, the system can reveal connections between
points that are unrelated in the original datasets but
are related in meaning and therefore embedded close
together on the map. We describe the design of the
system and key challenges we encountered. We present
findings from two user studies exploring design choices
and use of the system.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{DiSciascio:2019:IQA,
author = "Cecilia {Di Sciascio} and David Strohmaier and Marcelo
Errecalde and Eduardo Veas",
title = "Interactive Quality Analytics of User-generated
Content: an Integrated Toolkit for the Case of
{Wikipedia}",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "13:1--13:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3150973",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3150973",
abstract = "Digital libraries and services enable users to access
large amounts of data on demand. Yet, quality
assessment of information encountered on the Internet
remains an elusive open issue. For example, Wikipedia,
one of the most visited platforms on the Web, hosts
thousands of user-generated articles and undergoes 12
million edits/contributions per month. User-generated
content is undoubtedly one of the keys to its success
but also a hindrance to good quality. Although
Wikipedia has established guidelines for the ``perfect
article,'' authors find it difficult to ascertain whether
their contributions comply with them, and reviewers
cannot cope with the ever-growing amount of articles
pending review. Great efforts have been invested in
algorithmic methods for automatic classification of
Wikipedia articles (as featured or non-featured) and
for quality flaw detection. Instead, our contribution
is an interactive tool that combines automatic
classification methods and human interaction in a
toolkit, whereby experts can experiment with new
quality metrics and share them with authors that need
to identify weaknesses to improve a particular article.
A design study shows that experts are able to
effectively create complex quality metrics in a visual
analytics environment. In turn, a user study evidences
that regular users can identify flaws, as well as
high-quality content based on the inspection of
automatic quality scores.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Paudyal:2019:CTS,
author = "Prajwal Paudyal and Junghyo Lee and Ayan Banerjee and
Sandeep K. S. Gupta",
title = "A Comparison of Techniques for Sign Language Alphabet
Recognition Using Armband Wearables",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "14:1--14:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3150974",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3150974",
abstract = "Recent research has shown that reliable recognition of
sign language words and phrases using user-friendly and
noninvasive armbands is feasible and desirable. This
work provides an analysis and implementation of
including fingerspelling recognition (FR) in such
systems, which is a much harder problem due to lack of
distinctive hand movements. A novel algorithm called
DyFAV (Dynamic Feature Selection and Voting) is
proposed for this purpose that exploits the fact that
fingerspelling has a finite corpus (the 26 letters of
the American Sign Language (ASL) alphabet). Detailed analysis of
the algorithm used as well as comparisons with other
traditional machine-learning algorithms is provided.
The system uses an independent multiple-agent voting
approach to identify letters with high accuracy. The
independent voting of the agents ensures that the
algorithm is highly parallelizable and thus recognition
times can be kept low to suit real-time mobile
applications. A thorough explanation and analysis is
presented on results obtained on the ASL alphabet
corpus for nine people with limited training. An
average recognition accuracy of 95.36\% is reported and
compared with recognition results from other
machine-learning techniques. This result is extended by
including six additional validation users with data
collected under similar settings as the previous
dataset. Furthermore, a feature selection schema using
a subset of the sensors is proposed and the results are
evaluated. The mobile, noninvasive, and real-time
nature of the technology is demonstrated by evaluating
performance on various types of Android phones and
remote server configurations. A brief discussion of the
user interface is provided along with guidelines for
best practices.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Katsuragawa:2019:BLT,
author = "Keiko Katsuragawa and Ankit Kamal and Qi Feng Liu and
Matei Negulescu and Edward Lank",
title = "Bi-Level Thresholding: Analyzing the Effect of
Repeated Errors in Gesture Input",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "15:1--15:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3181672",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3181672",
abstract = "In gesture recognition, one challenge that researchers
and developers face is the need for recognition
strategies that mediate between false positives and
false negatives. In this article, we examine bi-level
thresholding, a recognition strategy that uses two
thresholds: a tighter threshold limits false positives
and recognition errors, and a looser threshold prevents
repeated errors (false negatives) by analyzing
movements in sequence. We first describe early
observations that led to the development of the
bi-level thresholding algorithm. Next, using a
Wizard-of-Oz recognizer, we hold recognition rates
constant and adjust for fixed versus bi-level
thresholding; we show that systems using bi-level
thresholding result in significantly lower workload
scores on the NASA-TLX and significantly lower
accelerometer variance when performing gesture input.
Finally, we examine the effect that bi-level
thresholding has on a real-world dataset of wrist and
finger gestures, showing an ability to significantly
improve measures of precision and recall. Overall,
these results argue for the viability of bi-level
thresholding as an effective technique for balancing
between false positives, recognition errors, and false
negatives.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Intharah:2019:HDI,
author = "Thanapong Intharah and Daniyar Turmukhambetov and
Gabriel J. Brostow",
title = "{HILC}: Domain-Independent {PbD} System Via Computer
Vision and Follow-Up Questions",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "16:1--16:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3234508",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3234508",
abstract = "Creating automation scripts for tasks involving
Graphical User Interface (GUI) interactions is hard. It
is challenging because not all software applications
allow access to a program's internal state, nor do they
all have accessibility APIs. Although much of the
internal state is exposed to the user through the GUI,
it is hard to programmatically operate the GUI's
widgets. To that end, we developed a system prototype
that learns by demonstration, called HILC (Help, It
Looks Confusing). Users, both programmers and
non-programmers, train HILC to synthesize a task script
by demonstrating the task. A demonstration produces the
needed screenshots and their corresponding
mouse-keyboard signals. After the demonstration, the
user answers follow-up questions. We propose a
user-in-the-loop framework that learns to generate
scripts of actions performed on visible elements of
graphical applications. Although pure programming by
demonstration is still unrealistic due to a computer's
limited understanding of user intentions, we use
quantitative and qualitative experiments to show that
non-programming users are willing and effective at
answering follow-up queries posed by our system, to
help with confusing parts of the demonstrations. Our
models of events and appearances are surprisingly
simple but are combined effectively to cope with
varying amounts of supervision. The best available
baseline, Sikuli Slides, struggled to assist users in
the majority of the tests in our user study
experiments. The prototype with our proposed approach
successfully helped users accomplish simple linear
tasks, complicated tasks (monitoring, looping, and
mixed), and tasks that span across multiple
applications. Even when both systems could ultimately
perform a task, ours was trained and refined by the
user in less time.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Thomason:2019:CAV,
author = "John Thomason and Photchara Ratsamee and Jason Orlosky
and Kiyoshi Kiyokawa and Tomohiro Mashita and Yuki
Uranishi and Haruo Takemura",
title = "A Comparison of Adaptive View Techniques for
Exploratory {$3$D} Drone Teleoperation",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "17:1--17:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3232232",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3232232",
abstract = "Drone navigation in complex environments poses many
problems to teleoperators. Especially in three
dimensional (3D) structures such as buildings or
tunnels, viewpoints are often limited to the drone's
current camera view, nearby objects can be collision
hazards, and frequent occlusion can hinder accurate
manipulation. To address these issues, we have
developed a novel interface for teleoperation that
provides a user with environment-adaptive viewpoints
that are automatically configured to improve safety and
provide smooth operation. This real-time adaptive
viewpoint system takes robot position, orientation, and
3D point-cloud information into account to modify the
user's viewpoint to maximize visibility. Our prototype
uses simultaneous localization and mapping (SLAM) based
reconstruction with an omnidirectional camera, and we
use the resulting models as well as simulations in a
series of preliminary experiments testing navigation of
various structures. Results suggest that automatic
viewpoint generation can outperform first- and
third-person view interfaces for virtual teleoperators
in terms of ease of control and accuracy of robot
operation.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Oraby:2019:MCC,
author = "Shereen Oraby and Mansurul Bhuiyan and Pritam Gundecha
and Jalal Mahmud and Rama Akkiraju",
title = "Modeling and Computational Characterization of
{Twitter} Customer Service Conversations",
journal = j-TIIS,
volume = "9",
number = "2--3",
pages = "18:1--18:??",
month = apr,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3213014",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:19 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3213014",
abstract = "Given the increasing popularity of customer service
dialogue on Twitter, analysis of conversation data is
essential to understanding trends in customer and agent
behavior for the purpose of automating customer service
interactions. In this work, we develop a novel taxonomy
of fine-grained ``dialogue acts'' frequently observed
in customer service, showcasing acts that are more
suited to the domain than the more generic existing
taxonomies. Using a sequential SVM-HMM model, we model
conversation flow, predicting the dialogue act of a
given turn in real time, and showcase this using our
``PredDial'' portal. We characterize differences
between customer and agent behavior in Twitter customer
service conversations and investigate the effect of
testing our system on different customer service
industries. Finally, we use a data-driven approach to
predict important conversation outcomes: customer
satisfaction, customer frustration, and overall problem
resolution. We show that the type and location of
certain dialogue acts in a conversation have a
significant effect on the probability of desirable and
undesirable outcomes and present actionable rules based
on our findings. We explore the correlations between
different dialogue acts and the outcome of the
conversations in detail using an actionable-rule
discovery task by leveraging a state-of-the-art
sequential rule mining algorithm while modeling a set
of conversations as a set of sequences. The patterns
and rules we derive can be used as guidelines for
outcome-driven automated customer service platforms.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Sharma:2019:LSI,
author = "Mohit Sharma and F. Maxwell Harper and George
Karypis",
title = "Learning from Sets of Items in Recommender Systems",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "19:1--19:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3326128",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3326128",
abstract = "Most of the existing recommender systems use the
ratings provided by users on individual items. An
additional source of preference information is to use
the ratings that users provide on sets of items. The
advantages of using preferences on sets are twofold.
First, a rating provided on a set conveys some
preference information about each of the set's items,
which allows us to acquire a user's preferences for
more items than the number of ratings that the user
provided. Second, due to privacy concerns, users may
not be willing to reveal their preferences on
individual items explicitly but may be willing to
provide a single rating to a set of items, since it
provides some level of information hiding. This article
investigates two questions related to using set-level
ratings in recommender systems. First, how users'
item-level ratings relate to their set-level ratings.
Second, how collaborative filtering-based models for
item-level rating prediction can take advantage of such
set-level ratings. We have collected set-level ratings
from active users of MovieLens on sets of movies that
they have rated in the past. Our analysis of these
ratings shows that though the majority of the users
provide the average of the ratings on a set's
constituent items as the rating on the set, there
exists a significant number of users that tend to
consistently either under- or over-rate the sets. We
have developed collaborative filtering-based methods to
explicitly model these user behaviors that can be used
to recommend items to users. Experiments on real data
and on synthetic data that resembles the under- or
over-rating behavior in the real data demonstrate that
these models can recover the overall characteristics of
the underlying data and predict the user's ratings on
individual items.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Chen:2019:UES,
author = "Li Chen and Dongning Yan and Feng Wang",
title = "User Evaluations on Sentiment-based Recommendation
Explanations",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "20:1--20:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3282878",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3282878",
abstract = "The explanation interface has been recognized as
important in recommender systems because it can allow
users to better judge the relevance of recommendations
to their preferences and, hence, make more informed
decisions. In different product domains, the specific
purpose of explanation can be different. For
high-investment products (e.g., digital cameras,
laptops), how to educate typical new buyers
about product knowledge and, consequently, improve
their preference certainty and decision quality is
crucial. With this objective, we have
developed a novel tradeoff-oriented explanation
interface that particularly takes into account
sentiment features as extracted from product reviews to
generate recommendations and explanations in a category
structure. In this manuscript, we first reported the
results of an earlier user study (in both before-after
and counter-balancing setups) that compared our
prototype system with the traditional one that purely
considers static specifications for explanations. This
experiment revealed that adding sentiment-based
explanations can significantly increase users' product
knowledge, preference certainty, perceived information
usefulness, perceived recommendation transparency and
quality, and purchase intention. In order to further
identify the reason behind users' perception
improvements on the sentiment-based explanation
interface, we performed a follow-up lab controlled
eye-tracking experiment that investigated how users
viewed information and compared products on the
interface. This study shows that incorporating
sentiment features into the tradeoff-oriented
explanations can significantly affect users' eye-gaze
pattern. They were stimulated to not only notice bottom
categories of products, but also, more frequently, to
compare products across categories. The results also
disclose users' inherent information needs for
sentiment-based explanations, as they allow users to
better understand the recommended products and gain
more knowledge about static specifications.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Du:2019:EVA,
author = "Fan Du and Catherine Plaisant and Neil Spring and
Kenyon Crowley and Ben Shneiderman",
title = "{EventAction}: a Visual Analytics Approach to
Explainable Recommendation for Event Sequences",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "21:1--21:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3301402",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3301402",
abstract = "People use recommender systems to improve their
decisions; for example, item recommender systems help
them find films to watch or books to buy. Despite the
ubiquity of item recommender systems, they can be
improved by giving users greater transparency and
control. This article develops and assesses interactive
strategies for transparency and control, as applied to
event sequence recommender systems, which provide
guidance in critical life choices such as medical
treatments, career decisions, and educational course
selections. This article's main contribution is the use
of both record attributes and temporal event
information as features to identify similar records and
provide appropriate recommendations. While traditional
item recommendations are based on choices by people
with similar attributes, such as those who looked at
this product or watched this movie, our event sequence
recommendation approach allows users to select records
that share similar attribute values and start with a
similar event sequence. Then users see how different
choices of actions and the orders and times between
them might lead to users' desired outcomes. This paper
applies a visual analytics approach to present and
explain recommendations of event sequences. It presents
a workflow for event sequence recommendation that is
implemented in EventAction and reports on three case
studies in two domains to illustrate the use of
generating event sequence recommendations based on
personal histories. It also offers design guidelines
for the construction of user interfaces for event
sequence recommendation and discusses ethical issues in
dealing with personal histories. A demo video of
EventAction is available at
https://hcil.umd.edu/eventaction.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Lee:2019:UAM,
author = "Junghyo Lee and Prajwal Paudyal and Ayan Banerjee and
Sandeep K. S. Gupta",
title = "A User-adaptive Modeling for Eating Action
Identification from Wristband Time Series",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "22:1--22:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3300149",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3300149",
abstract = "Eating activity monitoring using wearable sensors can
potentially enable interventions based on eating speed
to mitigate the risks of critical healthcare problems
such as obesity or diabetes. Eating actions are
poly-componential gestures composed of sequential
arrangements of three distinct components interspersed
with gestures that may be unrelated to eating. This
makes it extremely challenging to accurately identify
eating actions. The primary reasons for the lack of
acceptance of state-of-the-art eating action monitoring
techniques include the following: (i) the need to
install wearable sensors that are cumbersome to wear or
limit the mobility of the user, (ii) the need for
manual input from the user, and (iii) poor accuracy in
the absence of manual inputs. In this work, we propose
a novel methodology, IDEA, that performs accurate
eating action identification within eating episodes
with an average F1 score of 0.92. This is an
improvement of 0.11 for precision and 0.15 for recall
for the worst-case users as compared to the state of
the art. IDEA uses only a single wristband and provides
feedback on eating speed every 2 min without obtaining
any manual input from the user.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Hezarjaribi:2019:HLL,
author = "Niloofar Hezarjaribi and Sepideh Mazrouee and Saied
Hemati and Naomi S. Chaytor and Martine Perrigue and
Hassan Ghasemzadeh",
title = "Human-in-the-loop Learning for Personalized Diet
Monitoring from Unstructured Mobile Data",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "23:1--23:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3319370",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3319370",
abstract = "Lifestyle interventions with the focus on diet are
crucial in self-management and prevention of many
chronic conditions, such as obesity, cardiovascular
disease, diabetes, and cancer. Such interventions
require a diet monitoring approach to estimate overall
dietary composition and energy intake. Although
wearable sensors have been used to estimate eating
context (e.g., food type and eating time), accurate
monitoring of dietary intake has remained a challenging
problem. In particular, because monitoring dietary
intake is a self-administered task that involves the
end-user to record or report their nutrition intake,
current diet monitoring technologies are prone to
measurement errors related to challenges of human
memory, estimation, and bias. New approaches based on
mobile devices have been proposed to facilitate the
process of dietary intake recording. These technologies
require individuals to use mobile devices such as
smartphones to record nutrition intake by either
entering text or taking images of the food. Such
approaches, however, suffer from errors due to low
adherence to technology adoption and time sensitivity
to the dietary intake context. In this article, we
introduce EZNutriPal, an interactive diet monitoring
system that operates on unstructured mobile data such
as speech and free-text to facilitate dietary
recording, real-time prompting, and personalized
nutrition monitoring. EZNutriPal features a natural
language processing unit that learns incrementally to
add user-specific nutrition data and rules to the
system. To prevent missing data that are required for
dietary monitoring (e.g., calorie intake estimation),
EZNutriPal devises an interactive operating mode that
prompts the end-user to complete missing data in
real-time. Additionally, we propose a combinatorial
optimization approach to identify the most appropriate
pairs of food names and food quantities in complex
input sentences. We evaluate the performance of
EZNutriPal using real data collected from 23 human
subjects who participated in two user studies conducted
in 13 days each. The results demonstrate that
EZNutriPal achieves an accuracy of 89.7\% in calorie
intake estimation. We also assess the impacts of the
incremental training and interactive prompting
technologies on the accuracy of nutrient intake
estimation and show that incremental training and
interactive prompting improve the performance of diet
monitoring by 49.6\% and 29.1\%, respectively, compared
to a system without such computing units.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Juvina:2019:TUT,
author = "Ion Juvina and Michael G. Collins and Othalia Larue
and William G. Kennedy and Ewart {De Visser} and Celso
{De Melo}",
title = "Toward a Unified Theory of Learned Trust in
Interpersonal and Human-Machine Interactions",
journal = j-TIIS,
volume = "9",
number = "4",
pages = "24:1--24:??",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3230735",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Wed Dec 11 06:36:20 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/ft_gateway.cfm?id=3230735",
abstract = "A proposal for a unified theory of learned trust
implemented in a cognitive architecture is presented.
The theory is instantiated as a computational cognitive
model of learned trust that integrates several
seemingly unrelated categories of findings from the
literature on interpersonal and human-machine
interactions and makes unintuitive predictions for
future studies. The model relies on a combination of
learning mechanisms to explain a variety of phenomena
such as trust asymmetry, the higher impact of early
trust breaches, the black-hat/white-hat effect, the
correlation between trust and cognitive ability, and
the higher resilience of interpersonal as compared to
human-machine trust. In addition, the model predicts
that trust decays in the absence of evidence of
trustworthiness or untrustworthiness. The implications
of the model for the advancement of the theory on trust
are discussed. Specifically, this work suggests two
more trust antecedents on the trustor's side: perceived
trust necessity and cognitive ability to detect cues of
trustworthiness.",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Billinghurst:2020:SIH,
author = "Mark Billinghurst and Margaret Burnett and Aaron
Quigley",
title = "Special Issue on Highlights of {ACM Intelligent User
Interface (IUI) 2018}",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "1:1--1:3",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3357206",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3357206",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Vanderdonckt:2020:EDS,
author = "Jean Vanderdonckt and Sara Bouzit and Ga{\"e}lle
Calvary and Denis Ch{\^e}ne",
title = "Exploring a Design Space of Graphical Adaptive Menus:
Normal vs. Small Screens",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "2:1--2:40",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3237190",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3237190",
abstract = "Graphical Adaptive Menus are Graphical User Interface
menus whose predicted items of immediate use can be
automatically rendered in a prediction window.
Rendering this prediction window is a key question for
adaptivity to enable the end-user to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Song:2020:FLT,
author = "Jean Y. Song and Raymond Fok and Juho Kim and Walter
S. Lasecki",
title = "{FourEyes}: Leveraging Tool Diversity as a Means to
Improve Aggregate Accuracy in Crowdsourcing",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "3:1--3:30",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3237188",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3237188",
abstract = "Crowdsourcing is a common means of collecting image
segmentation training data for use in a variety of
computer vision applications. However, designing
accurate crowd-powered image segmentation systems is
challenging, because defining object boundaries
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Broden:2020:BBE,
author = "Bj{\"o}rn Brod{\'e}n and Mikael Hammar and Bengt J.
Nilsson and Dimitris Paraschakis",
title = "A Bandit-Based Ensemble Framework for
Exploration\slash Exploitation of Diverse
Recommendation Components: an Experimental Study within
E-Commerce",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "4:1--4:32",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3237187",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Jan 11 08:20:51 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3237187",
abstract = "This work presents an extension of Thompson Sampling
bandit policy for orchestrating the collection of base
recommendation algorithms for e-commerce. We focus on
the problem of item-to-item recommendations, for which
multiple behavioral and attribute-\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Tsai:2020:ESR,
author = "Chun-Hua Tsai and Peter Brusilovsky",
title = "Exploring Social Recommendations with Visual
Diversity-Promoting Interfaces",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "5:1--5:34",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3231465",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3231465",
abstract = "The beyond-relevance objectives of recommender systems
have been drawing more and more attention. For example,
a diversity-enhanced interface has been shown to
associate positively with overall levels of user
satisfaction. However, little is known about \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Sherkat:2020:VAA,
author = "Ehsan Sherkat and Evangelos E. Milios and Rosane
Minghim",
title = "A Visual Analytics Approach for Interactive Document
Clustering",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "6:1--6:33",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241380",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3241380",
abstract = "Document clustering is a necessary step in various
analytical and automated activities. When guided by the
user, algorithms are tailored to imprint a perspective
on the clustering process that reflects the user's
understanding of the dataset. More than \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Suh:2020:AFS,
author = "Jina Suh and Soroush Ghorashi and Gonzalo Ramos and
Nan-Chen Chen and Steven Drucker and Johan Verwey and
Patrice Simard",
title = "{AnchorViz}: Facilitating Semantic Data Exploration
and Concept Discovery for Interactive Machine
Learning",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "7:1--7:38",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241379",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3241379",
abstract = "When building a classifier in interactive machine
learning (iML), human knowledge about the target class
can be a powerful reference to make the classifier
robust to unseen items. The main challenge lies in
finding unlabeled items that can either help \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Sciascio:2020:RUC,
author = "Cecilia {Di Sciascio} and Peter Brusilovsky and
Christoph Trattner and Eduardo Veas",
title = "A Roadmap to User-Controllable Social Exploratory
Search",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "8:1--8:38",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241382",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Jan 11 08:20:51 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3241382",
abstract = "Information-seeking tasks with learning or
investigative purposes are usually referred to as
exploratory search. Exploratory search unfolds as a
dynamic process where the user, amidst navigation,
trial and error, and on-the-fly selections, gathers and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Todi:2020:IGL,
author = "Kashyap Todi and Jussi Jokinen and Kris Luyten and
Antti Oulasvirta",
title = "Individualising Graphical Layouts with Predictive
Visual Search Models",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "9:1--9:24",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241381",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:20 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3241381",
abstract = "In domains where users are exposed to large variations
in visuo-spatial features among designs, they often
spend excess time searching for common elements
(features) on an interface. This article contributes
individualised predictive models of visual \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{He:2020:DDA,
author = "Yangyang He and Paritosh Bahirat and Bart P.
Knijnenburg and Abhilash Menon",
title = "A Data-Driven Approach to Designing for Privacy in
Household {IoT}",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "10:1--10:47",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241378",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Jan 11 08:20:51 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3241378",
abstract = "In this article, we extend and improve upon a
previously developed data-driven approach to design
privacy-setting interfaces for users of household IoT
devices. The essence of this approach is to gather
users' feedback on household IoT scenarios
before\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Avrahami:2020:UAR,
author = "Daniel Avrahami and Mitesh Patel and Yusuke Yamaura
and Sven Kratz and Matthew Cooper",
title = "Unobtrusive Activity Recognition and Position
Estimation for Work Surfaces Using {RF}-Radar Sensing",
journal = j-TIIS,
volume = "10",
number = "1",
pages = "11:1--11:28",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3241383",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
bibdate = "Sat Jan 11 08:20:51 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3241383",
abstract = "Activity recognition is a core component of many
intelligent and context-aware systems. We present a
solution for discreetly and unobtrusively recognizing
common work activities above a work surface without
using cameras. We demonstrate our approach, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
@Article{Conati:2020:CCI,
author = "Cristina Conati and S{\'e}bastien Lall{\'e} and Md
Abed Rahman and Dereck Toker",
title = "Comparing and Combining Interaction Data and
Eye-tracking Data for the Real-time Prediction of User
Cognitive Abilities in Visualization Tasks",
journal = j-TIIS,
volume = "10",
number = "2",
pages = "12:1--12:41",
month = jun,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3301400",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Jun 27 14:42:35 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3301400",
abstract = "Previous work has shown that some user cognitive
abilities relevant for processing information
visualizations can be predicted from eye-tracking data.
Performing this type of user modeling is important for
devising visualizations that can detect a user's
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ahn:2020:PIP,
author = "Yongsu Ahn and Yu-Ru Lin",
title = "{PolicyFlow}: Interpreting Policy Diffusion in
Context",
journal = j-TIIS,
volume = "10",
number = "2",
pages = "13:1--13:23",
month = jun,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3385729",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Jun 27 14:42:35 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3385729",
abstract = "Stability in social, technical, and financial systems,
as well as the capacity of organizations to work across
borders, requires consistency in public policy across
jurisdictions. The diffusion of laws and regulations
across political boundaries can \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mohan:2020:DAH,
author = "Shiwali Mohan and Anusha Venkatakrishnan and Andrea L.
Hartzler",
title = "Designing an {AI} Health Coach and Studying Its
Utility in Promoting Regular Aerobic Exercise",
journal = j-TIIS,
volume = "10",
number = "2",
pages = "14:1--14:30",
month = jun,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3366501",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Jun 27 14:42:35 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3366501",
abstract = "Our research aims to develop interactive, social
agents that can coach people to learn new tasks,
skills, and habits. In this article, we focus on
coaching sedentary, overweight individuals (i.e.,
``trainees'') to exercise regularly. We employ adaptive
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Anderson:2020:MMM,
author = "Andrew Anderson and Jonathan Dodge and Amrita
Sadarangani and Zoe Juozapaitis and Evan Newman and Jed
Irvine and Souti Chattopadhyay and Matthew Olson and
Alan Fern and Margaret Burnett",
title = "Mental Models of Mere Mortals with Explanations of
Reinforcement Learning",
journal = j-TIIS,
volume = "10",
number = "2",
pages = "15:1--15:37",
month = jun,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3366485",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Jun 27 14:42:35 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3366485",
abstract = "How should reinforcement learning (RL) agents explain
themselves to humans not trained in AI? To gain
insights into this question, we conducted a
124-participant, four-treatment experiment to compare
participants' mental models of an RL agent in the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Fan:2020:ADU,
author = "Mingming Fan and Yue Li and Khai N. Truong",
title = "Automatic Detection of Usability Problem Encounters in
Think-aloud Sessions",
journal = j-TIIS,
volume = "10",
number = "2",
pages = "16:1--16:24",
month = jun,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3385732",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Jun 27 14:42:35 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3385732",
abstract = "Think-aloud protocols are a highly valued usability
testing method for identifying usability problems.
Despite the value of conducting think-aloud usability
test sessions, analyzing think-aloud sessions is often
time-consuming and labor-intensive. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Pan:2020:SID,
author = "Shimei Pan and Oliver Brdiczka and Andrea Kleinsmith
and Yangqiu Song",
title = "Special Issue on Data-Driven Personality Modeling for
Intelligent Human-Computer Interaction",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "17:1--17:3",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3402522",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3402522",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Taib:2020:PSD,
author = "Ronnie Taib and Shlomo Berkovsky and Irena Koprinska
and Eileen Wang and Yucheng Zeng and Jingjie Li",
title = "Personality Sensing: Detection of Personality Traits
Using Physiological Responses to Image and Video
Stimuli",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "18:1--18:32",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3357459",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3357459",
abstract = "Personality detection is an important task in
psychology, as different personality traits are linked
to different behaviours and real-life outcomes.
Traditionally it involves filling out lengthy
questionnaires, which is time-consuming, and may also
be \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Dotti:2020:BCA,
author = "Dario Dotti and Mirela Popa and Stylianos Asteriadis",
title = "Being the Center of Attention: a Person-Context {CNN}
Framework for Personality Recognition",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "19:1--19:20",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3338245",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3338245",
abstract = "This article proposes a novel study on personality
recognition using video data from different scenarios.
Our goal is to jointly model nonverbal behavioral cues
with contextual information for a robust,
multi-scenario, personality recognition system.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Bagheri:2020:ACE,
author = "Elahe Bagheri and Pablo G. Esteban and Hoang-Long Cao
and Albert {De Beir} and Dirk Lefeber and Bram
Vanderborght",
title = "An Autonomous Cognitive Empathy Model Responsive to
Users' Facial Emotion Expressions",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "20:1--20:23",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3341198",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3341198",
abstract = "Successful social robot services depend on how robots
can interact with users. The effective service can be
obtained through smooth, engaged, and humanoid
interactions in which robots react properly to a user's
affective state. This article proposes a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Wang:2020:MDS,
author = "Ruijie Wang and Liming Chen and Ivar Solheim",
title = "Modeling Dyslexic Students' Motivation for Enhanced
Learning in E-learning Systems",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "21:1--21:34",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3341197",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3341197",
abstract = "E-Learning systems can support real-time monitoring of
learners' learning desires and effects, thus offering
opportunities for enhanced personalized learning.
Recognition of the determinants of dyslexic users'
motivation to use e-learning systems is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Khan:2020:PUM,
author = "Euna Mehnaz Khan and Md. Saddam Hossain Mukta and
Mohammed Eunus Ali and Jalal Mahmud",
title = "Predicting Users' Movie Preference and Rating Behavior
from Personality and Values",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "22:1--22:25",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3338244",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3338244",
abstract = "In this article, we propose novel techniques to
predict a user's movie genre preference and rating
behavior from her psycholinguistic attributes obtained
from the social media interactions. The motivation of
this work comes from various psychological \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Higuchi:2020:LCD,
author = "Keita Higuchi and Hiroki Tsuchida and Eshed Ohn-Bar
and Yoichi Sato and Kris Kitani",
title = "Learning Context-dependent Personal Preferences for
Adaptive Recommendation",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "23:1--23:26",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3359755",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3359755",
abstract = "We propose two online-learning algorithms for modeling
the personal preferences of users of interactive
systems. The proposed algorithms leverage user feedback
to estimate user behavior and provide personalized
adaptive recommendation for supporting \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hailpern:2020:HIP,
author = "Joshua Hailpern and Mark Huber and Ronald Calvo",
title = "How Impactful Is Presentation in Email? {The} Effect
of Avatars and Signatures",
journal = j-TIIS,
volume = "10",
number = "3",
pages = "24:1--24:26",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3345641",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:21 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3345641",
abstract = "A primary well-controlled study of 900 participants
found that personal presentation choices in
professional emails (non-content changes like Profile
Avatar \& Signature) impact the recipient's perception
of the sender's personality and the quality of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhou:2020:ITS,
author = "Michelle X. Zhou",
title = "Introduction to the {TiiS} Special Column",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "25:1--25:1",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3427592",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3427592",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Shneiderman:2020:BGB,
author = "Ben Shneiderman",
title = "Bridging the Gap Between Ethics and Practice:
Guidelines for Reliable, Safe, and Trustworthy
Human-centered {AI} Systems",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "26:1--26:31",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3419764",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3419764",
abstract = "This article attempts to bridge the gap between widely
discussed ethical principles of Human-centered AI
(HCAI) and practical steps for effective governance.
Since HCAI systems are developed and implemented in
multiple organizational structures, I \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Brdiczka:2020:ISI,
author = "Oliver Brdiczka and Duen Horng Chau and Minsuk Kahng
and Ga{\"e}lle Calvary",
title = "Introduction to the Special Issue on Highlights of
{ACM Intelligent User Interface (IUI) 2019}",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "27:1--27:2",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3429946",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3429946",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Eiband:2020:MAE,
author = "Malin Eiband and Sarah Theres V{\"o}lkel and Daniel
Buschek and Sophia Cook and Heinrich Hussmann",
title = "A Method and Analysis to Elicit User-Reported Problems
in Intelligent Everyday Applications",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "28:1--28:27",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3370927",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3370927",
abstract = "The complex nature of intelligent systems motivates
work on supporting users during interaction, for
example, through explanations. However, as of yet,
there is little empirical evidence in regard to
specific problems users face when applying such
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Springer:2020:PDW,
author = "Aaron Springer and Steve Whittaker",
title = "Progressive Disclosure: When, Why, and How Do Users
Want Algorithmic Transparency Information?",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "29:1--29:32",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374218",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3374218",
abstract = "It is essential that users understand how algorithmic
decisions are made, as we increasingly delegate
important decisions to intelligent systems. Prior work
has often taken a techno-centric approach, focusing on
new computational techniques to support \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Dominguez:2020:AHA,
author = "Vicente Dominguez and Ivania Donoso-Guzm{\'a}n and
Pablo Messina and Denis Parra",
title = "Algorithmic and {HCI} Aspects for Explaining
Recommendations of Artistic Images",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "30:1--30:31",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3369396",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3369396",
abstract = "Explaining suggestions made by recommendation systems
is key to make users trust and accept these systems.
This is specially critical in areas such as art image
recommendation. Traditionally, artworks are sold in
galleries where people can see them \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kouki:2020:GUP,
author = "Pigi Kouki and James Schaffer and Jay Pujara and John
O'Donovan and Lise Getoor",
title = "Generating and Understanding Personalized Explanations
in Hybrid Recommender Systems",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "31:1--31:40",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3365843",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3365843",
abstract = "Recommender systems are ubiquitous and shape the way
users access information and make decisions. As these
systems become more complex, there is a growing need
for transparency and interpretability. In this article,
we study the problem of generating \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "31",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hsu:2020:SPE,
author = "Yen-Chia Hsu and Jennifer Cross and Paul Dille and
Michael Tasota and Beatrice Dias and Randy Sargent and
Ting-Hao (Kenneth) Huang and Illah Nourbakhsh",
title = "{Smell Pittsburgh}: Engaging Community Citizen Science
for Air Quality",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "32:1--32:49",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3369397",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3369397",
abstract = "Urban air pollution has been linked to various human
health concerns, including cardiopulmonary diseases.
Communities who suffer from poor air quality often rely
on experts to identify pollution sources due to the
lack of accessible tools. Taking this \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "32",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mohanty:2020:PSI,
author = "Vikram Mohanty and David Thames and Sneha Mehta and
Kurt Luther",
title = "{Photo Sleuth}: Identifying Historical Portraits with
Face Recognition and Crowdsourced Human Expertise",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "33:1--33:36",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3365842",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3365842",
abstract = "Identifying people in historical photographs is
important for preserving material culture, correcting
the historical record, and creating economic value, but
it is also a complex and challenging task. In this
article, we focus on identifying portraits \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "33",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kulahcioglu:2020:AAW,
author = "Tugba Kulahcioglu and Gerard {De Melo}",
title = "Affect-Aware Word Clouds",
journal = j-TIIS,
volume = "10",
number = "4",
pages = "34:1--34:25",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3370928",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sun Mar 28 07:49:22 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3370928",
abstract = "Word clouds are widely used for non-analytic purposes,
such as introducing a topic to students, or creating a
gift with personally meaningful text. Surveys show that
users prefer tools that yield word clouds with a
stronger emotional impact. Fonts and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "34",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mohan:2021:ERC,
author = "Shiwali Mohan",
title = "Exploring the Role of Common Model of Cognition in
Designing Adaptive Coaching Interactions for Health
Behavior Change",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "1:1--1:30",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3375790",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3375790",
abstract = "Our research aims to develop intelligent collaborative
agents that are human-aware: They can model, learn, and
reason about their human partner's physiological,
cognitive, and affective states. In this article, we
study how adaptive coaching interactions \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Penney:2021:SGE,
author = "Sean Penney and Jonathan Dodge and Andrew Anderson and
Claudia Hilderbrand and Logan Simpson and Margaret
Burnett",
title = "The Shoutcasters, the Game Enthusiasts, and the {AI}:
Foraging for Explanations of Real-time Strategy
Players",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "2:1--2:46",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3396047",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3396047",
abstract = "Assessing and understanding intelligent agents is a
difficult task for users who lack an AI background.
``Explainable AI'' (XAI) aims to address this problem,
but what should be in an explanation? One route toward
answering this question is to turn to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Bessghaier:2021:DSA,
author = "Narjes Bessghaier and Makram Soui and Christophe
Kolski and Mabrouka Chouchane",
title = "On the Detection of Structural Aesthetic Defects of
{Android} Mobile User Interfaces with a Metrics-based
Tool",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "3:1--3:27",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410468",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3410468",
abstract = "Smartphone users are striving for easy-to-learn and
use mobile app user interfaces. Accomplishing these
qualities demands an iterative evaluation of the Mobile
User Interface (MUI). Several studies stress the value
of providing a MUI with a pleasing look \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Oviatt:2021:KWY,
author = "Sharon Oviatt and Jionghao Lin and Abishek Sriramulu",
title = "{I} Know What You Know: What Hand Movements Reveal
about Domain Expertise",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "4:1--4:26",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3423049",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3423049",
abstract = "This research investigates whether students' level of
domain expertise can be detected during authentic
learning activities by analyzing their physical
activity patterns. More expert students reduced their
manual activity by a substantial 50\%, which was
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Douer:2021:TMS,
author = "Nir Douer and Joachim Meyer",
title = "Theoretical, Measured, and Subjective Responsibility
in Aided Decision Making",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "5:1--5:37",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3425732",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3425732",
abstract = "When humans interact with intelligent systems, their
causal responsibility for outcomes becomes equivocal.
We analyze the descriptive abilities of a newly
developed responsibility quantification model (ResQu)
to predict actual human responsibility and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Bhattacharya:2021:RTI,
author = "Samit Bhattacharya and Viral Bharat Shah and Krishna
Kumar and Ujjwal Biswas",
title = "A Real-time Interactive Visualizer for Large
Classroom",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "6:1--6:26",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3418529",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3418529",
abstract = "In improving the teaching and learning experience in a
classroom environment, it is crucial for a teacher to
have a fair idea about the students who need help
during a lecture. However, teachers of large classes
usually face difficulties in identifying \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Chen:2021:PPR,
author = "Xiaoyu Chen and Nathan Lau and Ran Jin",
title = "{PRIME}: a Personalized Recommender System for
Information Visualization Methods via Extended Matrix
Completion",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "7:1--7:30",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3366484",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3366484",
abstract = "Adapting user interface designs for specific tasks
performed by different users is a challenging yet
important problem. Automatically adapting visualization
designs to users and contexts (e.g., tasks, display
devices, environments, etc.) can theoretically
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ma:2021:HTR,
author = "Wanqi Ma and Xiaoxiao Liao and Wei Dai and Weike Pan
and Zhong Ming",
title = "Holistic Transfer to Rank for Top-{$N$}
Recommendation",
journal = j-TIIS,
volume = "11",
number = "1",
pages = "8:1--8:1",
month = apr,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3434360",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Apr 27 08:00:40 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3434360",
abstract = "Recommender systems have been a valuable component in
various online services such as e-commerce and
entertainment. To provide an accurate top-N
recommendation list of items for each target user, we
have to answer a very basic question of how to model
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Tran:2021:HRS,
author = "Thi Ngoc Trang Tran and Alexander Felfernig and Nava
Tintarev",
title = "Humanized Recommender Systems: State-of-the-art and
Research Issues",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "9:1--9:41",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3446906",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3446906",
abstract = "Psychological factors such as personality, emotions,
social connections, and decision biases can
significantly affect the outcome of a decision process.
These factors are also prevalent in the existing
literature related to the inclusion of psychological
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Shatilov:2021:EEB,
author = "Kirill A. Shatilov and Dimitris Chatzopoulos and
Lik-Hang Lee and Pan Hui",
title = "Emerging {ExG}-based {NUI} Inputs in Extended
Realities: a Bottom-up Survey",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "10:1--10:49",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3457950",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3457950",
abstract = "Incremental and quantitative improvements of two-way
interactions with extended realities (XR) are
contributing toward a qualitative leap into a state of
XR ecosystems being efficient, user-friendly, and
widely adopted. However, there are multiple \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Gil:2021:AIM,
author = "Yolanda Gil and Daniel Garijo and Deborah Khider and
Craig A. Knoblock and Varun Ratnakar and Maximiliano
Osorio and Hern{\'a}n Vargas and Minh Pham and Jay
Pujara and Basel Shbita and Binh Vu and Yao-Yi Chiang
and Dan Feldman and Yijun Lin and Hayley Song and Vipin
Kumar and Ankush Khandelwal and Michael Steinbach and
Kshitij Tayal and Shaoming Xu and Suzanne A. Pierce and
Lissa Pearson and Daniel Hardesty-Lewis and Ewa Deelman
and Rafael Ferreira {Da Silva} and Rajiv Mayani and
Armen R. Kemanian and Yuning Shi and Lorne Leonard and
Scott Peckham and Maria Stoica and Kelly Cobourn and
Zeya Zhang and Christopher Duffy and Lele Shu",
title = "Artificial Intelligence for Modeling Complex Systems:
Taming the Complexity of Expert Models to Improve
Decision Making",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "11:1--11:49",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453172",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3453172",
abstract = "Major societal and environmental challenges involve
complex systems that have diverse multi-scale
interacting processes. Consider, for example, how
droughts and water reserves affect crop production and
how agriculture and industrial needs affect water
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Rosenberg:2021:ECA,
author = "Maor Rosenberg and Hae Won Park and Rinat
Rosenberg-Kima and Safinah Ali and Anastasia K.
Ostrowski and Cynthia Breazeal and Goren Gordon",
title = "Expressive Cognitive Architecture for a Curious Social
Robot",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "12:1--12:25",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3451531",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3451531",
abstract = "Artificial curiosity, based on developmental
psychology concepts wherein an agent attempts to
maximize its learning progress, has gained much
attention in recent years. Similarly, social robots are
slowly integrating into our daily lives, in schools,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Roffarello:2021:UDM,
author = "Alberto Monge Roffarello and Luigi {De Russis}",
title = "Understanding, Discovering, and Mitigating Habitual
Smartphone Use in Young Adults",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "13:1--13:34",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3447991",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3447991",
abstract = "People, especially young adults, often use their
smartphones out of habit: They compulsively browse
social networks, check emails, and play video-games
with little or no awareness at all. While previous
studies analyzed this phenomenon qualitatively, e.g.,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Spiller:2021:PVS,
author = "Moritz Spiller and Ying-Hsang Liu and Md Zakir Hossain
and Tom Gedeon and Julia Geissler and Andreas
N{\"u}rnberger",
title = "Predicting Visual Search Task Success from Eye Gaze
Data as a Basis for User-Adaptive Information
Visualization Systems",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "14:1--14:25",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3446638",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3446638",
abstract = "Information visualizations are an efficient means to
support the users in understanding large amounts of
complex, interconnected data; user comprehension,
however, depends on individual factors such as their
cognitive abilities. The research literature \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Banisetty:2021:SAN,
author = "Santosh Balajee Banisetty and Scott Forer and Logan
Yliniemi and Monica Nicolescu and David Feil-Seifer",
title = "Socially Aware Navigation: a Non-linear
Multi-objective Optimization Approach",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "15:1--15:26",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453445",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3453445",
abstract = "Mobile robots are increasingly populating homes,
hospitals, shopping malls, factory floors, and other
human environments. Human society has social norms that
people mutually accept; obeying these norms is an
essential signal that someone is participating
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mousas:2021:PSV,
author = "Christos Mousas and Claudia Krogmeier and Zhiquan
Wang",
title = "Photo Sequences of Varying Emotion: Optimization with
a Valence-Arousal Annotated Dataset",
journal = j-TIIS,
volume = "11",
number = "2",
pages = "16:1--16:19",
month = jul,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3458844",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Jul 22 08:06:11 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3458844",
abstract = "Synthesizing photo products such as photo strips and
slideshows using a database of images is a
time-consuming and tedious process that requires
significant manual work. To overcome this limitation,
we developed a method that automatically synthesizes
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Turkay:2021:SII,
author = "Cagatay Turkay and Tatiana {Von Landesberger} and
Daniel Archambault and Shixia Liu and Remco Chang",
title = "Special Issue on Interactive Visual Analytics for
Making Explainable and Accountable Decisions",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "17:1--17:4",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3471903",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3471903",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhang:2021:MMI,
author = "Yu Zhang and Bob Coecke and Min Chen",
title = "{MI3}: Machine-initiated Intelligent Interaction for
Interactive Classification and Data Reconstruction",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "18:1--18:34",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3412848",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3412848",
abstract = "In many applications, while machine learning (ML) can
be used to derive algorithmic models to aid decision
processes, it is often difficult to learn a precise
model when the number of similar data points is
limited. One example of such applications is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Sevastjanova:2021:QGA,
author = "Rita Sevastjanova and Wolfgang Jentner and Fabian
Sperrle and Rebecca Kehlbeck and J{\"u}rgen Bernard and
Mennatallah El-Assady",
title = "{QuestionComb}: a Gamification Approach for the Visual
Explanation of Linguistic Phenomena through Interactive
Labeling",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "19:1--19:38",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3429448",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3429448",
abstract = "Linguistic insight in the form of high-level
relationships and rules in text builds the basis of our
understanding of language. However, the data-driven
generation of such structures often lacks labeled
resources that can be used as training data for
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Bernard:2021:TPM,
author = "J{\"u}rgen Bernard and Marco Hutter and Michael
Sedlmair and Matthias Zeppelzauer and Tamara Munzner",
title = "A Taxonomy of Property Measures to Unify Active
Learning and Human-centered Approaches to Data
Labeling",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "20:1--20:42",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439333",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3439333",
abstract = "Strategies for selecting the next data instance to
label, in service of generating labeled data for
machine learning, have been considered separately in
the machine learning literature on active learning and
in the visual analytics literature on human-.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Segura:2021:BBO,
author = "Vin{\'i}cius Segura and Simone D. J. Barbosa",
title = "{BONNIE}: Building Online Narratives from Noteworthy
Interaction Events",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "21:1--21:31",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3423048",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3423048",
abstract = "Nowadays, we have access to data of unprecedented
volume, high dimensionality, and complexity. To extract
novel insights from such complex and dynamic data, we
need effective and efficient strategies. One such
strategy is to combine data analysis and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hinterreiter:2021:PPE,
author = "Andreas Hinterreiter and Christian Steinparz and
Moritz Sch{\"o}fl and Holger Stitz and Marc Streit",
title = "Projection Path Explorer: Exploring Visual Patterns in
Projected Decision-making Paths",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "22:1--22:29",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3387165",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3387165",
abstract = "In problem-solving, a path towards a solution can be
viewed as a sequence of decisions. The decisions, made
by humans or computers, describe a trajectory through a
high-dimensional representation space of the problem.
By means of dimensionality reduction,. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kim:2021:LGR,
author = "Chris Kim and Xiao Lin and Christopher Collins and
Graham W. Taylor and Mohamed R. Amer",
title = "Learn, Generate, Rank, Explain: a Case Study of Visual
Explanation by Generative Machine Learning",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "23:1--23:34",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3465407",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3465407",
abstract = "While the computer vision problem of searching for
activities in videos is usually addressed by using
discriminative models, their decisions tend to be
opaque and difficult for people to understand. We
propose a case study of a novel machine learning
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mohseni:2021:MSF,
author = "Sina Mohseni and Niloofar Zarei and Eric D. Ragan",
title = "A Multidisciplinary Survey and Framework for Design
and Evaluation of Explainable {AI} Systems",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "24:1--24:45",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3387166",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3387166",
abstract = "The need for interpretable and accountable intelligent
systems grows along with the prevalence of artificial
intelligence (AI) applications used in everyday life.
Explainable AI (XAI) systems are intended to
self-explain the reasoning behind system \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hepenstal:2021:DCA,
author = "Sam Hepenstal and Leishi Zhang and Neesha Kodagoda and
B. L. William Wong",
title = "Developing Conversational Agents for Use in Criminal
Investigations",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "25:1--25:35",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3444369",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3444369",
abstract = "The adoption of artificial intelligence (AI) systems
in environments that involve high risk and high
consequence decision-making is severely hampered by
critical design issues. These issues include system
transparency and brittleness, where transparency
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Meng:2021:VVA,
author = "Linhao Meng and Yating Wei and Rusheng Pan and Shuyue
Zhou and Jianwei Zhang and Wei Chen",
title = "{VADAF}: Visualization for Abnormal Client Detection
and Analysis in Federated Learning",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "26:1--26:23",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3426866",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3426866",
abstract = "Federated Learning (FL) provides a powerful solution
to distributed machine learning on a large corpus of
decentralized data. It ensures privacy and security by
performing computation on devices (which we refer to as
clients) based on local data to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Li:2021:ASE,
author = "Mingzhao Li and Zhifeng Bao and Farhana Choudhury and
Hanan Samet and Matt Duckham and Timos Sellis",
title = "{AOI}-shapes: an Efficient Footprint Algorithm to
Support Visualization of User-defined Urban Areas of
Interest",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "27:1--27:32",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3431817",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3431817",
abstract = "Understanding urban areas of interest (AOIs) is
essential in many real-life scenarios, and such AOIs
can be computed based on the geographic points that
satisfy user queries. In this article, we study the
problem of efficient and effective visualization
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Barral:2021:EAG,
author = "Oswald Barral and S{\'e}bastien Lall{\'e} and Alireza
Iranpour and Cristina Conati",
title = "Effect of Adaptive Guidance and Visualization Literacy
on Gaze Attentive Behaviors and Sequential Patterns on
Magazine-Style Narrative Visualizations",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "28:1--28:46",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3447992",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3447992",
abstract = "We study the effectiveness of adaptive interventions
at helping users process textual documents with
embedded visualizations, a form of multimodal documents
known as Magazine-Style Narrative Visualizations
(MSNVs). The interventions are meant to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Dodge:2021:AAR,
author = "Jonathan Dodge and Roli Khanna and Jed Irvine and
Kin-ho Lam and Theresa Mai and Zhengxian Lin and
Nicholas Kiddle and Evan Newman and Andrew Anderson and
Sai Raja and Caleb Matthews and Christopher Perdriau
and Margaret Burnett and Alan Fern",
title = "After-Action Review for {AI (AAR\slash AI)}",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "29:1--29:35",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453173",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3453173",
abstract = "Explainable AI is growing in importance as AI pervades
modern society, but few have studied how explainable AI
can directly support people trying to assess an AI
agent. Without a rigorous process, people may approach
assessment in ad hoc ways-leading to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Starke:2021:PEE,
author = "Alain Starke and Martijn Willemsen and Chris
Snijders",
title = "Promoting Energy-Efficient Behavior by Depicting
Social Norms in a Recommender Interface",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "30:1--30:32",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3460005",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3460005",
abstract = "How can recommender interfaces help users to adopt new
behaviors? In the behavioral change literature, social
norms and other nudges are studied to understand how
people can be convinced to take action (e.g., towel
re-use is boosted when stating that \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhou:2021:ISC,
author = "Michelle X. Zhou",
title = "Introduction to the Special Column for Human-Centered
Artificial Intelligence",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "31:1--31:1",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3490553",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3490553",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "31",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Yang:2021:TRA,
author = "Qiang Yang",
title = "Toward Responsible {AI}: an Overview of Federated
Learning for User-centered Privacy-preserving
Computing",
journal = j-TIIS,
volume = "11",
number = "3--4",
pages = "32:1--32:22",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3485875",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Dec 10 11:35:09 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3485875",
abstract = "With the rapid advances of Artificial Intelligence
(AI) technologies and applications, an increasing
concern is on the development and application of
responsible AI technologies. Building AI technologies
or machine-learning models often requires massive
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "32",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Khanna:2022:FAF,
author = "Roli Khanna and Jonathan Dodge and Andrew Anderson and
Rupika Dikkala and Jed Irvine and Zeyad Shureih and
Kin-Ho Lam and Caleb R. Matthews and Zhengxian Lin and
Minsuk Kahng and Alan Fern and Margaret Burnett",
title = "Finding {AI's} Faults with {AAR\slash AI}: an
Empirical Study",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "1:1--1:33",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3487065",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3487065",
abstract = "Would you allow an AI agent to make decisions on your
behalf? If the answer is ``not always,'' the next
question becomes ``in what circumstances''? Answering
this question requires human users to be able to assess
an AI agent-and not just with overall pass/. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{VanBerkel:2022:IRF,
author = "Niels {Van Berkel} and Jeremy Opie and Omer F. Ahmad
and Laurence Lovat and Danail Stoyanov and Ann
Blandford",
title = "Initial Responses to False Positives in {AI}-Supported
Continuous Interactions: a Colonoscopy Case Study",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "2:1--2:18",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3480247",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3480247",
abstract = "The use of artificial intelligence (AI) in clinical
support systems is increasing. In this article, we
focus on AI support for continuous interaction
scenarios. A thorough understanding of end-user
behaviour during these continuous human-AI
interactions, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zini:2022:ACT,
author = "Floriano Zini and Fabio {Le Piane} and Mauro Gaspari",
title = "Adaptive Cognitive Training with Reinforcement
Learning",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "3:1--3:29",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3476777",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3476777",
abstract = "Computer-assisted cognitive training can help patients
affected by several illnesses alleviate their cognitive
deficits or healthy people improve their mental
performance. In most computer-based systems, training
sessions consist of graded exercises, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Tang:2022:SOA,
author = "Tan Tang and Junxiu Tang and Jiewen Lai and Lu Ying
and Yingcai Wu and Lingyun Yu and Peiran Ren",
title = "{SmartShots}: an Optimization Approach for Generating
Videos with Data Visualizations Embedded",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "4:1--4:21",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3484506",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3484506",
abstract = "Videos are well-received methods for storytellers to
communicate various narratives. To further engage
viewers, we introduce a novel visual medium where data
visualizations are embedded into videos to present data
insights. However, creating such data-. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ahn:2022:TCI,
author = "Yongsu Ahn and Muheng Yan and Yu-Ru Lin and Wen-Ting
Chung and Rebecca Hwa",
title = "Tribe or Not? {Critical} Inspection of Group
Differences Using {TribalGram}",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "5:1--5:34",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3484509",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3484509",
abstract = "With the rise of AI and data mining techniques, group
profiling and group-level analysis have been
increasingly used in many domains, including policy
making and direct marketing. In some cases, the
statistics extracted from data may provide insights to
a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Bruckner:2022:LGC,
author = "Lukas Br{\"u}ckner and Luis A. Leiva and Antti
Oulasvirta",
title = "Learning {GUI} Completions with User-defined
Constraints",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "6:1--6:40",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3490034",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3490034",
abstract = "A key objective in the design of graphical user
interfaces (GUIs) is to ensure consistency across
screens of the same product. However, designing a
compliant layout is time-consuming and can distract
designers from creative thinking. This paper studies
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhou:2022:EIT,
author = "Michelle X. Zhou",
title = "Editorial Introduction to {TiiS} Special Category
Article: Practitioners' Toolbox",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "7:1--7:1",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519381",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519381",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mascarenhas:2022:FTT,
author = "Samuel Mascarenhas and Manuel Guimar{\~a}es and Rui
Prada and Pedro A. Santos and Jo{\~a}o Dias and Ana
Paiva",
title = "{FAtiMA} Toolkit: Toward an Accessible Tool for the
Development of Socio-emotional Agents",
journal = j-TIIS,
volume = "12",
number = "1",
pages = "8:1--8:30",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3510822",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Fri Mar 25 07:11:26 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3510822",
abstract = "More than a decade has passed since the development of
FearNot!, an application designed to help children deal
with bullying through role-playing with virtual
characters. It was also the application that led to the
creation of FAtiMA, an affective agent \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kocaballi:2022:SIC,
author = "A. Baki Kocaballi and Liliana Laranjo and Leigh Clark
and Rafa{\l} Kocielnik and Robert J. Moore and Q. Vera
Liao and Timothy Bickmore",
title = "Special Issue on Conversational Agents for Healthcare
and Wellbeing",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "9:1--9:3",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3532860",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3532860",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Maharjan:2022:ESE,
author = "Raju Maharjan and Kevin Doherty and Darius Adam Rohani
and Per B{\ae}kgaard and Jakob E. Bardram",
title = "Experiences of a Speech-enabled Conversational Agent
for the Self-report of Well-being among People Living
with Affective Disorders: an In-the-Wild Study",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "10:1--10:29",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3484508",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3484508",
abstract = "The growing commercial success of smart speaker
devices following recent advancements in speech
recognition technology has surfaced new opportunities
for collecting self-reported health and well-being
data. Speech-enabled conversational agents (CAs) in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Koulouri:2022:CSY,
author = "Theodora Koulouri and Robert D. Macredie and David
Olakitan",
title = "Chatbots to Support Young Adults' Mental Health: an
Exploratory Study of Acceptability",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "11:1--11:39",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3485874",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3485874",
abstract = "Despite the prevalence of mental health conditions,
stigma, lack of awareness, and limited resources impede
access to care, creating a need to improve mental
health support. The recent surge in scientific and
commercial interest in conversational agents \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Eagle:2022:DKW,
author = "Tessa Eagle and Conrad Blau and Sophie Bales and
Noopur Desai and Victor Li and Steve Whittaker",
title = "{``I don't know what you mean by `I am anxious'''}: a
New Method for Evaluating Conversational Agent
Responses to Standardized Mental Health Inputs for
Anxiety and Depression",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "12:1--12:23",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3488057",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3488057",
abstract = "Conversational agents (CAs) are increasingly
ubiquitous and are now commonly used to access medical
information. However, we lack systematic data about the
quality of advice such agents provide. This paper
evaluates CA advice for mental health (MH) \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Brewer:2022:ESO,
author = "Robin Brewer and Casey Pierce and Pooja Upadhyay and
Leeseul Park",
title = "An Empirical Study of Older Adults' Voice Assistant
Use for Health Information Seeking",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "13:1--13:32",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3484507",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3484507",
abstract = "Although voice assistants are increasingly being
adopted by older adults, we lack empirical research on
how they interact with these devices for health
information seeking. Also, prior work shows how voice
assistant responses can provide misleading or
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Razavi:2022:DBO,
author = "S. Zahra Razavi and Lenhart K. Schubert and Kimberly
van Orden and Mohammad Rafayet Ali and Benjamin Kane
and Ehsan Hoque",
title = "Discourse Behavior of Older Adults Interacting with a
Dialogue Agent Competent in Multiple Topics",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "14:1--14:21",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3484510",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3484510",
abstract = "We present a conversational agent designed to provide
realistic conversational practice to older adults at
risk of isolation or social anxiety, and show the
results of a content analysis on a corpus of data
collected from experiments with elderly patients
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Jang:2022:RAH,
author = "Yi Hyun Jang and Soo Han Im and Younah Kang and Joon
Sang Baek",
title = "Relational Agents for the Homeless with Tuberculosis
Experience: Providing Social Support Through
Human-agent Relationships",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "15:1--15:22",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3488056",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3488056",
abstract = "In human-computer interaction (HCI) research,
relational agents (RAs) are increasingly used to
improve social support for vulnerable groups including
people exposed to stigmas, alienation, and isolation.
However, technical support for tuberculosis (TB)
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zorrilla:2022:MNC,
author = "Asier L{\'o}pez Zorrilla and M. In{\'e}s Torres",
title = "A Multilingual Neural Coaching Model with Enhanced
Long-term Dialogue Structure",
journal = j-TIIS,
volume = "12",
number = "2",
pages = "16:1--16:47",
month = jun,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3487066",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 25 09:40:04 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3487066",
abstract = "In this work we develop a fully data driven
conversational agent capable of carrying out
motivational coaching sessions in Spanish, French,
Norwegian, and English. Unlike the majority of
coaching, and in general well-being related
conversational agents \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Caldwell:2022:ANR,
author = "Sabrina Caldwell and Penny Sweetser and Nicholas
O'Donnell and Matthew J. Knight and Matthew Aitchison
and Tom Gedeon and Daniel Johnson and Margot Brereton
and Marcus Gallagher and David Conroy",
title = "An Agile New Research Framework for Hybrid Human-{AI}
Teaming: Trust, Transparency, and Transferability",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "17:1--17:36",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3514257",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3514257",
abstract = "We propose a new research framework by which the
nascent discipline of human-AI teaming can be explored
within experimental environments in preparation for
transferal to real-world contexts. We examine the
existing literature and unanswered research \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Nakao:2022:TIE,
author = "Yuri Nakao and Simone Stumpf and Subeida Ahmed and
Aisha Naseer and Lorenzo Strappelli",
title = "Toward Involving End-users in Interactive
Human-in-the-loop {AI} Fairness",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "18:1--18:30",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3514258",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3514258",
abstract = "Ensuring fairness in artificial intelligence (AI) is
important to counteract bias and discrimination in
far-reaching applications. Recent work has started to
investigate how humans judge fairness and how to
support machine learning experts in making their
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kruse:2022:EMA,
author = "Jan Kruse and Andy M. Connor and Stefan Marks",
title = "Evaluation of a Multi-agent {``Human-in-the-loop''}
Game Design System",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "19:1--19:26",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3531009",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3531009",
abstract = "Designing games is a complicated and time-consuming
process, where developing new levels for existing games
can take weeks. Procedural content generation offers
the potential to shorten this timeframe, however,
automated design tools are not adopted \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Alhejaili:2022:ELF,
author = "Abdullah Alhejaili and Shaheen Fatima",
title = "Expressive Latent Feature Modelling for Explainable
Matrix Factorisation-based Recommender Systems",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "20:1--20:30",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3530299",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3530299",
abstract = "The traditional matrix factorisation (MF)-based
recommender system methods, despite their success in
making the recommendation, lack explainable
recommendations as the produced latent features are
meaningless and cannot explain the recommendation. This
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hsieh:2022:ADA,
author = "Sheng-Jen Hsieh and Andy R. Wang and Anna Madison and
Chad Tossell and Ewart de Visser",
title = "Adaptive Driving Assistant Model {(ADAM)} for Advising
Drivers of Autonomous Vehicles",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "21:1--21:28",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3545994",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3545994",
abstract = "Fully autonomous driving is on the horizon; vehicles
with advanced driver assistance systems (ADAS) such as
Tesla's Autopilot are already available to consumers.
However, all currently available ADAS applications
require a human driver to be alert and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Handler:2022:CIQ,
author = "Abram Handler and Narges Mahyar and Brendan O'Connor",
title = "{ClioQuery}: Interactive Query-oriented Text Analytics
for Comprehensive Investigation of Historical News
Archives",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "22:1--22:49",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3524025",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3524025",
abstract = "Historians and archivists often find and analyze the
occurrences of query words in newspaper archives to
help answer fundamental questions about society. But
much work in text analytics focuses on helping people
investigate other textual units, such as \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Liu:2022:SSE,
author = "Fang Liu and Xiaoming Deng and Jiancheng Song and
Yu-Kun Lai and Yong-Jin Liu and Hao Wang and Cuixia Ma
and Shengfeng Qin and Hongan Wang",
title = "{SketchMaker}: Sketch Extraction and Reuse for
Interactive Scene Sketch Composition",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "23:1--23:26",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543956",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3543956",
abstract = "Sketching is an intuitive and simple way to depict
scenes with various object form and appearance
characteristics. In the past few years, widely
available touchscreen devices have increasingly made
sketch-based human-AI co-creation applications popular.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kronenberg:2022:IOW,
author = "Rotem Kronenberg and Tsvi Kuflik and Ilan Shimshoni",
title = "Improving Office Workers' Workspace Using a
Self-adjusting Computer Screen",
journal = j-TIIS,
volume = "12",
number = "3",
pages = "24:1--24:32",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3545993",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Sep 20 09:43:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3545993",
abstract = "With the rapid evolution of technology, computers and
their users' workspaces have become an essential part
of our life in general. Today, many people use
computers both for work and for personal needs,
spending long hours sitting at a desk in front of a
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hammond:2022:SIH,
author = "Tracy Hammond and Bart Knijnenburg and John O'Donovan
and Paul Taele",
title = "Special Issue on Highlights of {IUI} 2021:
Introduction",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "25:1--25:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561516",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3561516",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Sovrano:2022:GUC,
author = "Francesco Sovrano and Fabio Vitali",
title = "Generating User-Centred Explanations via Illocutionary
Question Answering: From Philosophy to Interfaces",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "26:1--26:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519265",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519265",
abstract = "We propose a new method for generating explanations
with Artificial Intelligence (AI) and a tool to test
its expressive power within a user interface. In order
to bridge the gap between philosophy and human-computer
interfaces, we show a new approach for \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Wang:2022:EEA,
author = "Xinru Wang and Ming Yin",
title = "Effects of Explanations in {AI}-Assisted Decision
Making: Principles and Comparisons",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "27:1--27:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519266",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519266",
abstract = "Recent years have witnessed the growing literature in
empirical evaluation of explainable AI (XAI) methods.
This study contributes to this ongoing conversation by
presenting a comparison on the effects of a set of
established XAI methods in AI-assisted \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Nourani:2022:IUB,
author = "Mahsan Nourani and Chiradeep Roy and Jeremy E. Block
and Donald R. Honeycutt and Tahrima Rahman and Eric D.
Ragan and Vibhav Gogate",
title = "On the Importance of User Backgrounds and Impressions:
Lessons Learned from Interactive {AI} Applications",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "28:1--28:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3531066",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3531066",
abstract = "While EXplainable Artificial Intelligence (XAI)
approaches aim to improve human-AI collaborative
decision-making by improving model transparency and
mental model formations, experiential factors
associated with human users can cause challenges in
ways \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Buschek:2022:HSU,
author = "Daniel Buschek and Malin Eiband and Heinrich
Hussmann",
title = "How to Support Users in Understanding Intelligent
Systems? An Analysis and Conceptual Framework of User
Questions Considering User Mindsets, Involvement, and
Knowledge Outcomes",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "29:1--29:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519264",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519264",
abstract = "The opaque nature of many intelligent systems violates
established usability principles and thus presents a
challenge for human-computer interaction. Research in
the field therefore highlights the need for
transparency, scrutability, intelligibility, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ramos:2022:FAO,
author = "Gonzalo Ramos and Napol Rachatasumrit and Jina Suh and
Rachel Ng and Christopher Meek",
title = "{ForSense}: Accelerating Online Research Through
Sensemaking Integration and Machine Research Support",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "30:1--30:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3532853",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3532853",
abstract = "Online research is a frequent and important activity
people perform on the Internet, yet current support for
this task is basic, fragmented and not well integrated
into web browser experiences. Guided by sensemaking
theory, we present ForSense, a browser \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Karimi:2022:TTS,
author = "Pegah Karimi and Emanuele Plebani and Aqueasha
Martin-Hammond and Davide Bolchini",
title = "Textflow: Toward Supporting Screen-free Manipulation
of Situation-Relevant Smart Messages",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "31:1--31:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519263",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519263",
abstract = "Texting relies on screen-centric prompts designed for
sighted users, still posing significant barriers to
people who are blind and visually impaired (BVI). Can
we re-imagine texting untethered from a visual display?
In an interview study, 20 BVI adults \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "31",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Svikhnushina:2022:PMK,
author = "Ekaterina Svikhnushina and Pearl Pu",
title = "{PEACE}: a Model of Key Social and Emotional Qualities
of Conversational Chatbots",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "32:1--32:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3531064",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3531064",
abstract = "Open-domain chatbots engage with users in natural
conversations to socialize and establish bonds.
However, designing and developing an effective
open-domain chatbot is challenging. It is unclear what
qualities of a chatbot most correspond to users'
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "32",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Das:2022:DRD,
author = "Kapotaksha Das and Michalis Papakostas and Kais Riani
and Andrew Gasiorowski and Mohamed Abouelenien and
Mihai Burzo and Rada Mihalcea",
title = "Detection and Recognition of Driver Distraction Using
Multimodal Signals",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "33:1--33:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519267",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519267",
abstract = "Distracted driving is a leading cause of accidents
worldwide. The tasks of distraction detection and
recognition have been traditionally addressed as
computer vision problems. However, distracted behaviors
are not always expressed in a visually observable
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "33",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ang:2022:LSR,
author = "Gary Ang and Ee-Peng Lim",
title = "Learning Semantically Rich Network-based Multi-modal
Mobile User Interface Embeddings",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "34:1--34:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3533856",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3533856",
abstract = "Semantically rich information from multiple
modalities-text, code, images, categorical and
numerical data-co-exist in the user interface (UI)
design of mobile applications. Moreover, each UI design
is composed of inter-linked UI entities that support
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "34",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Yagi:2022:GFR,
author = "Takuma Yagi and Takumi Nishiyasu and Kunimasa Kawasaki
and Moe Matsuki and Yoichi Sato",
title = "{GO-Finder}: a Registration-free Wearable System for
Assisting Users in Finding Lost Hand-held Objects",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "35:1--35:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3519268",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3519268",
abstract = "People spend an enormous amount of time and effort
looking for lost objects. To help remind people of the
location of lost objects, various computational systems
that provide information on their locations have been
developed. However, prior systems for \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "35",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Feng:2022:AIA,
author = "Sidong Feng and Minmin Jiang and Tingting Zhou and
Yankun Zhen and Chunyang Chen",
title = "{Auto-Icon+}: an Automated End-to-End Code Generation
Tool for Icon Designs in {UI} Development",
journal = j-TIIS,
volume = "12",
number = "4",
pages = "36:1--36:??",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3531065",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:07 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3531065",
abstract = "Approximately 50\% of development resources are
devoted to user interface (UI) development tasks [9].
Occupying a large proportion of development resources,
developing icons can be a time-consuming task, because
developers need to consider not only \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "36",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Li:2023:ESE,
author = "Xingjun Li and Yizhi Zhang and Justin Leung and
Chengnian Sun and Jian Zhao",
title = "{EDAssistant}: Supporting Exploratory Data Analysis in
Computational Notebooks with In Situ Code Search and
Recommendation",
journal = j-TIIS,
volume = "13",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3545995",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:08 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3545995",
abstract = "Using computational notebooks (e.g., Jupyter
Notebook), data scientists rationalize their
exploratory data analysis (EDA) based on their prior
experience and external knowledge, such as online
examples. For novices or data scientists who lack
specific \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Liu:2023:SGL,
author = "Huimin Liu and Minsoo Choi and Dominic Kao and
Christos Mousas",
title = "Synthesizing Game Levels for Collaborative Gameplay in
a Shared Virtual Environment",
journal = j-TIIS,
volume = "13",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3558773",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:08 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3558773",
abstract = "We developed a method to synthesize game levels that
accounts for the degree of collaboration required by
two players to finish a given game level. We first
asked a game level designer to create playable game
level chunks. Then, two artificial \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Yan:2023:IPT,
author = "Dongning Yan and Li Chen",
title = "The Influence of Personality Traits on User
Interaction with Recommendation Interfaces",
journal = j-TIIS,
volume = "13",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3558772",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:08 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3558772",
abstract = "Users' personality traits can take an active role in
affecting their behavior when they interact with a
computer interface. However, in the area of recommender
systems (RS), though personality-based RS has been
extensively studied, most works focus on \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Lin:2023:PIM,
author = "Yi-Ling Lin and Shao-Wei Lee",
title = "A Personalized Interaction Mechanism Framework for
Micro-moment Recommender Systems",
journal = j-TIIS,
volume = "13",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3569586",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:08 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3569586",
abstract = "The emergence of the micro-moment concept highlights
the influence of context; recommender system design
should reflect this trend. In response to different
contexts, a micro-moment recommender system (MMRS)
requires an effective interaction mechanism \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Afzal:2023:VVA,
author = "Shehzad Afzal and Sohaib Ghani and Mohamad Mazen
Hittawe and Sheikh Faisal Rashid and Omar M. Knio and
Markus Hadwiger and Ibrahim Hoteit",
title = "Visualization and Visual Analytics Approaches for
Image and Video Datasets: a Survey",
journal = j-TIIS,
volume = "13",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3576935",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Mar 21 06:18:08 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3576935",
abstract = "Image and video data analysis has become an
increasingly important research area with applications
in different domains such as security surveillance,
healthcare, augmented and virtual reality, video and
image editing, activity analysis and recognition,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Hernandez-Bocanegra:2023:ERT,
author = "Diana C. Hernandez-Bocanegra and J{\"u}rgen Ziegler",
title = "Explaining Recommendations through Conversations:
Dialog Model and the Effects of Interface Type and
Degree of Interactivity",
journal = j-TIIS,
volume = "13",
number = "2",
pages = "6:1--6:??",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579541",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 3 06:48:34 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3579541",
abstract = "Explaining system-generated recommendations based on
user reviews can foster users' understanding and
assessment of the recommended items and the recommender
system (RS) as a whole. While up to now explanations
have mostly been static, shown in a single \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Das:2023:EAR,
author = "Devleena Das and Yasutaka Nishimura and Rajan P. Vivek
and Naoto Takeda and Sean T. Fish and Thomas Pl{\"o}tz
and Sonia Chernova",
title = "Explainable Activity Recognition for Smart Home
Systems",
journal = j-TIIS,
volume = "13",
number = "2",
pages = "7:1--7:??",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561533",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 3 06:48:34 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3561533",
abstract = "Smart home environments are designed to provide
services that help improve the quality of life for the
occupant via a variety of sensors and actuators
installed throughout the space. Many automated actions
taken by a smart home are governed by the output
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Rudrauf:2023:CPC,
author = "D. Rudrauf and G. Sergeant-Perhtuis and Y. Tisserand
and T. Monnor and V. {De Gevigney} and O. Belli",
title = "Combining the Projective Consciousness Model and
Virtual Humans for Immersive Psychological Research: a
Proof-of-concept Simulating a {ToM} Assessment",
journal = j-TIIS,
volume = "13",
number = "2",
pages = "8:1--8:??",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3583886",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 3 06:48:34 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3583886",
abstract = "Relating explicit psychological mechanisms and
observable behaviours is a central aim of psychological
and behavioural science. One of the challenges is to
understand and model the role of consciousness and, in
particular, its subjective perspective as an \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Guo:2023:GGF,
author = "Mengtian Guo and Zhilan Zhou and David Gotz and Yue
Wang",
title = "{GRAFS}: Graphical Faceted Search System to Support
Conceptual Understanding in Exploratory Search",
journal = j-TIIS,
volume = "13",
number = "2",
pages = "9:1--9:??",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3588319",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 3 06:48:34 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3588319",
abstract = "When people search for information about a new topic
within large document collections, they implicitly
construct a mental model of the unfamiliar information
space to represent what they currently know and guide
their exploration into the unknown. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Jentner:2023:VAC,
author = "Wolfgang Jentner and Giuliana Lindholz and Hanna
Hauptmann and Mennatallah El-Assady and Kwan-Liu Ma and
Daniel Keim",
title = "Visual Analytics of Co-Occurrences to Discover
Subspaces in Structured Data",
journal = j-TIIS,
volume = "13",
number = "2",
pages = "10:1--10:??",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579031",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Mon Jul 3 06:48:34 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3579031",
abstract = "We present an approach that shows all relevant
subspaces of categorical data condensed in a single
picture. We model the categorical values of the
attributes as co-occurrences with data partitions
generated from structured data using pattern mining. We
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Yalcin:2023:IIP,
author = "{\"O}zge Nilay Yal{\c{c}}{\i}n and S{\'e}bastien
Lall{\'e} and Cristina Conati",
title = "The Impact of Intelligent Pedagogical Agents'
Interventions on Student Behavior and Performance in
Open-Ended Game Design Environments",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "11:1--11:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3578523",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3578523",
abstract = "Research has shown that free-form Game-Design (GD)
environments can be very effective in fostering
Computational Thinking (CT) skills at a young
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ang:2023:LUU,
author = "Gary Ang and Ee-Peng Lim",
title = "Learning and Understanding User Interface Semantics
from Heterogeneous Networks with Multimodal and
Positional Attributes",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "12:1--12:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3578522",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3578522",
abstract = "User interfaces (UI) of desktop, web, and mobile
applications involve a hierarchy of objects (e.g.,
applications, screens, view class, and other types of
design \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ferdous:2023:EEW,
author = "Javedul Ferdous and Hae-Na Lee and Sampath Jayarathna
and Vikas Ashok",
title = "Enabling Efficient {Web} Data-Record Interaction for
People with Visual Impairments via Proxy Interfaces",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "13:1--13:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579364",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3579364",
abstract = "Web data records are usually accompanied by auxiliary
webpage segments, such as filters, sort options, search
form, and multi-page links, to enhance interaction
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Aguirre:2023:CTC,
author = "Carlos Aguirre and Shiye Cao and Amama Mahmood and
Chien-Ming Huang",
title = "Crowdsourcing Thumbnail Captions: Data Collection and
Validation",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "14:1--14:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3589346",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3589346",
abstract = "Speech interfaces, such as personal assistants and
screen readers, read image captions to users.
Typically, however, only one caption is available per
image, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Shibata:2023:CCS,
author = "Ryoichi Shibata and Shoya Matsumori and Yosuke Fukuchi
and Tomoyuki Maekawa and Mitsuhiko Kimoto and Michita
Imai",
title = "Conversational Context-sensitive Ad Generation with a
Few Core-Queries",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "15:1--15:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3588578",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3588578",
abstract = "When people are talking together in front of digital
signage, advertisements that are aware of the context
of the dialogue will work the most effectively.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Sluyters:2023:RAR,
author = "Arthur Slu{\"y}ters and S{\'e}bastien Lambot and Jean
Vanderdonckt and Radu-Daniel Vatavu",
title = "{RadarSense}: Accurate Recognition of Mid-air Hand
Gestures with Radar Sensing and Few Training Examples",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "16:1--16:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3589645",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3589645",
abstract = "Microwave radars bring many benefits to mid-air
gesture sensing due to their large field of view and
independence from environmental conditions, such as
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Wang:2023:WBH,
author = "Clarice Wang and Kathryn Wang and Andrew Y. Bian and
Rashidul Islam and Kamrun Naher Keya and James Foulds
and Shimei Pan",
title = "When Biased Humans Meet Debiased {AI}: a Case Study in
College Major Recommendation",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "17:1--17:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3611313",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3611313",
abstract = "Currently, there is a surge of interest in fair
Artificial Intelligence (AI) and Machine Learning (ML)
research which aims to mitigate discriminatory bias in
AI \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Smith:2023:GDB,
author = "Ronnie Smith and Mauro Dragone",
title = "Generalisable Dialogue-based Approach for Active
Learning of Activities of Daily Living",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "18:1--18:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3616017",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3616017",
abstract = "While Human Activity Recognition systems may benefit
from Active Learning by allowing users to self-annotate
their Activities of Daily Living (ADLs), \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhou:2023:TBP,
author = "Michelle Zhou and Shlomo Berkovsky",
title = "2022 {TiiS} Best Paper Announcement",
journal = j-TIIS,
volume = "13",
number = "3",
pages = "19:1--19:??",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3615590",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 13 06:40:19 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3615590",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Li:2023:VAN,
author = "Yiran Li and Junpeng Wang and Takanori Fujiwara and
Kwan-Liu Ma",
title = "Visual Analytics of Neuron Vulnerability to
Adversarial Attacks on Convolutional Neural Networks",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "20:1--20:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3587470",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3587470",
abstract = "Adversarial attacks on a convolutional neural network
(CNN)-injecting human-imperceptible perturbations into
an input image-could fool a high-performance CNN into
making incorrect predictions. The success of
adversarial attacks raises serious concerns \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Panigutti:2023:CDH,
author = "Cecilia Panigutti and Andrea Beretta and Daniele Fadda
and Fosca Giannotti and Dino Pedreschi and Alan Perotti
and Salvatore Rinzivillo",
title = "Co-design of Human-centered, Explainable {AI} for
Clinical Decision Support",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "21:1--21:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3587271",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3587271",
abstract = "eXplainable AI (XAI) involves two intertwined but
separate challenges: the development of techniques to
extract explanations from black-box AI models and the
way such explanations are presented to users, i.e., the
explanation user interface. Despite its \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Cau:2023:EAL,
author = "Federico Maria Cau and Hanna Hauptmann and Lucio
Davide Spano and Nava Tintarev",
title = "Effects of {AI} and Logic-Style Explanations on Users'
Decisions Under Different Levels of Uncertainty",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "22:1--22:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3588320",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3588320",
abstract = "Existing eXplainable Artificial Intelligence (XAI)
techniques support people in interpreting AI advice.
However, although previous work evaluates the users'
understanding of explanations, factors influencing the
decision support are largely overlooked in \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Singh:2023:DEA,
author = "Ronal Singh and Tim Miller and Henrietta Lyons and Liz
Sonenberg and Eduardo Velloso and Frank Vetere and
Piers Howe and Paul Dourish",
title = "Directive Explanations for Actionable Explainability
in Machine Learning Applications",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "23:1--23:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579363",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3579363",
abstract = "In this article, we show that explanations of
decisions made by machine learning systems can be
improved by not only explaining why a decision was made
but also explaining how an individual could obtain
their desired outcome. We formally define the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Lee:2023:LAE,
author = "Benjamin Charles Germain Lee and Doug Downey and Kyle
Lo and Daniel S. Weld",
title = "{LIMEADE}: From {AI} Explanations to Advice Taking",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "24:1--24:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3589345",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3589345",
abstract = "Research in human-centered AI has shown the benefits
of systems that can explain their predictions. Methods
that allow AI to take advice from humans in response to
explanations are similarly useful. While both
capabilities are well developed for \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Schrills:2023:HDU,
author = "Tim Schrills and Thomas Franke",
title = "How Do Users Experience Traceability of {AI} Systems?
{Examining} Subjective Information Processing Awareness
in Automated Insulin Delivery {(AID)} Systems",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "25:1--25:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3588594",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3588594",
abstract = "When interacting with artificial intelligence (AI) in
the medical domain, users frequently face automated
information processing, which can remain opaque to
them. For example, users with diabetes may interact
daily with automated insulin delivery (AID). \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Vainio-Pekka:2023:REA,
author = "Heidi Vainio-Pekka and Mamia Ori-Otse Agbese and
Marianna Jantunen and Ville Vakkuri and Tommi Mikkonen
and Rebekah Rousi and Pekka Abrahamsson",
title = "The Role of Explainable {AI} in the Research Field of
{AI} Ethics",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "26:1--26:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3599974",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3599974",
abstract = "Ethics of Artificial Intelligence (AI) is a growing
research field that has emerged in response to the
challenges related to AI. Transparency poses a key
challenge for implementing AI ethics in practice. One
solution to transparency issues is AI systems
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Martinez:2023:DEH,
author = "Miguel Angel Meza Mart{\'\i}nez and Mario Nadj and
Moritz Langner and Peyman Toreini and Alexander
Maedche",
title = "Does this Explanation Help? {Designing} Local
Model-agnostic Explanation Representations and an
Experimental Evaluation Using Eye-tracking Technology",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "27:1--27:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3607145",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3607145",
abstract = "In Explainable Artificial Intelligence (XAI) research,
various local model-agnostic methods have been proposed
to explain individual predictions to users in order to
increase the transparency of the underlying Artificial
Intelligence (AI) systems. However, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zoller:2023:XVA,
author = "Marc-Andr{\'e} Z{\"o}ller and Waldemar Titov and
Thomas Schlegel and Marco F. Huber",
title = "{XAutoML}: a Visual Analytics Tool for Understanding
and Validating Automated Machine Learning",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "28:1--28:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3625240",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3625240",
abstract = "In the last 10 years, various automated machine
learning (AutoML) systems have been proposed to build
end-to-end machine learning (ML) pipelines with minimal
human interaction. Even though such automatically
synthesized ML pipelines are able to achieve \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Roy:2023:EAR,
author = "Chiradeep Roy and Mahsan Nourani and Shivvrat Arya and
Mahesh Shanbhag and Tahrima Rahman and Eric D. Ragan
and Nicholas Ruozzi and Vibhav Gogate",
title = "Explainable Activity Recognition in Videos using Deep
Learning and Tractable Probabilistic Models",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "29:1--29:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626961",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3626961",
abstract = "We consider the following video activity recognition
(VAR) task: given a video, infer the set of activities
being performed in the video and assign each frame to
an activity. Although VAR can be solved accurately
using existing deep learning techniques, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Larasati:2023:MEE,
author = "Retno Larasati and Anna {De Liddo} and Enrico Motta",
title = "Meaningful Explanation Effect on {User}'s Trust in an
{AI} Medical System: Designing Explanations for
Non-Expert Users",
journal = j-TIIS,
volume = "13",
number = "4",
pages = "30:1--30:??",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3631614",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Thu Dec 21 10:44:24 MST 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3631614",
abstract = "Whereas most research in AI system explanation for
healthcare applications looks at developing algorithmic
explanations targeted at AI experts or medical
professionals, the question we raise is: How do we
build meaningful explanations for laypeople? And
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhang:2024:SBO,
author = "Yu Zhang and Martijn Tennekes and Tim {De Jong} and
Lyana Curier and Bob Coecke and Min Chen",
title = "Simulation-based Optimization of User Interfaces for
Quality-assuring Machine Learning Model Predictions",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3594552",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3594552",
abstract = "Quality-sensitive applications of machine learning
(ML) require quality assurance (QA) by humans before
the predictions of an ML model can be deployed. QA for
ML (QA4ML) interfaces require users to view a large
amount of data and perform many interactions \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "1",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Wenskovitch:2024:TAA,
author = "John Wenskovitch and Michelle Dowling and Chris
North",
title = "Toward Addressing Ambiguous Interactions and Inferring
User Intent with Dimension Reduction and Clustering
Combinations in Visual Analytics",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3588565",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3588565",
abstract = "Direct manipulation interactions on projections are
often incorporated in visual analytics applications.
These interactions enable analysts to provide
incremental feedback to the system in a semi-supervised
manner, demonstrating relationships that the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "2",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Rathore:2024:VVI,
author = "Archit Rathore and Sunipa Dev and Jeff M. Phillips and
Vivek Srikumar and Yan Zheng and Chin-Chia Michael Yeh
and Junpeng Wang and Wei Zhang and Bei Wang",
title = "{VERB}: Visualizing and Interpreting Bias Mitigation
Techniques Geometrically for Word Representations",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3604433",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3604433",
abstract = "Word vector embeddings have been shown to contain and
amplify biases in the data they are extracted from.
Consequently, many techniques have been proposed to
identify, mitigate, and attenuate these biases in word
representations. In this article, we \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "3",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mehrotra:2024:IBE,
author = "Siddharth Mehrotra and Carolina Centeio Jorge and
Catholijn M. Jonker and Myrthe L. Tielman",
title = "Integrity-based Explanations for Fostering Appropriate
Trust in {AI} Agents",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3610578",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3610578",
abstract = "Appropriate trust is an important component of the
interaction between people and AI systems, in that
``inappropriate'' trust can cause disuse, misuse, or
abuse of AI. To foster appropriate trust in AI, we need
to understand how AI systems can elicit \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "4",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Jorge:2024:HSA,
author = "Carolina Centeio Jorge and Catholijn M. Jonker and
Myrthe L. Tielman",
title = "How Should an {AI} Trust its Human Teammates?
{Exploring} Possible Cues of Artificial Trust",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3635475",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3635475",
abstract = "In teams composed of humans, we use trust in others to
make decisions, such as what to do next, who to help
and who to ask for help. When a team member is
artificial, they should also be able to assess whether
a human teammate is trustworthy for a certain
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "5",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Zhang:2024:KLB,
author = "Rui Zhang and Christopher Flathmann and Geoff Musick
and Beau Schelble and Nathan J. McNeese and Bart
Knijnenburg and Wen Duan",
title = "{I} Know This Looks Bad, But {I} Can Explain:
Understanding When {AI} Should Explain Actions In
{Human--AI} Teams",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "6:1--6:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3635474",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3635474",
abstract = "Explanation of artificial intelligence (AI)
decision-making has become an important research area
in human-computer interaction (HCI) and
computer-supported teamwork research. While plenty of
research has investigated AI explanations with an
intent to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "6",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Emamgholizadeh:2024:PGC,
author = "Hanif Emamgholizadeh and Amra Deli{\'c} and Francesco
Ricci",
title = "Predicting Group Choices from Group Profiles",
journal = j-TIIS,
volume = "14",
number = "1",
pages = "7:1--7:??",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639710",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:09 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3639710",
abstract = "Group recommender systems (GRSs) identify items to
recommend to a group of people by aggregating group
members' individual preferences into a group profile
and selecting the items that have the largest score in
the group profile. The GRS predicts that \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "7",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Guo:2024:TAN,
author = "Yi Guo and Danqing Shi and Mingjuan Guo and Yanqiu Wu
and Nan Cao and Qing Chen",
title = "{Talk$2$Data}: a Natural Language Interface for
Exploratory Visual Analysis via Question
Decomposition",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "8:1--8:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3643894",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3643894",
abstract = "Through a natural language interface (NLI) for
exploratory visual analysis, users can directly ``ask''
analytical questions about the given tabular data. This
process greatly improves user experience and lowers the
technical barriers of data analysis. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "8",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Yousefi:2024:EFM,
author = "Zeinab R. Yousefi and Tung Vuong and Marie AlGhossein
and Tuukka Ruotsalo and Giulio Jacucci and Samuel
Kaski",
title = "Entity Footprinting: Modeling Contextual User States
via Digital Activity Monitoring",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "9:1--9:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3643893",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3643893",
abstract = "Our digital life consists of activities that are
organized around tasks and exhibit different user
states in the digital contexts around these activities.
Previous works have shown that digital activity
monitoring can be used to predict entities that
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "9",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Radeta:2024:MME,
author = "Marko Radeta and Ruben Freitas and Claudio Rodrigues
and Agustin Zuniga and Ngoc Thi Nguyen and Huber Flores
and Petteri Nurmi",
title = "Man and the Machine: Effects of {AI}-assisted Human
Labeling on Interactive Annotation of Real-time Video
Streams",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "10:1--10:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649457",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3649457",
abstract = "AI-assisted interactive annotation is a powerful way
to facilitate data annotation --- a prerequisite for
constructing robust AI models. While AI-assisted
interactive annotation has been extensively studied in
static settings, less is known about its usage in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "10",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Cheng:2024:IWW,
author = "Ruijia Cheng and Ruotong Wang and Thomas Zimmermann
and Denae Ford",
title = "``{It} would work for me too'': How Online Communities
Shape Software Developers' Trust in {AI}-Powered Code
Generation Tools",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "11:1--11:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3651990",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3651990",
abstract = "While revolutionary AI-powered code generation tools
have been rising rapidly, we know little about how and
how to help software developers form appropriate trust
in those AI tools. Through a two-phase formative study,
we investigate how online \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "11",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Liu:2024:SCM,
author = "Can Liu and Yu Zhang and Cong Wu and Chen Li and
Xiaoru Yuan",
title = "A Spatial Constraint Model for Manipulating Static
Visualizations",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "12:1--12:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3657642",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3657642",
abstract = "We introduce a spatial constraint model to
characterize the positioning and interactions in
visualizations, thereby facilitating the activation of
static visualizations. Our model provides users with
the capability to manipulate visualizations through
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "12",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mo:2024:CMO,
author = "George Mo and John Dudley and Liwei Chan and Yi-Chi
Liao and Antti Oulasvirta and Per Ola Kristensson",
title = "Cooperative Multi-Objective {Bayesian} Design
Optimization",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "13:1--13:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3657643",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3657643",
abstract = "Computational methods can potentially facilitate user
interface design by complementing designer intuition,
prior experience, and personal preference. Framing a
user interface design task as a multi-objective
optimization problem can help with \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "13",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Spinner:2024:TEG,
author = "Thilo Spinner and Rebecca Kehlbeck and Rita
Sevastjanova and Tobias St{\"a}hle and Daniel A. Keim
and Oliver Deussen and Mennatallah El-Assady",
title = "{[tree-emoji]-generAItor}: Tree-in-the-loop Text
Generation for Language Model Explainability and
Adaptation",
journal = j-TIIS,
volume = "14",
number = "2",
pages = "14:1--14:??",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3652028",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Tue Jun 25 07:33:10 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3652028",
abstract = "Large language models (LLMs) are widely deployed in
various downstream tasks, e.g., auto-completion, aided
writing, or chat-based text generation. However, the
considered output candidates of the underlying search
algorithm are under-explored and under- \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "14",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Li:2024:ISS,
author = "Yante Li and Yang Liu and Andy Nguyen and Henglin Shi
and Eija Vuorenmaa and Sanna J{\"a}rvel{\"a} and
Guoying Zhao",
title = "Interactions for Socially Shared Regulation in
Collaborative Learning: an Interdisciplinary Multimodal
Dataset",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "15:1--15:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3658376",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3658376",
abstract = "Socially shared regulation plays a pivotal role in the
success of collaborative learning. However, evaluating
socially shared regulation of learning (SSRL) proves
challenging due to the dynamic and infrequent cognitive
and socio-emotional interactions, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "15",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Humer:2024:RMD,
author = "Christina Humer and Andreas Hinterreiter and Benedikt
Leichtmann and Martina Mara and Marc Streit",
title = "Reassuring, Misleading, Debunking: Comparing Effects
of {XAI} Methods on Human Decisions",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "16:1--16:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3665647",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3665647",
abstract = "Trust calibration is essential in AI-assisted
decision-making. If human users understand the
rationale on which an AI model has made a prediction,
they can decide whether they consider this prediction
reasonable. Especially in high-risk tasks such as
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "16",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Mcintosh:2024:RVA,
author = "Timothy R. Mcintosh and Tong Liu and Teo Susnjak and
Paul Watters and Malka N. Halgamuge",
title = "A Reasoning and Value Alignment Test to Assess
Advanced {GPT} Reasoning",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "17:1--17:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3670691",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3670691",
abstract = "In response to diverse perspectives on artificial
general intelligence (AGI), ranging from potential
safety and ethical concerns to more extreme views about
the threats it poses to humanity, this research
presents a generic method to gauge the reasoning
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "17",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Tsiakas:2024:UHA,
author = "Konstantinos Tsiakas and Dave Murray-Rust",
title = "Unpacking Human-{AI} interactions: From Interaction
Primitives to a Design Space",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "18:1--18:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3664522",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3664522",
abstract = "This article aims to develop a semi-formal
representation for Human-AI (HAI) interactions, by
building a set of interaction primitives which can
specify the information exchanges between users and AI
systems during their interaction. We show how these
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "18",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Chatti:2024:VRE,
author = "Mohamed Amine Chatti and Mouadh Guesmi and Arham
Muslim",
title = "Visualization for Recommendation Explainability: a
Survey and New Perspectives",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "19:1--19:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3672276",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3672276",
abstract = "Providing system-generated explanations for
recommendations represents an important step toward
transparent and trustworthy recommender systems.
Explainable recommender systems provide a
human-understandable rationale for their outputs. Over
the past two \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "19",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Antony:2024:ICC,
author = "Victor Nikhil Antony and Chien-Ming Huang",
title = "{ID.8}: Co-Creating Visual Stories with Generative
{AI}",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "20:1--20:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3672277",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3672277",
abstract = "Storytelling is an integral part of human culture and
significantly impacts cognitive and socio-emotional
development and connection. Despite the importance of
interactive visual storytelling, the process of
creating such content requires specialized \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "20",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Anderson:2024:MUE,
author = "Andrew Anderson and Jimena Noa Guevara and Fatima
Moussaoui and Tianyi Li and Mihaela Vorvoreanu and
Margaret Burnett",
title = "Measuring User Experience Inclusivity in {Human-AI}
Interaction via Five User Problem-Solving Styles",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "21:1--21:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3663740",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3663740",
abstract = "Motivations: Recent research has emerged on generally
how to improve AI products' human-AI interaction (HAI)
user experience (UX), but relatively little is known
about HAI-UX inclusivity. For example, what kinds of
users are supported, and who are left \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "21",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Lawless:2024:WIW,
author = "Connor Lawless and Jakob Schoeffer and Lindy Le and
Kael Rowan and Shilad Sen and Cristina {St. Hill} and
Jina Suh and Bahareh Sarrafzadeh",
title = "{``I Want It That Way''}: Enabling Interactive
Decision Support Using Large Language Models and
Constraint Programming",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "22:1--22:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3685053",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3685053",
abstract = "A critical factor in the success of many decision
support systems is the accurate modeling of user
preferences. Psychology research has demonstrated that
users often develop their preferences during the
elicitation process, highlighting the pivotal role
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "22",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Choi:2024:EES,
author = "Minsoo Choi and Siqi Guo and Alexandros Koilias and
Matias Volonte and Dominic Kao and Christos Mousas",
title = "Exploring the Effects of Self-Correction Behavior of
an Intelligent Virtual Character during a Jigsaw Puzzle
Co-Solving Task",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "23:1--23:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3688006",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3688006",
abstract = "Although researchers have explored how humans perceive
the intelligence of virtual characters, few studies
have focused on the ability of intelligent virtual
characters to fix their mistakes. Thus, we explored the
self-correction behavior of a virtual \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "23",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Berkovsky:2024:TBP,
author = "Shlomo Berkovsky",
title = "{2023 TiiS Best Paper} announcement",
journal = j-TIIS,
volume = "14",
number = "3",
pages = "24:1--24:??",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3690000",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Wed Sep 25 11:23:38 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3690000",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "24",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Ning:2024:INL,
author = "Zheng Ning and Yuan Tian and Zheng Zhang and Tianyi
Zhang and Toby Jia-Jun Li",
title = "Insights into Natural Language Database Query Errors:
from Attention Misalignment to User Handling
Strategies",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "25:1--25:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3650114",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3650114",
abstract = "Querying structured databases with natural language
(NL2SQL) has remained a difficult problem for years.
Recently, the advancement of machine learning (ML),
natural language processing (NLP), and large language
models (LLM) have led to significant \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "25",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Franke:2024:AXA,
author = "Loraine Franke and Daniel Karl I. Weidele and Nima
Dehmamy and Lipeng Ning and Daniel Haehn",
title = "{AutoRL X}: Automated Reinforcement Learning on the
{Web}",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "26:1--26:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3670692",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3670692",
abstract = "Reinforcement Learning (RL) is crucial in decision
optimization, but its inherent complexity often
presents challenges in interpretation and
communication. Building upon AutoDOViz --- an interface
that pushed the boundaries of Automated RL for Decision
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "26",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Fok:2024:ASP,
author = "Raymond Fok and Luca Soldaini and Cassidy Trier and
Erin Bransom and Kelsey MacMillan and Evie Cheng and
Hita Kambhamettu and Jonathan Bragg and Kyle Lo and
Marti A. Hearst and Andrew Head and Daniel S. Weld",
title = "Accelerating Scientific Paper Skimming with Augmented
Intelligence Through Customizable Faceted Highlights",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "27:1--27:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3665648",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3665648",
abstract = "Scholars need to keep up with an exponentially
increasing flood of scientific papers. To aid this
challenge, we introduce Scim, a novel intelligent
interface that helps scholars skim papers to rapidly
review and gain a cursory understanding of its
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "27",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Warren:2024:CCF,
author = "Greta Warren and Ruth M. J. Byrne and Mark T. Keane",
title = "Categorical and Continuous Features in Counterfactual
Explanations of {AI} Systems",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "28:1--28:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673907",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3673907",
abstract = "Recently, eXplainable AI (XAI) research has focused on
the use of counterfactual explanations to address
interpretability, algorithmic recourse, and bias in AI
system decision-making. The developers of these
algorithms claim they meet user requirements in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "28",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kahr:2024:UTR,
author = "Patricia K. Kahr and Gerrit Rooks and Martijn C.
Willemsen and Chris C. P. Snijders",
title = "Understanding Trust and Reliance Development in {AI}
Advice: Assessing Model Accuracy, Model Explanations,
and Experiences from Previous Interactions",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "29:1--29:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3686164",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3686164",
abstract = "People are increasingly interacting with AI systems,
but successful interactions depend on people trusting
these systems only when appropriate. Since neither
gaining trust in AI advice nor restoring lost trust
after AI mistakes is warranted, we seek to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "29",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}
@Article{Kim:2024:DAB,
author = "Jiwon Kim and Jiwon Kang and Migyeong Yang and Chaehee
Park and Taeeun Kim and Hayeon Song and Jinyoung Han",
title = "Developing an {AI}-based Explainable Expert Support
System for Art Therapy",
journal = j-TIIS,
volume = "14",
number = "4",
pages = "30:1--30:??",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3689649",
ISSN = "2160-6455 (print), 2160-6463 (electronic)",
ISSN-L = "2160-6455",
bibdate = "Sat Dec 21 07:45:47 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/tiis.bib",
URL = "https://dl.acm.org/doi/10.1145/3689649",
abstract = "Sketch-based drawing assessments in art therapy are
widely used to understand individuals' cognitive and
psychological states, such as cognitive impairments or
mental disorders. Along with self-reported measures
based on questionnaires, psychological \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "ACM Trans. Interact. Intell. Syst.",
articleno = "30",
fjournal = "ACM Transactions on Interactive Intelligent Systems
(TIIS)",
journal-URL = "https://dl.acm.org/loi/tiis",
}